diff --git a/.gitignore b/.gitignore
index ba43271401dc..8549cb17e66c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -77,4 +77,4 @@ autom4te.cache
 /depcomp
 /install-sh
 /missing
-/stamp-h1
\ No newline at end of file
+/stamp-h1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..c562438ec94f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+exclude: "\
+    ^(\
+        .changes|\
+        .github|\
+        awscli/examples|\
+        awscli/topics|\
+        awscli/botocore|\
+        awscli/s3transfer|\
+        awscli/doc|\
+        exe/assets|\
+        tests/functional/cloudformation/deploy_templates/booleans/input.yml|\
+        tests/functional/cloudformation/deploy_templates/nested-tag/input.yml|\
+        tests/|\
+        CHANGELOG.rst|\
+        configure\
+    )"
+repos:
+  - repo: 'https://github.com/pre-commit/pre-commit-hooks'
+    rev: v4.5.0
+    hooks:
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: 'https://github.com/PyCQA/isort'
+    rev: 5.12.0
+    hooks:
+      - id: isort
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.4.8
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 0a89c7b112f9..0d60b0eda675 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -67,7 +67,7 @@ Also, ensure your commit messages match this format::
 
     Describe your changes in the imperative mood, e.g. "Add foo to bar",
     "Update foo component for bar", "Fix race condition for foo".
-    
+
     The body of the commit message can include:
 
     * an explanation of the problem and what this change
@@ -120,6 +120,28 @@ can run these commands::
 
 When you push to your remote, the output will contain a URL you can use
 to open a pull request.
 
+Codestyle
+---------
+This project uses `ruff <https://github.com/astral-sh/ruff>`__ to enforce
+codestyle requirements. We've codified this process using a tool called
+`pre-commit <https://pre-commit.com/>`__. pre-commit allows us to specify a
+config file with all tools required for code linting, and surfaces either a
+git commit hook, or single command, for enforcing these.
+
+To validate your pull request prior to publishing, you can use the following
+`installation guide <https://pre-commit.com/#install>`__ to set up pre-commit.
+
+If you don't want to use the git commit hook, you can run the below command
+to automatically perform the codestyle validation:
+
+.. code-block:: bash
+
+    $ pre-commit run
+
+This will automatically perform simple updates (such as whitespace cleanup)
+and provide a list of any failing checks. After these are addressed,
+you can commit the changes prior to publishing the pull request.
+
 Reporting Issues
 ----------------
diff --git a/awscli/__init__.py b/awscli/__init__.py
index 00e226427bd3..70d203398edc 100644
--- a/awscli/__init__.py
+++ b/awscli/__init__.py
@@ -15,8 +15,9 @@
 ----
 A Universal Command Line Environment for Amazon Web Services.
 """
-import os
+
 import importlib.abc
+import os
 import sys
 
 __version__ = '2.22.21'
@@ -36,10 +37,18 @@ os.environ['AWS_DATA_PATH'] = os.pathsep.join(_awscli_data_path)
 
-SCALAR_TYPES = set([
-    'string', 'float', 'integer', 'long', 'boolean', 'double',
-    'blob', 'timestamp'
-])
+SCALAR_TYPES = set(
+    [
+        'string',
+        'float',
+        'integer',
+        'long',
+        'boolean',
+        'double',
+        'blob',
+        'timestamp',
+    ]
+)
 COMPLEX_TYPES = set(['structure', 'map', 'list'])
@@ -57,13 +66,14 @@ class TopLevelImportAliasFinder(importlib.abc.MetaPathFinder):
 
     Note: That this import alias only comes into affect if anything is
     imported from the awscli package.
""" + _PACKAGES = [ 'botocore', 's3transfer', ] _TARGET_FINDERS = [ 'pyimod02_importers.PyiFrozenImporter', # Pyinstaller injected finder - '_frozen_importlib_external.PathFinder' # Built-in path finder + '_frozen_importlib_external.PathFinder', # Built-in path finder ] def __init__(self, underlying_finder): diff --git a/awscli/__main__.py b/awscli/__main__.py index 7d49ba7f871c..63263a3cb831 100644 --- a/awscli/__main__.py +++ b/awscli/__main__.py @@ -16,6 +16,5 @@ from awscli.clidriver import main - if __name__ == "__main__": sys.exit(main()) diff --git a/awscli/alias.py b/awscli/alias.py index 7c889c0215df..1e427d8b8096 100644 --- a/awscli/alias.py +++ b/awscli/alias.py @@ -15,12 +15,10 @@ import shlex import subprocess -from botocore.configloader import raw_config_parse - -from awscli.compat import compat_shell_quote from awscli.commands import CLICommand +from awscli.compat import compat_shell_quote from awscli.utils import emit_top_level_args_parsed_event - +from botocore.configloader import raw_config_parse LOG = logging.getLogger(__name__) @@ -42,9 +40,12 @@ class InvalidAliasException(Exception): class AliasLoader(object): - def __init__(self, - alias_filename=os.path.expanduser( - os.path.join('~', '.aws', 'cli', 'alias'))): + def __init__( + self, + alias_filename=os.path.expanduser( + os.path.join('~', '.aws', 'cli', 'alias') + ), + ): """Interface for loading and interacting with alias file :param alias_filename: The name of the file to load aliases from. @@ -60,8 +61,7 @@ def _build_aliases(self): def _load_aliases(self): parsed = {} if os.path.exists(self._filename): - parsed = raw_config_parse( - self._filename, parse_subsections=False) + parsed = raw_config_parse(self._filename, parse_subsections=False) self._normalize_key_names(parsed) return parsed @@ -106,7 +106,8 @@ def _is_external_alias(self, alias_value): def _inject_external_alias(self, alias_name, alias_value, command_table): command_table[alias_name] = ExternalAliasCommand( - alias_name, alias_value) + alias_name, alias_value + ) class AliasCommandInjector(BaseAliasCommandInjector): @@ -125,20 +126,23 @@ def __init__(self, session, alias_loader): def inject_aliases(self, command_table, parser): for alias_name, alias_value in self._get_alias_items(): if self._is_external_alias(alias_value): - self._inject_external_alias(alias_name, alias_value, - command_table) + self._inject_external_alias( + alias_name, alias_value, command_table + ) else: service_alias_cmd_args = [ - alias_name, alias_value, self._session, command_table, - parser + alias_name, + alias_value, + self._session, + command_table, + parser, ] # If the alias name matches something already in the # command table provide the command it is about # to clobber as a possible reference that it will # need to proxy to. 
                 if alias_name in command_table:
-                    service_alias_cmd_args.append(
-                        command_table[alias_name])
+                    service_alias_cmd_args.append(command_table[alias_name])
                 alias_cmd = ServiceAliasCommand(*service_alias_cmd_args)
                 command_table[alias_name] = alias_cmd
@@ -153,14 +157,18 @@ def _retrieve_global_args_parser(self):
         if self._global_args_parser is None:
             if self._global_cmd_driver is not None:
                 command_table = self._global_cmd_driver.subcommand_table
-                self._global_args_parser = \
+                self._global_args_parser = (
                     self._global_cmd_driver.create_parser(command_table)
+                )
         return self._global_args_parser
 
-    def on_building_command_table(self, command_table, event_name,
-                                  command_object, session, **kwargs):
-        if not isinstance(command_object, CLICommand) and \
-                event_name == 'building-command-table.main':
+    def on_building_command_table(
+        self, command_table, event_name, command_object, session, **kwargs
+    ):
+        if (
+            not isinstance(command_object, CLICommand)
+            and event_name == 'building-command-table.main'
+        ):
             self._global_cmd_driver = command_object
             return
         # We have to transform the event name to figure out what the
@@ -185,14 +193,18 @@ def on_building_command_table(self, command_table, event_name,
         for alias_name, alias_value in aliases_for_cmd.items():
             if self._is_external_alias(alias_value):
                 self._inject_external_alias(
-                    alias_name, alias_value, command_table)
+                    alias_name, alias_value, command_table
+                )
             else:
                 proxied_sub_command = command_table.get(alias_name)
                 command_table[alias_name] = InternalAliasSubCommand(
-                    alias_name, alias_value, command_object,
+                    alias_name,
+                    alias_value,
+                    command_object,
                     self._retrieve_global_args_parser(),
                     session=session,
-                    proxied_sub_command=proxied_sub_command)
+                    proxied_sub_command=proxied_sub_command,
+                )
 
 
 class BaseAliasCommand(CLICommand):
@@ -233,10 +245,7 @@ def lineage(self, value):
 
 class BaseInternalAliasCommand(BaseAliasCommand):
-    UNSUPPORTED_GLOBAL_PARAMETERS = [
-        'debug',
-        'profile'
-    ]
+    UNSUPPORTED_GLOBAL_PARAMETERS = ['debug', 'profile']
 
     def __init__(self, alias_name, alias_value, session):
         super(BaseInternalAliasCommand, self).__init__(alias_name, alias_value)
@@ -248,21 +257,25 @@ def _get_alias_args(self):
         except ValueError as e:
             raise InvalidAliasException(
                 'Value of alias "%s" could not be parsed. '
-                'Received error: %s when parsing:\n%s' % (
-                    self._alias_name, e, self._alias_value)
+                'Received error: %s when parsing:\n%s'
+                % (self._alias_name, e, self._alias_value)
             )
         alias_args = [arg.strip(os.linesep) for arg in alias_args]
         LOG.debug(
             'Expanded subcommand alias %r with value: %r to: %r',
-            self._alias_name, self._alias_value, alias_args
+            self._alias_name,
+            self._alias_value,
+            alias_args,
         )
         return alias_args
 
-    def _update_parsed_globals(self, arg_parser, parsed_alias_args,
-                               parsed_globals):
+    def _update_parsed_globals(
+        self, arg_parser, parsed_alias_args, parsed_globals
+    ):
         global_params_to_update = self._get_global_parameters_to_update(
-            arg_parser, parsed_alias_args)
+            arg_parser, parsed_alias_args
+        )
         # Emit the top level args parsed event to ensure all possible
         # customizations that typically get applied are applied to the
         # global parameters provided in the alias before updating
@@ -288,16 +301,24 @@ def _get_global_parameters_to_update(self, arg_parser, parsed_alias_args):
             if parsed_param in self.UNSUPPORTED_GLOBAL_PARAMETERS:
                 raise InvalidAliasException(
                     'Global parameter "--%s" detected in alias "%s" '
-                    'which is not support in subcommand aliases.' % (
-                        parsed_param, self._alias_name))
+                    'which is not support in subcommand aliases.'
+                    % (parsed_param, self._alias_name)
+                )
             else:
                 global_params_to_update.append(parsed_param)
         return global_params_to_update
 
 
 class ServiceAliasCommand(BaseInternalAliasCommand):
-    def __init__(self, alias_name, alias_value, session, command_table,
-                 parser, shadow_proxy_command=None):
+    def __init__(
+        self,
+        alias_name,
+        alias_value,
+        session,
+        command_table,
+        parser,
+        shadow_proxy_command=None,
+    ):
         """Command for a `toplevel` subcommand alias
 
         :type alias_name: string
@@ -329,7 +350,8 @@ def __init__(self, alias_name, alias_value, session, command_table,
             table
         """
         super(ServiceAliasCommand, self).__init__(
-            alias_name, alias_value, session)
+            alias_name, alias_value, session
+        )
         self._command_table = command_table
         self._parser = parser
         self._shadow_proxy_command = shadow_proxy_command
@@ -337,15 +359,20 @@ def __call__(self, args, parsed_globals):
         alias_args = self._get_alias_args()
         parsed_alias_args, remaining = self._parser.parse_known_args(
-            alias_args)
-        self._update_parsed_globals(self._parser, parsed_alias_args,
-                                    parsed_globals)
+            alias_args
+        )
+        self._update_parsed_globals(
+            self._parser, parsed_alias_args, parsed_globals
+        )
         # Take any of the remaining arguments that were not parsed out and
         # prepend them to the remaining args provided to the alias.
         remaining.extend(args)
         LOG.debug(
             'Alias %r passing on arguments: %r to %r command',
-            self._alias_name, remaining, parsed_alias_args.command)
+            self._alias_name,
+            remaining,
+            parsed_alias_args.command,
+        )
         # Pass the update remaining args and global args to the service command
         # the alias proxied to.
         command = self._command_table[parsed_alias_args.command]
@@ -356,9 +383,9 @@
         # a built-in command.
             if shadow_name == parsed_alias_args.command:
                 LOG.debug(
-                    'Using shadowed command object: %s '
-                    'for alias: %s', self._shadow_proxy_command,
-                    self._alias_name
+                    'Using shadowed command object: %s ' 'for alias: %s',
+                    self._shadow_proxy_command,
+                    self._alias_name,
                 )
                 command = self._shadow_proxy_command
         return command(remaining, parsed_globals)
@@ -386,32 +413,40 @@ def __init__(self, alias_name, alias_value, invoker=subprocess.call):
         self._invoker = invoker
 
     def __call__(self, args, parsed_globals):
-        command_components = [
-            self._alias_value[1:]
-        ]
+        command_components = [self._alias_value[1:]]
         command_components.extend(compat_shell_quote(a) for a in args)
         command = ' '.join(command_components)
         LOG.debug(
             'Using external alias %r with value: %r to run: %r',
-            self._alias_name, self._alias_value, command)
+            self._alias_name,
+            self._alias_value,
+            command,
+        )
         return self._invoker(command, shell=True)
 
 
 class InternalAliasSubCommand(BaseInternalAliasCommand):
-
-    def __init__(self, alias_name, alias_value, command_object,
-                 global_args_parser, session,
-                 proxied_sub_command=None):
+    def __init__(
+        self,
+        alias_name,
+        alias_value,
+        command_object,
+        global_args_parser,
+        session,
+        proxied_sub_command=None,
+    ):
         super(InternalAliasSubCommand, self).__init__(
-            alias_name, alias_value, session)
+            alias_name, alias_value, session
+        )
         self._command_object = command_object
         self._global_args_parser = global_args_parser
         self._proxied_sub_command = proxied_sub_command
 
     def _process_global_args(self, arg_parser, alias_args, parsed_globals):
         globally_parseable_args = [parsed_globals.command] + alias_args
-        alias_globals, remaining = arg_parser\
-            .parse_known_args(globally_parseable_args)
+        alias_globals, remaining = arg_parser.parse_known_args(
+            globally_parseable_args
+        )
         self._update_parsed_globals(arg_parser, alias_globals, parsed_globals)
         return remaining
@@ -429,7 +464,8 @@ def __call__(self, args, parsed_globals):
         # embedded as part of the alias value (i.e defined in the alias file)
         alias_args = self._get_alias_args()
         cmd_specific_args = self._process_global_args(
-            self._global_args_parser, alias_args, parsed_globals)
+            self._global_args_parser, alias_args, parsed_globals
+        )
         cmd_specific_args.extend(args)
         if self._proxied_sub_command is not None:
             # If we overwrote an existing command, we just delegate to that
             # command so we remove that value before delegating to the
             # proxied command.
             cmd_specific_args = cmd_specific_args[1:]
-            LOG.debug("Delegating to proxy sub-command with new alias "
-                      "args: %s", alias_args)
+            LOG.debug(
+                "Delegating to proxy sub-command with new alias " "args: %s",
+                alias_args,
+            )
             return self._proxied_sub_command(cmd_specific_args, parsed_globals)
         else:
             return self._command_object(cmd_specific_args, parsed_globals)
diff --git a/awscli/argparser.py b/awscli/argparser.py
index 31bdfa1ccb06..2a7f57e35fd8 100644
--- a/awscli/argparser.py
+++ b/awscli/argparser.py
@@ -15,7 +15,6 @@
 import sys
 from difflib import get_close_matches
 
-
 HELP_BLURB = (
     "To see help text, you can run:\n"
     "\n"
@@ -40,6 +39,7 @@ class CommandAction(argparse.Action):
     are dynamically retrieved from the keys of the referenced command table
     """
+
     def __init__(self, option_strings, dest, command_table, **kwargs):
         self.command_table = command_table
         super(CommandAction, self).__init__(
@@ -78,9 +78,9 @@ def _check_value(self, action, value):
         # converted value must be one of the choices (if specified)
         if action.choices is not None and value not in action.choices:
             msg = ['Invalid choice, valid choices are:\n']
-            for i in range(len(action.choices))[::self.ChoicesPerLine]:
+            for i in range(len(action.choices))[:: self.ChoicesPerLine]:
                 current = []
-                for choice in action.choices[i:i+self.ChoicesPerLine]:
+                for choice in action.choices[i : i + self.ChoicesPerLine]:
                     current.append('%-40s' % choice)
                 msg.append(' | '.join(current))
             possible = get_close_matches(value, action.choices, cutoff=0.8)
@@ -92,7 +92,9 @@
     def parse_known_args(self, args, namespace=None):
-        parsed, remaining = super(CLIArgParser, self).parse_known_args(args, namespace)
+        parsed, remaining = super(CLIArgParser, self).parse_known_args(
+            args, namespace
+        )
         terminal_encoding = getattr(sys.stdin, 'encoding', 'utf-8')
         if terminal_encoding is None:
             # In some cases, sys.stdin won't have an encoding set,
@@ -131,15 +133,22 @@ def error(self, message):
 class MainArgParser(CLIArgParser):
     Formatter = argparse.RawTextHelpFormatter
 
-    def __init__(self, command_table, version_string,
-                 description, argument_table, prog=None):
+    def __init__(
+        self,
+        command_table,
+        version_string,
+        description,
+        argument_table,
+        prog=None,
+    ):
         super(MainArgParser, self).__init__(
             formatter_class=self.Formatter,
             add_help=False,
             conflict_handler='resolve',
             description=description,
             usage=USAGE,
-            prog=prog)
+            prog=prog,
+        )
         self._build(command_table, version_string, argument_table)
 
     def _create_choice_help(self, choices):
@@ -152,27 +161,32 @@ def _build(self, command_table, version_string, argument_table):
         for argument_name in argument_table:
             argument = argument_table[argument_name]
             argument.add_to_parser(self)
-        self.add_argument('--version', action="version",
-                          version=version_string,
-                          help='Display the version of this tool')
-        self.add_argument('command', action=CommandAction,
-                          command_table=command_table)
+        self.add_argument(
+            '--version',
+            action="version",
+            version=version_string,
+            help='Display the version of this tool',
+        )
+        self.add_argument(
+            'command', action=CommandAction, command_table=command_table
+        )
 
 
 class ServiceArgParser(CLIArgParser):
-
     def __init__(self, operations_table, service_name):
         super(ServiceArgParser, self).__init__(
             formatter_class=argparse.RawTextHelpFormatter,
             add_help=False,
             conflict_handler='resolve',
-            usage=USAGE)
+            usage=USAGE,
+        )
         self._build(operations_table)
         self._service_name = service_name
 
     def _build(self, operations_table):
-        self.add_argument('operation', action=CommandAction,
-                          command_table=operations_table)
+        self.add_argument(
+            'operation', action=CommandAction, command_table=operations_table
+        )
 
 
 class ArgTableArgParser(CLIArgParser):
@@ -186,7 +200,8 @@ def __init__(self, argument_table, command_table=None):
             formatter_class=self.Formatter,
             add_help=False,
             usage=USAGE,
-            conflict_handler='resolve')
+            conflict_handler='resolve',
+        )
         if command_table is None:
             command_table = {}
         self._build(argument_table, command_table)
@@ -196,8 +211,12 @@ def _build(self, argument_table, command_table):
             argument = argument_table[arg_name]
             argument.add_to_parser(self)
         if command_table:
-            self.add_argument('subcommand', action=CommandAction,
-                              command_table=command_table, nargs='?')
+            self.add_argument(
+                'subcommand',
+                action=CommandAction,
+                command_table=command_table,
+                nargs='?',
+            )
 
     def parse_known_args(self, args, namespace=None):
         if len(args) == 1 and args[0] == 'help':
@@ -206,7 +225,8 @@
             return namespace, []
         else:
             return super(ArgTableArgParser, self).parse_known_args(
-                args, namespace)
+                args, namespace
+            )
 
 
 class SubCommandArgParser(ArgTableArgParser):
@@ -220,7 +240,8 @@ class SubCommandArgParser(ArgTableArgParser):
     def parse_known_args(self, args, namespace=None):
         parsed_args, remaining = super(
-            SubCommandArgParser, self).parse_known_args(args, namespace)
+            SubCommandArgParser, self
+        ).parse_known_args(args, namespace)
         if getattr(parsed_args, 'subcommand', None) is not None:
             new_args = self._remove_subcommand(args, parsed_args)
             return new_args, parsed_args.subcommand
@@ -256,14 +277,17 @@ def _build(self, argument_table, command_table):
         # fail if any of the required args aren't provided.  We don't
         # want to mutate the arg table that's provided to us, so we
         # make a copy of it and then set all the required to not required.
-        non_required_arg_table = self._non_required_arg_table(
-            argument_table)
+        non_required_arg_table = self._non_required_arg_table(argument_table)
         for arg_name in non_required_arg_table:
             argument = non_required_arg_table[arg_name]
             argument.add_to_parser(self)
         if command_table:
-            self.add_argument('subcommand', action=CommandAction,
-                              command_table=command_table, nargs='?')
+            self.add_argument(
+                'subcommand',
+                action=CommandAction,
+                command_table=command_table,
+                nargs='?',
+            )
 
     def _non_required_arg_table(self, argument_table):
         arg_table_copy = {}
diff --git a/awscli/argprocess.py b/awscli/argprocess.py
index 74d0ed9011d7..9454225f3313 100644
--- a/awscli/argprocess.py
+++ b/awscli/argprocess.py
@@ -11,17 +11,17 @@
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
 """Module for processing CLI args."""
-import os
-import logging
 
-from botocore.compat import OrderedDict, json
+import logging
+import os
 
-from awscli import SCALAR_TYPES, COMPLEX_TYPES
-from awscli import shorthand
+from awscli import COMPLEX_TYPES, SCALAR_TYPES, shorthand
 from awscli.utils import (
-    find_service_and_method_in_event_name, is_document_type,
-    is_document_type_container
+    find_service_and_method_in_event_name,
+    is_document_type,
+    is_document_type_container,
 )
+from botocore.compat import OrderedDict, json
 from botocore.utils import is_json_value_header
 
 LOG = logging.getLogger('awscli.argprocess')
@@ -40,8 +40,7 @@ def __init__(self, cli_name, message):
         :param message: The error message to display to the user.
""" - full_message = ("Error parsing parameter '%s': %s" % - (cli_name, message)) + full_message = "Error parsing parameter '%s': %s" % (cli_name, message) super(ParamError, self).__init__(full_message) self.cli_name = cli_name self.message = message @@ -54,9 +53,10 @@ class ParamSyntaxError(Exception): class ParamUnknownKeyError(Exception): def __init__(self, key, valid_keys): valid_keys = ', '.join(valid_keys) - full_message = ( - "Unknown key '%s', valid choices " - "are: %s" % (key, valid_keys)) + full_message = "Unknown key '%s', valid choices " "are: %s" % ( + key, + valid_keys, + ) super(ParamUnknownKeyError, self).__init__(full_message) @@ -64,7 +64,9 @@ class TooComplexError(Exception): pass -def unpack_argument(session, service_name, operation_name, cli_argument, value): +def unpack_argument( + session, service_name, operation_name, cli_argument, value +): """ Unpack an argument's value from the commandline. This is part one of a two step process in handling commandline arguments. Emits the load-cli-arg @@ -76,11 +78,12 @@ def unpack_argument(session, service_name, operation_name, cli_argument, value): param_name = getattr(cli_argument, 'name', 'anonymous') value_override = session.emit_first_non_none_response( - 'load-cli-arg.%s.%s.%s' % (service_name, - operation_name, - param_name), - param=cli_argument, value=value, service_name=service_name, - operation_name=operation_name) + 'load-cli-arg.%s.%s.%s' % (service_name, operation_name, param_name), + param=cli_argument, + value=value, + service_name=service_name, + operation_name=operation_name, + ) if value_override is not None: value = value_override @@ -102,8 +105,10 @@ def _detect_shape_structure(param, stack): if param.type_name in SCALAR_TYPES: return 'scalar' elif param.type_name == 'structure': - sub_types = [_detect_shape_structure(p, stack) - for p in param.members.values()] + sub_types = [ + _detect_shape_structure(p, stack) + for p in param.members.values() + ] # We're distinguishing between structure(scalar) # and structure(scalars), because for the case of # a single scalar in a structure we can simplify @@ -141,29 +146,31 @@ def unpack_cli_arg(cli_argument, value): :return: The "unpacked" argument than can be sent to the `Operation` object in python. 
""" - return _unpack_cli_arg(cli_argument.argument_model, value, - cli_argument.cli_name) + return _unpack_cli_arg( + cli_argument.argument_model, value, cli_argument.cli_name + ) def _special_type(model): # check if model is jsonvalue header and that value is serializable - if model.serialization.get('jsonvalue') and \ - model.serialization.get('location') == 'header' and \ - model.type_name == 'string': + if ( + model.serialization.get('jsonvalue') + and model.serialization.get('location') == 'header' + and model.type_name == 'string' + ): return True return False def _unpack_cli_arg(argument_model, value, cli_name): - if is_json_value_header(argument_model) or \ - is_document_type(argument_model): + if is_json_value_header(argument_model) or is_document_type( + argument_model + ): return _unpack_json_cli_arg(argument_model, value, cli_name) elif argument_model.type_name in SCALAR_TYPES: - return unpack_scalar_cli_arg( - argument_model, value, cli_name) + return unpack_scalar_cli_arg(argument_model, value, cli_name) elif argument_model.type_name in COMPLEX_TYPES: - return _unpack_complex_cli_arg( - argument_model, value, cli_name) + return _unpack_complex_cli_arg(argument_model, value, cli_name) else: return str(value) @@ -173,8 +180,8 @@ def _unpack_json_cli_arg(argument_model, value, cli_name): return json.loads(value, object_pairs_hook=OrderedDict) except ValueError as e: raise ParamError( - cli_name, "Invalid JSON: %s\nJSON received: %s" - % (e, value)) + cli_name, "Invalid JSON: %s\nJSON received: %s" % (e, value) + ) def _unpack_complex_cli_arg(argument_model, value, cli_name): @@ -198,8 +205,9 @@ def _unpack_complex_cli_arg(argument_model, value, cli_name): # 2. It's possible this is a list of json objects: # --filters '{"Name": ..}' '{"Name": ...}' member_shape_model = argument_model.member - return [_unpack_cli_arg(member_shape_model, v, cli_name) - for v in value] + return [ + _unpack_cli_arg(member_shape_model, v, cli_name) for v in value + ] except (ValueError, TypeError) as e: # The list params don't have a name/cli_name attached to them # so they will have bad error messages. We're going to @@ -211,13 +219,21 @@ def _unpack_complex_cli_arg(argument_model, value, cli_name): def unpack_scalar_cli_arg(argument_model, value, cli_name=''): # Note the cli_name is used strictly for error reporting. It's # not required to use unpack_scalar_cli_arg - if argument_model.type_name == 'integer' or argument_model.type_name == 'long': + if ( + argument_model.type_name == 'integer' + or argument_model.type_name == 'long' + ): return int(value) - elif argument_model.type_name == 'float' or argument_model.type_name == 'double': + elif ( + argument_model.type_name == 'float' + or argument_model.type_name == 'double' + ): # TODO: losing precision on double types return float(value) - elif argument_model.type_name == 'blob' and \ - argument_model.serialization.get('streaming'): + elif ( + argument_model.type_name == 'blob' + and argument_model.serialization.get('streaming') + ): file_path = os.path.expandvars(value) file_path = os.path.expanduser(file_path) if not os.path.isfile(file_path): @@ -257,7 +273,6 @@ def _is_complex_shape(model): class ParamShorthand(object): - def _uses_old_list_case(self, command_name, operation_name, argument_name): """ Determines whether a given operation for a service needs to use the @@ -265,27 +280,24 @@ def _uses_old_list_case(self, command_name, operation_name, argument_name): a single member. 
""" cases = { - 'firehose': { - 'put-record-batch': ['records'] - }, + 'firehose': {'put-record-batch': ['records']}, 'workspaces': { 'reboot-workspaces': ['reboot-workspace-requests'], 'rebuild-workspaces': ['rebuild-workspace-requests'], - 'terminate-workspaces': ['terminate-workspace-requests'] + 'terminate-workspaces': ['terminate-workspace-requests'], }, 'elb': { 'remove-tags': ['tags'], 'describe-instance-health': ['instances'], 'deregister-instances-from-load-balancer': ['instances'], - 'register-instances-with-load-balancer': ['instances'] - } + 'register-instances-with-load-balancer': ['instances'], + }, } cases = cases.get(command_name, {}).get(operation_name, []) return argument_name in cases class ParamShorthandParser(ParamShorthand): - def __init__(self): self._parser = shorthand.ShorthandParser() self._visitor = shorthand.BackCompatVisitor() @@ -321,18 +333,21 @@ def __call__(self, cli_argument, value, event_name, **kwargs): if not self._should_parse_as_shorthand(cli_argument, value): return else: - command_name, operation_name = \ + command_name, operation_name = ( find_service_and_method_in_event_name(event_name) + ) return self._parse_as_shorthand( - cli_argument, value, command_name, operation_name) + cli_argument, value, command_name, operation_name + ) - def _parse_as_shorthand(self, cli_argument, value, command_name, - operation_name): + def _parse_as_shorthand( + self, cli_argument, value, command_name, operation_name + ): try: - LOG.debug("Parsing param %s as shorthand", - cli_argument.cli_name) + LOG.debug("Parsing param %s as shorthand", cli_argument.cli_name) handled_value = self._handle_special_cases( - cli_argument, value, command_name, operation_name) + cli_argument, value, command_name, operation_name + ) if handled_value is not None: return handled_value if isinstance(value, list): @@ -357,15 +372,20 @@ def _parse_as_shorthand(self, cli_argument, value, command_name, raise ParamError(cli_argument.cli_name, str(e)) return parsed - def _handle_special_cases(self, cli_argument, value, command_name, - operation_name): + def _handle_special_cases( + self, cli_argument, value, command_name, operation_name + ): # We need to handle a few special cases that the previous # parser handled in order to stay backwards compatible. 
        model = cli_argument.argument_model
-        if model.type_name == 'list' and \
-                model.member.type_name == 'structure' and \
-                len(model.member.members) == 1 and \
-                self._uses_old_list_case(command_name, operation_name, cli_argument.name):
+        if (
+            model.type_name == 'list'
+            and model.member.type_name == 'structure'
+            and len(model.member.members) == 1
+            and self._uses_old_list_case(
+                command_name, operation_name, cli_argument.name
+            )
+        ):
            # First special case is handling a list of structures
            # of a single element such as:
            #
@@ -378,11 +398,13 @@ def _handle_special_cases(self, cli_argument, value, command_name,
             key_name = list(model.member.members.keys())[0]
             new_values = [{key_name: v} for v in value]
             return new_values
-        elif model.type_name == 'structure' and \
-                len(model.members) == 1 and \
-                'Value' in model.members and \
-                model.members['Value'].type_name == 'string' and \
-                '=' not in value:
+        elif (
+            model.type_name == 'structure'
+            and len(model.members) == 1
+            and 'Value' in model.members
+            and model.members['Value'].type_name == 'string'
+            and '=' not in value
+        ):
            # Second special case is where a structure of a single
            # value whose member name is "Value" can be specified
            # as:
@@ -401,9 +423,13 @@ def _should_parse_as_shorthand(self, cli_argument, value):
         else:
             check_val = value
         if isinstance(check_val, str) and check_val.strip().startswith(
-                ('[', '{')):
-            LOG.debug("Param %s looks like JSON, not considered for "
-                      "param shorthand.", cli_argument.py_name)
+            ('[', '{')
+        ):
+            LOG.debug(
+                "Param %s looks like JSON, not considered for "
+                "param shorthand.",
+                cli_argument.py_name,
+            )
             return False
         model = cli_argument.argument_model
         return _supports_shorthand_syntax(model)
@@ -421,8 +447,9 @@ def supports_shorthand(self, argument_model):
             return _supports_shorthand_syntax(argument_model)
         return False
 
-    def generate_shorthand_example(self, cli_argument, command_name,
-                                   operation_name):
+    def generate_shorthand_example(
+        self, cli_argument, command_name, operation_name
+    ):
         """Generate documentation for a CLI argument.
 
         :type cli_argument: awscli.arguments.BaseCLIArgument
@@ -437,7 +464,8 @@ def generate_shorthand_example(self, cli_argument, command_name,
         """
         docstring = self._handle_special_cases(
-            cli_argument, command_name, operation_name)
+            cli_argument, command_name, operation_name
+        )
         if docstring is self._DONT_DOC:
             return None
         elif docstring:
@@ -455,24 +483,35 @@ def generate_shorthand_example(self, cli_argument, command_name,
         except TooComplexError:
             return ''
 
-    def _handle_special_cases(self, cli_argument, command_name, operation_name):
+    def _handle_special_cases(
+        self, cli_argument, command_name, operation_name
+    ):
         model = cli_argument.argument_model
-        if model.type_name == 'list' and \
-                model.member.type_name == 'structure' and \
-                len(model.member.members) == 1 and \
-                self._uses_old_list_case(
-                    command_name, operation_name, cli_argument.name):
+        if (
+            model.type_name == 'list'
+            and model.member.type_name == 'structure'
+            and len(model.member.members) == 1
+            and self._uses_old_list_case(
+                command_name, operation_name, cli_argument.name
+            )
+        ):
             member_name = list(model.member.members)[0]
             # Handle special case where the min/max is exactly one.
             metadata = model.metadata
             if metadata.get('min') == 1 and metadata.get('max') == 1:
                 return '%s %s1' % (cli_argument.cli_name, member_name)
-            return '%s %s1 %s2 %s3' % (cli_argument.cli_name, member_name,
-                                       member_name, member_name)
-        elif model.type_name == 'structure' and \
-                len(model.members) == 1 and \
-                'Value' in model.members and \
-                model.members['Value'].type_name == 'string':
+            return '%s %s1 %s2 %s3' % (
+                cli_argument.cli_name,
+                member_name,
+                member_name,
+                member_name,
+            )
+        elif (
+            model.type_name == 'structure'
+            and len(model.members) == 1
+            and 'Value' in model.members
+            and model.members['Value'].type_name == 'string'
+        ):
             return self._DONT_DOC
         return ''
diff --git a/awscli/arguments.py b/awscli/arguments.py
index 4cb1b291d267..72e6a2f4475c 100644
--- a/awscli/arguments.py
+++ b/awscli/arguments.py
@@ -36,15 +36,13 @@
 user input and maps the input value to several API parameters.
 
 """
-import logging
 
-from botocore import xform_name
-from botocore.hooks import first_non_none_response
+import logging
 
 from awscli.argprocess import unpack_cli_arg
 from awscli.schema import SchemaTransformer
-from botocore import model
-
+from botocore import model, xform_name
+from botocore.hooks import first_non_none_response
 
 LOG = logging.getLogger('awscli.arguments')
@@ -203,11 +201,24 @@ class CustomArgument(BaseCLIArgument):
 
     """
 
-    def __init__(self, name, help_text='', dest=None, default=None,
-                 action=None, required=None, choices=None, nargs=None,
-                 cli_type_name=None, group_name=None, positional_arg=False,
-                 no_paramfile=False, argument_model=None, synopsis='',
-                 const=None):
+    def __init__(
+        self,
+        name,
+        help_text='',
+        dest=None,
+        default=None,
+        action=None,
+        required=None,
+        choices=None,
+        nargs=None,
+        cli_type_name=None,
+        group_name=None,
+        positional_arg=False,
+        no_paramfile=False,
+        argument_model=None,
+        synopsis='',
+        const=None,
+    ):
         self._name = name
         self._help = help_text
         self._dest = dest
@@ -235,8 +246,10 @@ def __init__(self, name, help_text='', dest=None, default=None,
 
         # If the top level element is a list then set nargs to
         # accept multiple values seperated by a space.
-        if self.argument_model is not None and \
-                self.argument_model.type_name == 'list':
+        if (
+            self.argument_model is not None
+            and self.argument_model.type_name == 'list'
+        ):
             self._nargs = '+'
 
     def _create_scalar_argument_model(self):
@@ -337,9 +350,7 @@ def nargs(self):
 
 class CLIArgument(BaseCLIArgument):
-    """Represents a CLI argument that maps to a service parameter.
-
-    """
+    """Represents a CLI argument that maps to a service parameter."""
 
     TYPE_MAP = {
         'structure': str,
@@ -352,12 +363,18 @@ class CLIArgument(BaseCLIArgument):
         'long': int,
         'boolean': bool,
         'double': float,
-        'blob': str
+        'blob': str,
     }
 
-    def __init__(self, name, argument_model, operation_model,
-                 event_emitter, is_required=False,
-                 serialized_name=None):
+    def __init__(
+        self,
+        name,
+        argument_model,
+        operation_model,
+        event_emitter,
+        is_required=False,
+        serialized_name=None,
+    ):
         """
 
         :type name: str
@@ -433,7 +450,8 @@ def add_to_parser(self, parser):
             cli_name,
             help=self.documentation,
             type=self.cli_type,
-            required=self.required)
+            required=self.required,
+        )
 
     def add_to_params(self, parameters, value):
         if value is None:
@@ -451,16 +469,23 @@ def add_to_params(self, parameters, value):
         # below.  Sometimes this can be more complicated, and subclasses
         # can customize as they need.
         unpacked = self._unpack_argument(value)
-        LOG.debug('Unpacked value of %r for parameter "%s": %r', value,
-                  self.py_name, unpacked)
+        LOG.debug(
+            'Unpacked value of %r for parameter "%s": %r',
+            value,
+            self.py_name,
+            unpacked,
+        )
         parameters[self._serialized_name] = unpacked
 
     def _unpack_argument(self, value):
         service_name = self._operation_model.service_model.service_name
         operation_name = xform_name(self._operation_model.name, '-')
-        override = self._emit_first_response('process-cli-arg.%s.%s' % (
-            service_name, operation_name), param=self.argument_model,
-            cli_argument=self, value=value)
+        override = self._emit_first_response(
+            'process-cli-arg.%s.%s' % (service_name, operation_name),
+            param=self.argument_model,
+            cli_argument=self,
+            value=value,
+        )
         if override is not None:
             # A plugin supplied an alternate conversion,
             # use it instead.
@@ -478,17 +503,18 @@ def _emit_first_response(self, name, **kwargs):
 
 class ListArgument(CLIArgument):
-
     @property
     def nargs(self):
         return '*'
 
     def add_to_parser(self, parser):
         cli_name = self.cli_name
-        parser.add_argument(cli_name,
-                            nargs=self.nargs,
-                            type=self.cli_type,
-                            required=self.required)
+        parser.add_argument(
+            cli_name,
+            nargs=self.nargs,
+            type=self.cli_type,
+            required=self.required,
+        )
 
 
 class BooleanArgument(CLIArgument):
@@ -508,17 +534,27 @@ class BooleanArgument(CLIArgument):
 
     """
 
-    def __init__(self, name, argument_model, operation_model,
-                 event_emitter,
-                 is_required=False, action='store_true', dest=None,
-                 group_name=None, default=None,
-                 serialized_name=None):
-        super(BooleanArgument, self).__init__(name,
-                                              argument_model,
-                                              operation_model,
-                                              event_emitter,
-                                              is_required,
-                                              serialized_name=serialized_name)
+    def __init__(
+        self,
+        name,
+        argument_model,
+        operation_model,
+        event_emitter,
+        is_required=False,
+        action='store_true',
+        dest=None,
+        group_name=None,
+        default=None,
+        serialized_name=None,
+    ):
+        super(BooleanArgument, self).__init__(
+            name,
+            argument_model,
+            operation_model,
+            event_emitter,
+            is_required,
+            serialized_name=serialized_name,
+        )
         self._mutex_group = None
         self._action = action
         if dest is None:
@@ -549,18 +585,25 @@ def add_to_arg_table(self, argument_table):
         argument_table[self.name] = self
         negative_name = 'no-%s' % self.name
         negative_version = self.__class__(
-            negative_name, self.argument_model,
-            self._operation_model, self._event_emitter,
-            action='store_false', dest=self._destination,
-            group_name=self.group_name, serialized_name=self._serialized_name)
+            negative_name,
+            self.argument_model,
+            self._operation_model,
+            self._event_emitter,
+            action='store_false',
+            dest=self._destination,
+            group_name=self.group_name,
+            serialized_name=self._serialized_name,
+        )
         argument_table[negative_name] = negative_version
 
     def add_to_parser(self, parser):
-        parser.add_argument(self.cli_name,
-                            help=self.documentation,
-                            action=self._action,
-                            default=self._default,
-                            dest=self._destination)
+        parser.add_argument(
+            self.cli_name,
+            help=self.documentation,
+            action=self._action,
+            default=self._default,
+            dest=self._destination,
+        )
 
     @property
     def group_name(self):
diff --git a/awscli/autocomplete/__init__.py b/awscli/autocomplete/__init__.py
index 61e108c38e7f..50f76da3d69b 100644
--- a/awscli/autocomplete/__init__.py
+++ b/awscli/autocomplete/__init__.py
@@ -40,21 +40,24 @@ class LazyClientCreator(object):
     a client. This class manages this process.
""" - def __init__(self, - import_name='awscli.clidriver.create_clidriver'): + + def __init__(self, import_name='awscli.clidriver.create_clidriver'): self._import_name = import_name self._session_cache = {} - def create_client(self, service_name, parsed_region=None, - parsed_profile=None, **kwargs): + def create_client( + self, service_name, parsed_region=None, parsed_profile=None, **kwargs + ): if self._session_cache.get(parsed_profile) is None: session = self.create_session() session.set_config_variable('profile', parsed_profile) self._session_cache[parsed_profile] = session self._session_cache[parsed_profile].set_config_variable( - 'region', parsed_region) + 'region', parsed_region + ) return self._session_cache[parsed_profile].create_client( - service_name, **kwargs) + service_name, **kwargs + ) def create_session(self): return lazy_call(self._import_name).session diff --git a/awscli/autocomplete/autogen.py b/awscli/autocomplete/autogen.py index 5ac01d266ecb..6f5792399a22 100644 --- a/awscli/autocomplete/autogen.py +++ b/awscli/autocomplete/autogen.py @@ -5,14 +5,22 @@ It can also be used to regen completion data as new heuristics are added. """ + import logging +from collections import defaultdict, namedtuple from difflib import SequenceMatcher -from collections import namedtuple, defaultdict - LOG = logging.getLogger(__name__) -Resource = namedtuple('Resource', ['resource_name', 'ident_name', - 'input_parameters', 'operation', 'jp_expr']) +Resource = namedtuple( + 'Resource', + [ + 'resource_name', + 'ident_name', + 'input_parameters', + 'operation', + 'jp_expr', + ], +) class ServerCompletionHeuristic(object): @@ -26,8 +34,9 @@ def __init__(self, singularize=None): singularize = BasicSingularize() self._singularize = singularize - def generate_completion_descriptions(self, service_model, - prune_completions=True): + def generate_completion_descriptions( + self, service_model, prune_completions=True + ): """ :param service_model: A botocore.model.ServiceModel. 
@@ -48,10 +57,13 @@ def generate_completion_descriptions(self, service_model,
             if op_name.lower().startswith(self._RESOURCE_VERB_PREFIX):
                 candidates.append(op_name)
         all_resources = self._generate_resource_descriptions(
-            candidates, service_model)
+            candidates, service_model
+        )
         all_operations = self._generate_operations(
             self._filter_operation_names(service_model.operation_names),
-            all_resources, service_model)
+            all_resources,
+            service_model,
+        )
         if prune_completions:
             self._prune_resource_identifiers(all_resources, all_operations)
         return {
@@ -61,14 +73,18 @@ def generate_completion_descriptions(self, service_model,
         }
 
     def _filter_operation_names(self, op_names):
-        return [name for name in op_names
-                if not name.lower().startswith(self._OPERATION_EXCLUDES)]
+        return [
+            name
+            for name in op_names
+            if not name.lower().startswith(self._OPERATION_EXCLUDES)
+        ]
 
     def _generate_resource_descriptions(self, candidates, service_model):
         all_resources = {}
         for op_name in candidates:
             resources = self._resource_for_single_operation(
-                op_name, service_model)
+                op_name, service_model
+            )
             if resources is not None:
                 for resource in resources:
                     self._inject_resource(all_resources, resource)
@@ -97,15 +113,17 @@ def _generate_operations(self, op_names, resources, service_model):
             )
         return op_map
 
-    def _add_completion_data_for_operation(self, op_map, op_name,
-                                           service_model, reverse_mapping):
+    def _add_completion_data_for_operation(
+        self, op_map, op_name, service_model, reverse_mapping
+    ):
         op_model = service_model.operation_model(op_name)
         input_shape = op_model.input_shape
         if not input_shape:
             return
         for member in input_shape.members:
             member_name = self._find_matching_member_name(
-                member, reverse_mapping)
+                member, reverse_mapping
+            )
             if member_name is None:
                 continue
             resource_name = self._find_matching_op_name(
@@ -114,9 +132,11 @@ def _add_completion_data_for_operation(self, op_map, op_name,
             op = op_map.setdefault(op_name, {})
             param = op.setdefault(member, {})
             param['completions'] = [
-                {'parameters': {},
-                 'resourceName': resource_name,
-                 'resourceIdentifier': member_name}
+                {
+                    'parameters': {},
+                    'resourceName': resource_name,
+                    'resourceIdentifier': member_name,
+                }
             ]
 
     def _find_matching_op_name(self, op_name, candidates):
@@ -139,9 +159,7 @@ def _find_matching_op_name(self, op_name, candidates):
             matcher.set_seq1(candidate)
             match_ratio = matcher.ratio()
             matching_score.append((match_ratio, candidate))
-        return sorted(
-            matching_score, key=lambda x: x[0], reverse=True
-        )[0][1]
+        return sorted(matching_score, key=lambda x: x[0], reverse=True)[0][1]
 
     def _find_matching_member_name(self, member, reverse_mapping):
         # Try to find something in the reverse mapping that's close
@@ -174,11 +192,18 @@ def _resource_for_single_operation(self, op_name, service_model):
         # conventions.
         op_model = service_model.operation_model(op_name)
         output = op_model.output_shape
-        list_members = [member for member, shape in output.members.items()
-                        if shape.type_name == 'list']
+        list_members = [
+            member
+            for member, shape in output.members.items()
+            if shape.type_name == 'list'
+        ]
         if len(list_members) != 1:
-            LOG.debug("Operation does not have exactly one list member, "
-                      "skipping: %s (%s)", op_name, list_members)
+            LOG.debug(
+                "Operation does not have exactly one list member, "
+                "skipping: %s (%s)",
+                op_name,
+                list_members,
+            )
             return
         resource_member_name = list_members[0]
         list_member = output.members[resource_member_name].member
@@ -187,52 +212,64 @@ def _resource_for_single_operation(self, op_name, service_model):
         required_members = op_model.input_shape.required_members
         if list_member.type_name == 'structure':
             return self._resource_from_structure(
-                op_name, resource_member_name, list_member, required_members)
+                op_name, resource_member_name, list_member, required_members
+            )
         elif list_member.type_name == 'string':
-            return [self._resource_from_string(
-                op_name, resource_member_name, required_members,
-            )]
+            return [
+                self._resource_from_string(
+                    op_name,
+                    resource_member_name,
+                    required_members,
+                )
+            ]
 
-    def _resource_from_structure(self, op_name,
-                                 resource_member_name, list_member,
-                                 required_members):
+    def _resource_from_structure(
+        self, op_name, resource_member_name, list_member, required_members
+    ):
         op_with_prefix_removed = self._remove_verb_prefix(op_name)
-        singular_name = self._singularize.make_singular(
-            op_with_prefix_removed)
+        singular_name = self._singularize.make_singular(op_with_prefix_removed)
         resources = []
         for member_name in list_member.members:
-            jp_expr = (
-                '{resource_member_name}[].{member_name}').format(
-                    resource_member_name=resource_member_name,
-                    member_name=member_name)
-            r = Resource(singular_name, member_name, required_members,
-                         op_name, jp_expr)
+            jp_expr = ('{resource_member_name}[].{member_name}').format(
+                resource_member_name=resource_member_name,
+                member_name=member_name,
+            )
+            r = Resource(
+                singular_name, member_name, required_members, op_name, jp_expr
+            )
             resources.append(r)
         return resources
 
-    def _resource_from_string(self, op_name, resource_member_name,
-                              required_members):
+    def _resource_from_string(
+        self, op_name, resource_member_name, required_members
+    ):
         op_with_prefix_removed = self._remove_verb_prefix(op_name)
-        singular_name = self._singularize.make_singular(
-            op_with_prefix_removed)
+        singular_name = self._singularize.make_singular(op_with_prefix_removed)
         singular_member_name = self._singularize.make_singular(
-            resource_member_name)
-        r = Resource(singular_name, singular_member_name, required_members,
-                     op_name,
-                     '{resource_member_name}[]'.format(
-                         resource_member_name=resource_member_name))
+            resource_member_name
+        )
+        r = Resource(
+            singular_name,
+            singular_member_name,
+            required_members,
+            op_name,
+            '{resource_member_name}[]'.format(
+                resource_member_name=resource_member_name
+            ),
+        )
         return r
 
     def _remove_verb_prefix(self, op_name):
         for prefix in self._RESOURCE_VERB_PREFIX:
             # 'ListResources' -> 'Resources'
             if op_name.lower().startswith(prefix):
-                op_with_prefix_removed = op_name[len(prefix):]
+                op_with_prefix_removed = op_name[len(prefix) :]
                 return op_with_prefix_removed
 
     def _prune_resource_identifiers(self, all_resources, all_operations):
         used_identifiers = self._get_identifiers_referenced_by_operations(
-            all_operations)
+            all_operations
+        )
         for resource, resource_data in list(all_resources.items()):
             identifiers = resource_data['resourceIdentifier']
             known_ids_for_resource = used_identifiers.get(resource, set())
@@ -249,7 +286,8 @@ def _get_identifiers_referenced_by_operations(self, operations):
         used_identifiers = {}
         for completion in self._all_completions(operations):
             used_identifiers.setdefault(completion['resourceName'], set()).add(
-                completion['resourceIdentifier'])
+                completion['resourceIdentifier']
+            )
         return used_identifiers
 
     def _all_completions(self, operations):
diff --git a/awscli/autocomplete/completer.py b/awscli/autocomplete/completer.py
index ed1baf853046..76dc97d8ec8c 100644
--- a/awscli/autocomplete/completer.py
+++ b/awscli/autocomplete/completer.py
@@ -19,6 +19,7 @@ class AutoCompleter(object):
     completions for specific cases (e.g model-based completions,
     server-side completions, etc).
     """
+
     def __init__(self, parser, completers):
         """
 
@@ -54,8 +55,16 @@ class CompletionResult(object):
     stores metadata about the completion.
 
     """
-    def __init__(self, name, starting_index=0, required=False,
-                 cli_type_name='', help_text='', display_text=None):
+
+    def __init__(
+        self,
+        name,
+        starting_index=0,
+        required=False,
+        cli_type_name='',
+        help_text='',
+        display_text=None,
+    ):
         self.name = name
         self.starting_index = starting_index
         self.required = required
@@ -65,17 +74,22 @@ def __init__(self, name, starting_index=0, required=False,
 
     def __eq__(self, other):
         return (
-            isinstance(other, self.__class__) and
-            self.name == other.name and
-            self.starting_index == other.starting_index and
-            self.display_text == other.display_text
+            isinstance(other, self.__class__)
+            and self.name == other.name
+            and self.starting_index == other.starting_index
+            and self.display_text == other.display_text
         )
 
     def __repr__(self):
-        return '%s(%s, %s, %s, %s, %s, %s)' % (self.__class__.__name__, self.name,
-                                               self.starting_index, self.required,
-                                               self.cli_type_name, self.help_text,
-                                               self.display_text)
+        return '%s(%s, %s, %s, %s, %s, %s)' % (
+            self.__class__.__name__,
+            self.name,
+            self.starting_index,
+            self.required,
+            self.cli_type_name,
+            self.help_text,
+            self.display_text,
+        )
 
 
 class BaseCompleter(object):
diff --git a/awscli/autocomplete/custom.py b/awscli/autocomplete/custom.py
index d9ea75c25e3f..6deee16eedff 100644
--- a/awscli/autocomplete/custom.py
+++ b/awscli/autocomplete/custom.py
@@ -10,8 +10,12 @@
 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
-from awscli.autocomplete.serverside.custom_completers.ddb.autocomplete import add_ddb_completers
-from awscli.autocomplete.serverside.custom_completers.logs.autocomplete import add_log_completers
+from awscli.autocomplete.serverside.custom_completers.ddb.autocomplete import (
+    add_ddb_completers,
+)
+from awscli.autocomplete.serverside.custom_completers.logs.autocomplete import (
+    add_log_completers,
+)
 
 
 def get_custom_completers():
diff --git a/awscli/autocomplete/db.py b/awscli/autocomplete/db.py
index 860bba04cb4f..3b41f4082468 100644
--- a/awscli/autocomplete/db.py
+++ b/awscli/autocomplete/db.py
@@ -1,10 +1,9 @@
-import os
 import logging
+import os
 import sqlite3
 
 from awscli import __version__ as cli_version
 
-
 LOG = logging.getLogger(__name__)
 
 # We may eventually include a pre-generated version of this index as part
@@ -14,7 +13,8 @@
 INDEX_FILE = os.path.join(INDEX_DIR, '%s.index' % cli_version)
 BUILTIN_INDEX_FILE = os.path.join(
     os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
-    'data', 'ac.index'
+    'data',
+    'ac.index',
 )
@@ -34,10 +34,7 @@ def __init__(self, db_filename=None):
     @property
     def _connection(self):
         if self._db_conn is None:
-            kwargs = {
-                'check_same_thread': False,
-                'isolation_level': None
-            }
+            kwargs = {'check_same_thread': False, 'isolation_level': None}
             if self._db_filename.startswith('file::memory:'):
                 # This statement was added because old versions of sqlite
                 # don't support 'uri' but we use it for tests and because
diff --git a/awscli/autocomplete/filters.py b/awscli/autocomplete/filters.py
index 9a614a09dd21..fb2abd77854d 100644
--- a/awscli/autocomplete/filters.py
+++ b/awscli/autocomplete/filters.py
@@ -37,26 +37,29 @@ def fuzzy_filter(prefix, completions):
             matches = list(regex.finditer(name))
             if matches:
                 # Prefer the match, closest to the left, then shortest.
-                best = min(matches, key=lambda m: (m.start(),
-                                                   len(m.group(1))))
-                fuzzy_matches.append(_FuzzyMatch(
-                    len(best.group(1)),
-                    best.start(),
-                    completion)
+                best = min(matches, key=lambda m: (m.start(), len(m.group(1))))
+                fuzzy_matches.append(
+                    _FuzzyMatch(len(best.group(1)), best.start(), completion)
                 )
-        return [x for _, _, x in sorted(
-            fuzzy_matches,
-            key=lambda match: (
-                match.match_length, match.start_pos,
-                match.completion.display_text,
-                match.completion.name)
-        )]
+        return [
+            x
+            for _, _, x in sorted(
+                fuzzy_matches,
+                key=lambda match: (
+                    match.match_length,
+                    match.start_pos,
+                    match.completion.display_text,
+                    match.completion.name,
+                ),
+            )
+        ]
     return completions
 
 
 def startswith_filter(prefix, completions):
     return [
-        completion for completion in completions
+        completion
+        for completion in completions
         if (completion.display_text or completion.name).startswith(prefix)
     ]
diff --git a/awscli/autocomplete/generator.py b/awscli/autocomplete/generator.py
index 733fd505e546..4b228b49a466 100644
--- a/awscli/autocomplete/generator.py
+++ b/awscli/autocomplete/generator.py
@@ -11,12 +11,13 @@
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
 """Generates auto completion index."""
+
 import os
 
+from awscli import clidriver
+from awscli.autocomplete import db
 from awscli.autocomplete.local import indexer
 from awscli.autocomplete.serverside.indexer import APICallIndexer
-from awscli.autocomplete import db
-from awscli import clidriver
 
 
 def generate_index(filename):
@@ -64,6 +65,7 @@ class IndexGenerator(object):
     indices.
""" + def __init__(self, indexers): self._indexers = indexers diff --git a/awscli/autocomplete/local/basic.py b/awscli/autocomplete/local/basic.py index 10e82704427b..f263411fe499 100644 --- a/awscli/autocomplete/local/basic.py +++ b/awscli/autocomplete/local/basic.py @@ -13,10 +13,9 @@ import os import re -from awscli.autocomplete.completer import BaseCompleter -from awscli.autocomplete.completer import CompletionResult -from awscli.autocomplete.filters import startswith_filter from awscli.autocomplete import LazyClientCreator +from awscli.autocomplete.completer import BaseCompleter, CompletionResult +from awscli.autocomplete.filters import startswith_filter def strip_html_tags_and_newlines(text): @@ -25,8 +24,12 @@ def strip_html_tags_and_newlines(text): class ProfileCompleter(BaseCompleter): - def __init__(self, session=None, response_filter=startswith_filter, - session_creator=None): + def __init__( + self, + session=None, + response_filter=startswith_filter, + session_creator=None, + ): self._session = session self._filter = response_filter self._session_creator = session_creator @@ -34,15 +37,17 @@ def __init__(self, session=None, response_filter=startswith_filter, self._session_creator = LazyClientCreator() def complete(self, parsed): - if parsed.current_param == 'profile' \ - and parsed.current_fragment is not None: + if ( + parsed.current_param == 'profile' + and parsed.current_fragment is not None + ): return self._filter(parsed.current_fragment, self._get_profiles()) def _get_profiles(self): return map( lambda x: CompletionResult(name=x), - self._get_session().available_profiles - ) + self._get_session().available_profiles, + ) def _get_session(self): if self._session is None: @@ -51,8 +56,12 @@ def _get_session(self): class RegionCompleter(BaseCompleter): - def __init__(self, session=None, response_filter=startswith_filter, - session_creator=None): + def __init__( + self, + session=None, + response_filter=startswith_filter, + session_creator=None, + ): self._session = session self._filter = response_filter self._session_creator = session_creator @@ -60,27 +69,28 @@ def __init__(self, session=None, response_filter=startswith_filter, self._session_creator = LazyClientCreator() def complete(self, parsed): - if parsed.current_param == 'region' \ - and parsed.current_fragment is not None: + if ( + parsed.current_param == 'region' + and parsed.current_fragment is not None + ): if len(parsed.lineage) > 1: service_name = parsed.lineage[1] else: service_name = 'ec2' - return self._filter(parsed.current_fragment, - self._get_region_completions(service_name)) + return self._filter( + parsed.current_fragment, + self._get_region_completions(service_name), + ) def _get_region_completions(self, service_name): return map( - lambda x: CompletionResult(name=x), - self._get_regions(service_name) + lambda x: CompletionResult(name=x), self._get_regions(service_name) ) def _get_regions(self, service_name): if self._session is None: self._session = self._session_creator.create_session() - return self._session.get_available_regions( - service_name=service_name - ) + return self._session.get_available_regions(service_name=service_name) class FilePathCompleter(BaseCompleter): @@ -92,16 +102,19 @@ def __init__(self, path_completer=None, response_filter=startswith_filter): def path_completer(self): if self._path_completer is None: from prompt_toolkit.completion import PathCompleter + self._path_completer = PathCompleter(expanduser=True) return self._path_completer def complete(self, parsed): - if 
parsed.current_fragment and \ - parsed.current_fragment.startswith(('file://', 'fileb://')): + if parsed.current_fragment and parsed.current_fragment.startswith( + ('file://', 'fileb://') + ): from prompt_toolkit.document import Document + for prefix in ['file://', 'fileb://']: if parsed.current_fragment.startswith(prefix): - filename_part = parsed.current_fragment[len(prefix):] + filename_part = parsed.current_fragment[len(prefix) :] break # PathCompleter makes really strange suggestions for lonely ~ # "username/", so in this case we handle it by ourselves @@ -110,29 +123,32 @@ def complete(self, parsed): dirname = os.path.dirname(filename_part) if dirname and dirname != os.sep: dirname = f'{dirname}{os.sep}' - document = Document( - text=dirname, - cursor_position=len(dirname)) + document = Document(text=dirname, cursor_position=len(dirname)) completions = self.path_completer.get_completions(document, None) results = [ CompletionResult( f'{prefix}' f'{os.path.join(dirname, completion.display[0][1])}', - display_text=completion.display[0][1]) - for completion in completions] + display_text=completion.display[0][1], + ) + for completion in completions + ] return self._filter(os.path.basename(filename_part), results) class ModelIndexCompleter(BaseCompleter): - def __init__(self, index, cli_driver_fetcher=None, - response_filter=startswith_filter): + def __init__( + self, index, cli_driver_fetcher=None, response_filter=startswith_filter + ): self._index = index self._cli_driver_fetcher = cli_driver_fetcher self._filter = response_filter def complete(self, parsed): - are_unparsed_items_paths = [bool(re.search('[./\\\\:]|(--)', item)) - for item in parsed.unparsed_items] + are_unparsed_items_paths = [ + bool(re.search('[./\\\\:]|(--)', item)) + for item in parsed.unparsed_items + ] if parsed.unparsed_items and all(are_unparsed_items_paths): # If all the unparsed items are file paths, then we auto-complete # options for the current fragment. This is to provide @@ -146,11 +162,15 @@ def complete(self, parsed): # instead. if not parsed.current_fragment: parsed.current_fragment = parsed.current_command - return self._filter(parsed.current_fragment, - self._complete_options(parsed)) + return self._filter( + parsed.current_fragment, self._complete_options(parsed) + ) - elif parsed.unparsed_items or parsed.current_fragment is None or \ - parsed.current_param: + elif ( + parsed.unparsed_items + or parsed.current_fragment is None + or parsed.current_param + ): # If there's ever any unparsed items, then the parser # encountered something it didn't understand. We won't # attempt to auto-complete anything here. @@ -171,17 +191,18 @@ def complete(self, parsed): # more commands to complete. 
commands = self._complete_command(parsed) if not commands: - return self._filter(parsed.current_fragment, - self._complete_options(parsed)) + return self._filter( + parsed.current_fragment, self._complete_options(parsed) + ) return self._filter(parsed.current_fragment, commands) def _complete_command(self, parsed): lineage = parsed.lineage + [parsed.current_command] offset = -len(parsed.current_fragment) - result = [CompletionResult(name, - help_text=full_name, - starting_index=offset) - for name, full_name in self._index.commands_with_full_name(lineage)] + result = [ + CompletionResult(name, help_text=full_name, starting_index=offset) + for name, full_name in self._index.commands_with_full_name(lineage) + ] return result def _outfile_filter(self, completion): @@ -194,17 +215,19 @@ def _complete_options(self, parsed): # '--endpoint' -> 'endpoint' offset = -len(parsed.current_fragment) is_in_global_scope = ( - parsed.lineage == [] and - parsed.current_command == 'aws' + parsed.lineage == [] and parsed.current_command == 'aws' ) arg_names = self._index.arg_names( - lineage=parsed.lineage, command_name=parsed.current_command) + lineage=parsed.lineage, command_name=parsed.current_command + ) results = [] if not is_in_global_scope: for arg_name in arg_names: arg_data = self._index.get_argument_data( lineage=parsed.lineage, - command_name=parsed.current_command, arg_name=arg_name) + command_name=parsed.current_command, + arg_name=arg_name, + ) help_text = None if self._cli_driver_fetcher: help_text = strip_html_tags_and_newlines( @@ -212,20 +235,25 @@ def _complete_options(self, parsed): parsed.lineage, parsed.current_command, arg_name ) ) - results.append(self._outfile_filter( - CompletionResult( - '--%s' % arg_name, - starting_index=offset, - required=arg_data.required, - cli_type_name=arg_data.type_name, - help_text=help_text) - ) + results.append( + self._outfile_filter( + CompletionResult( + '--%s' % arg_name, + starting_index=offset, + required=arg_data.required, + cli_type_name=arg_data.type_name, + help_text=help_text, + ) + ) ) # Global params apply to any scope self._inject_global_params(parsed, results) - return [result for result in results - if result.name.strip('--') not in (list(parsed.parsed_params) + - list(parsed.global_params))] + return [ + result + for result in results + if result.name.strip('--') + not in (list(parsed.parsed_params) + list(parsed.global_params)) + ] def _inject_global_params(self, parsed, results): offset = -len(parsed.current_fragment) @@ -240,31 +268,27 @@ def _inject_global_params(self, parsed, results): ) ) global_param_completions.append( - CompletionResult('--%s' % arg_name, - starting_index=offset, - required=False, - cli_type_name=type_name, - help_text=help_text) + CompletionResult( + '--%s' % arg_name, + starting_index=offset, + required=False, + cli_type_name=type_name, + help_text=help_text, + ) ) results.extend(global_param_completions) class ShorthandCompleter(BaseCompleter): - _PARENS = { - "[": "]", - "{": "}" - } + _PARENS = {"[": "]", "{": "}"} _DUMMY_KEY_VALUE = 'cli_placeholder_for_key_value_replacement' _DUMMY_VALUE = 'cli_placeholder_for_value_replacement' _DUMMY_EQ_VALUE = 'cli_placeholder_for_key_eq_replacement' - _VALUE_PREFIXES = { - 'structure': '{', - 'list': '[', - 'map': '{' - } - - def __init__(self, cli_driver_fetcher=None, - response_filter=startswith_filter): + _VALUE_PREFIXES = {'structure': '{', 'list': '[', 'map': '{'} + + def __init__( + self, cli_driver_fetcher=None, response_filter=startswith_filter + ): self._filter 
= response_filter self._cli_driver_fetcher = cli_driver_fetcher self._shorthand_parser = None @@ -273,6 +297,7 @@ def __init__(self, cli_driver_fetcher=None, def shorthand_parser(self): if self._shorthand_parser is None: from awscli.shorthand import ShorthandParser + self._shorthand_parser = ShorthandParser() return self._shorthand_parser @@ -284,7 +309,8 @@ def complete(self, parsed): ) if arg_model is None: results = self._get_prompt_for_global_arg( - parsed.current_param, parsed.current_fragment) + parsed.current_param, parsed.current_fragment + ) return results parsed_input = self._parse_fragment(parsed.current_fragment) if parsed_input is not None: @@ -306,8 +332,10 @@ def _get_prompt_for_global_arg(self, arg_name, prefix): if choices and prefix is not None: results = self._filter( prefix, - [CompletionResult(prefix, display_text=choice) - for choice in choices] + [ + CompletionResult(prefix, display_text=choice) + for choice in choices + ], ) return self._set_results_name(results, prefix) @@ -316,7 +344,7 @@ def _set_results_name(self, results, fragment): name_part_len = len(fragment) - len(result.name) result.name = "%s%s" % ( fragment[:name_part_len], - result.display_text + result.display_text, ) return results @@ -352,6 +380,7 @@ def _parse_fragment(self, fragment, attempt=1): # and make one more attempt # --option foo={bar -> foo={bar=DUMMY_EQ_VALUE from awscli.shorthand import ShorthandParseError + if fragment is None: return None if fragment == '': @@ -369,7 +398,8 @@ def _parse_fragment(self, fragment, attempt=1): except ShorthandParseError: if attempt == 1: return self._parse_fragment( - f'{fragment}={self._DUMMY_EQ_VALUE}', attempt + 1) + f'{fragment}={self._DUMMY_EQ_VALUE}', attempt + 1 + ) # if we get here it means that we can't make it parsable and can't # suggest anything so the only solution is to wait till user enter more @@ -423,7 +453,7 @@ def _get_close_brackets_completion(self, fragment): if close_brackets: return CompletionResult( f'{fragment}{close_brackets}', - display_text='Autoclose brackets' + display_text='Autoclose brackets', ) def _get_prompt_for_string(self, arg_model, parsed_input): @@ -437,18 +467,26 @@ def _get_prompt_for_string(self, arg_model, parsed_input): prefix = list(prefix.keys())[0] if prefix == self._DUMMY_VALUE: prefix = '' - return self._filter(prefix, - [CompletionResult(prefix, display_text=enum) - for enum in arg_model.enum]) + return self._filter( + prefix, + [ + CompletionResult(prefix, display_text=enum) + for enum in arg_model.enum + ], + ) def _get_prompt_for_boolean(self, arg_model, parsed_input): all_results = ['true', 'false'] prefix = parsed_input if prefix == self._DUMMY_VALUE: prefix = '' - return self._filter(prefix, - [CompletionResult(prefix, display_text=result) - for result in all_results]) + return self._filter( + prefix, + [ + CompletionResult(prefix, display_text=result) + for result in all_results + ], + ) def _get_prompt_for_list(self, arg_model, parsed_input): # we have two way we can enter lists: @@ -481,13 +519,14 @@ def _get_prompt_for_structure(self, arg_model, parsed_input): # key exists but we don't have such key in model return None return self._get_completion( - arg_model.members[last_key], last_value) + arg_model.members[last_key], last_value + ) entered_keys = set(parsed_input) - set([last_key]) return self._get_struct_keys_completions( - arg_model, entered_keys, last_key) + arg_model, entered_keys, last_key + ) - def _get_struct_keys_completions(self, arg_model, entered_keys, - last_key): + def 
_get_struct_keys_completions(self, arg_model, entered_keys, last_key): # get suggestions for the structure keys, as CompletionResult.name # we return only part of the suggestion that has not been entered yet results = [] @@ -495,22 +534,25 @@ def _get_struct_keys_completions(self, arg_model, entered_keys, if member_name not in entered_keys: display_text = '%s=%s' % ( member_name, - self._VALUE_PREFIXES.get(member.type_name, '') + self._VALUE_PREFIXES.get(member.type_name, ''), ) - results.append(CompletionResult( - last_key, - help_text=strip_html_tags_and_newlines(member.documentation), - cli_type_name=member.type_name, - display_text=display_text + results.append( + CompletionResult( + last_key, + help_text=strip_html_tags_and_newlines( + member.documentation + ), + cli_type_name=member.type_name, + display_text=display_text, ) ) return self._filter(last_key, results) class QueryCompleter(BaseCompleter): - - def __init__(self, cli_driver_fetcher=None, - response_filter=startswith_filter): + def __init__( + self, cli_driver_fetcher=None, response_filter=startswith_filter + ): self._filter = response_filter self._cli_driver_fetcher = cli_driver_fetcher self._argument_generator = None @@ -520,6 +562,7 @@ def __init__(self, cli_driver_fetcher=None, def jmespath(self): if self._jmespath is None: import jmespath + self._jmespath = jmespath return self._jmespath @@ -527,19 +570,24 @@ def jmespath(self): def argument_generator(self): if self._argument_generator is None: from botocore.utils import ArgumentGenerator + self._argument_generator = ArgumentGenerator return self._argument_generator def complete(self, parsed): if self._cli_driver_fetcher is None: return - if parsed.current_param == 'query' and \ - parsed.current_fragment is not None: + if ( + parsed.current_param == 'query' + and parsed.current_fragment is not None + ): operation_model = self._cli_driver_fetcher.get_operation_model( - parsed.lineage, parsed.current_command) + parsed.lineage, parsed.current_command + ) if operation_model: - return self._get_completions(parsed.current_fragment, - operation_model) + return self._get_completions( + parsed.current_fragment, operation_model + ) def _get_query_and_last_key(self, query): # Because output example has only 1 element in any list if @@ -552,14 +600,16 @@ def _get_query_and_last_key(self, query): def _create_completions(self, results, last_key, fragment): completions = self._filter( last_key, - [CompletionResult(last_key, display_text=result) - for result in results] + [ + CompletionResult(last_key, display_text=result) + for result in results + ], ) for completion in completions: name_part_len = len(fragment) - len(completion.name) completion.name = "%s%s" % ( fragment[:name_part_len], - completion.display_text + completion.display_text, ) return completions @@ -567,18 +617,19 @@ def _is_field_expression(self, expression): is_last_child_field = False is_field = expression.parsed['type'] == 'field' if expression.parsed['children']: - is_last_child_field = \ + is_last_child_field = ( expression.parsed['children'][-1]['type'] == 'field' + ) return is_field or is_last_child_field def _get_completions(self, fragment, operation_model): results = [] last_key = fragment if operation_model.output_shape: - argument_generator = self.argument_generator( - use_member_names=True) + argument_generator = self.argument_generator(use_member_names=True) response = argument_generator.generate_skeleton( - operation_model.output_shape) + operation_model.output_shape + ) if '.' 
not in fragment: if isinstance(response, dict): results = response.keys() @@ -587,8 +638,11 @@ def _get_completions(self, fragment, operation_model): query, last_key = self._get_query_and_last_key(fragment) expression = self.jmespath.compile(query) parsed_response = expression.search(response) - if parsed_response and isinstance(parsed_response, list) \ - and not self._is_field_expression(expression): + if ( + parsed_response + and isinstance(parsed_response, list) + and not self._is_field_expression(expression) + ): parsed_response = parsed_response[0] if isinstance(parsed_response, dict): results = parsed_response.keys() diff --git a/awscli/autocomplete/local/fetcher.py b/awscli/autocomplete/local/fetcher.py index c21cd9ff2d38..67c9d8d23493 100644 --- a/awscli/autocomplete/local/fetcher.py +++ b/awscli/autocomplete/local/fetcher.py @@ -13,7 +13,6 @@ class CliDriverFetcher: - def __init__(self, cli_driver): self._cli_driver = cli_driver @@ -35,12 +34,18 @@ def get_operation_model(self, lineage, current_command): return command.create_help_command().obj def get_argument_model(self, lineage, current_command, arg_name): - return getattr(self._get_argument( - lineage, current_command, arg_name), 'argument_model', None) + return getattr( + self._get_argument(lineage, current_command, arg_name), + 'argument_model', + None, + ) def get_argument_documentation(self, lineage, current_command, arg_name): - return getattr(self._get_argument( - lineage, current_command, arg_name), 'documentation', '') + return getattr( + self._get_argument(lineage, current_command, arg_name), + 'documentation', + '', + ) def get_global_arg_documentation(self, arg_name): return self._cli_driver.arg_table[arg_name].documentation diff --git a/awscli/autocomplete/local/indexer.py b/awscli/autocomplete/local/indexer.py index 6691e7abdb5d..13a796f3a4d5 100644 --- a/awscli/autocomplete/local/indexer.py +++ b/awscli/autocomplete/local/indexer.py @@ -22,14 +22,14 @@ class ModelIndexer(object): # TODO add full names to custom commands to get rid of this map _HIGH_LEVEL_SERVICE_FULL_NAMES = { 's3': 'High level S3 commands', - 'ddb': 'High level DynamoDB commands' + 'ddb': 'High level DynamoDB commands', } _NON_SERVICE_COMMANDS = ['configure', 'history', 'cli-dev'] _CREATE_CMD_TABLE = """\ CREATE TABLE IF NOT EXISTS command_table ( - command TEXT, + command TEXT, full_name TEXT, parent TEXT REFERENCES command_table, PRIMARY KEY (command, parent) @@ -51,12 +51,12 @@ class ModelIndexer(object): """ _CREATE_COMMAND_TABLE_INDEX = """\ - CREATE INDEX parent_index + CREATE INDEX parent_index ON command_table(parent); """ _CREATE_PARAM_TABLE_INDEX = """\ - CREATE INDEX parent_command_index + CREATE INDEX parent_command_index ON param_table(parent, command); """ @@ -74,10 +74,12 @@ def generate_index(self, clidriver): ) help_command_table = clidriver.create_help_command().command_table command_table = clidriver.subcommand_table - self._generate_arg_index(command=parent, parent='', - arg_table=clidriver.arg_table) - self._generate_command_index(command_table, parent=parent, - help_command_table=help_command_table) + self._generate_arg_index( + command=parent, parent='', arg_table=clidriver.arg_table + ) + self._generate_command_index( + command_table, parent=parent, help_command_table=help_command_table + ) self._generate_table_indexes() @@ -95,10 +97,13 @@ def _generate_arg_index(self, command, parent, arg_table): 'required)' ' VALUES (:argname, :type_name, :command, :parent, :nargs, ' ' :positional_arg, :required)', - argname=name, 
type_name=value.cli_type_name, - command=command, parent=parent, - nargs=value.nargs, positional_arg=value.positional_arg, - required=required + argname=name, + type_name=value.cli_type_name, + command=command, + parent=parent, + nargs=value.nargs, + positional_arg=value.positional_arg, + required=required, ) def _get_service_full_name(self, name, help_command_table): @@ -109,19 +114,24 @@ def _get_service_full_name(self, name, help_command_table): if service: return service.service_model.metadata['serviceFullName'] - def _generate_command_index(self, command_table, - parent, help_command_table=None): + def _generate_command_index( + self, command_table, parent, help_command_table=None + ): for name, command in command_table.items(): full_name = self._get_service_full_name(name, help_command_table) self._db_connection.execute( 'INSERT INTO command_table (command, parent, full_name) ' 'VALUES (:command, :parent, :full_name)', - command=name, parent=parent, full_name=full_name + command=name, + parent=parent, + full_name=full_name, + ) + self._generate_arg_index( + command=name, parent=parent, arg_table=command.arg_table + ) + self._generate_command_index( + command.subcommand_table, parent='%s.%s' % (parent, name) ) - self._generate_arg_index(command=name, parent=parent, - arg_table=command.arg_table) - self._generate_command_index(command.subcommand_table, - parent='%s.%s' % (parent, name)) def _generate_table_indexes(self): self._db_connection.execute(self._CREATE_COMMAND_TABLE_INDEX) diff --git a/awscli/autocomplete/local/model.py b/awscli/autocomplete/local/model.py index 39d50e5d1590..d81045de2ce8 100644 --- a/awscli/autocomplete/local/model.py +++ b/awscli/autocomplete/local/model.py @@ -16,7 +16,9 @@ in the `service-2.json` files. """ + from collections import namedtuple + from awscli.autocomplete import db # This module and the awscli.autocomplete.db module are imported @@ -26,9 +28,18 @@ # the sqlite3 cache file. -CLIArgument = namedtuple('CLIArgument', ['argname', 'type_name', - 'command', 'parent', 'nargs', - 'positional_arg', 'required']) +CLIArgument = namedtuple( + 'CLIArgument', + [ + 'argname', + 'type_name', + 'command', + 'parent', + 'nargs', + 'positional_arg', + 'required', + ], +) class ModelIndex(object): @@ -39,6 +50,7 @@ class ModelIndex(object): the model based autocompleter. 
""" + _COMMAND_NAME_QUERY = """ SELECT command, full_name FROM command_table WHERE parent = :parent @@ -74,8 +86,7 @@ def __init__(self, db_filename=None): def _get_db_connection(self): if self._db_connection is None: - self._db_connection = db.DatabaseConnection( - self._db_filename) + self._db_connection = db.DatabaseConnection(self._db_filename) return self._db_connection def command_names(self, lineage): @@ -118,9 +129,12 @@ def arg_names(self, lineage, command_name, positional_arg=False): """ db = self._get_db_connection() parent = '.'.join(lineage) - results = db.execute(self._ARG_NAME_QUERY, - parent=parent, command=command_name, - positional_arg=positional_arg) + results = db.execute( + self._ARG_NAME_QUERY, + parent=parent, + command=command_name, + positional_arg=positional_arg, + ) return [row[0] for row in results] def get_argument_data(self, lineage, command_name, arg_name): @@ -141,8 +155,12 @@ def get_argument_data(self, lineage, command_name, arg_name): """ db = self._get_db_connection() parent = '.'.join(lineage) - results = db.execute(self._ARG_DATA_QUERY, parent=parent, - command=command_name, argname=arg_name) + results = db.execute( + self._ARG_DATA_QUERY, + parent=parent, + command=command_name, + argname=arg_name, + ) match = results.fetchone() if match is not None: return CLIArgument(*match) diff --git a/awscli/autocomplete/main.py b/awscli/autocomplete/main.py index 8055768a1bd8..6a1ae444767a 100644 --- a/awscli/autocomplete/main.py +++ b/awscli/autocomplete/main.py @@ -16,14 +16,16 @@ # everytime a user hits . Try to avoid any expensive module level # work or really heavyweight imports. Prefer to lazy load as much as possible. -from awscli.autocomplete import parser, completer, filters -from awscli.autocomplete.local import model, basic, fetcher -from awscli.autocomplete import serverside -from awscli.autocomplete import custom +from awscli.autocomplete import completer, custom, filters, parser, serverside +from awscli.autocomplete.local import basic, fetcher, model -def create_autocompleter(index_filename=None, custom_completers=None, - driver=None, response_filter=None): +def create_autocompleter( + index_filename=None, + custom_completers=None, + driver=None, + response_filter=None, +): if response_filter is None: response_filter = filters.startswith_filter if custom_completers is None: @@ -36,15 +38,19 @@ def create_autocompleter(index_filename=None, custom_completers=None, completers = [ basic.RegionCompleter(response_filter=response_filter), basic.ProfileCompleter(response_filter=response_filter), - basic.ModelIndexCompleter(index, cli_driver_fetcher, - response_filter=response_filter), + basic.ModelIndexCompleter( + index, cli_driver_fetcher, response_filter=response_filter + ), basic.FilePathCompleter(response_filter=response_filter), serverside.create_server_side_completer( - index_filename, response_filter=response_filter), - basic.ShorthandCompleter(cli_driver_fetcher, - response_filter=response_filter), - basic.QueryCompleter(cli_driver_fetcher, - response_filter=response_filter), + index_filename, response_filter=response_filter + ), + basic.ShorthandCompleter( + cli_driver_fetcher, response_filter=response_filter + ), + basic.QueryCompleter( + cli_driver_fetcher, response_filter=response_filter + ), ] + custom_completers cli_completer = completer.AutoCompleter(cli_parser, completers) return cli_completer diff --git a/awscli/autocomplete/parser.py b/awscli/autocomplete/parser.py index 15b3ed240712..e13cc6ecc0db 100644 --- 
a/awscli/autocomplete/parser.py +++ b/awscli/autocomplete/parser.py @@ -14,9 +14,16 @@ class ParsedResult(object): - def __init__(self, current_command=None, current_param=None, - global_params=None, parsed_params=None, - lineage=None, current_fragment=None, unparsed_items=None): + def __init__( + self, + current_command=None, + current_param=None, + global_params=None, + parsed_params=None, + lineage=None, + current_fragment=None, + unparsed_items=None, + ): """ :param current_command: The name of the leaf command; the most @@ -126,6 +133,7 @@ class CLIParser(object): not a general purpose AWS CLI parser. """ + def __init__(self, index, return_first_command_match=False): self._index = index self._return_first_command_match = return_first_command_match @@ -149,18 +157,26 @@ def parse(self, command_line, location=None): while remaining_parts: current = remaining_parts.pop(0) if current.startswith('--'): - self._handle_option(current, remaining_parts, - current_args, global_args, parsed, state) + self._handle_option( + current, + remaining_parts, + current_args, + global_args, + parsed, + state, + ) else: current_args = self._handle_positional( - current, state, remaining_parts, parsed) + current, state, remaining_parts, parsed + ) parsed.current_command = state.current_command parsed.current_param = state.current_param parsed.lineage = state.lineage return parsed - def _consume_value(self, remaining_parts, option_name, - lineage, current_command, state): + def _consume_value( + self, remaining_parts, option_name, lineage, current_command, state + ): # We have a special case where a user is trying to complete # a value for an option, which is the last fragment of the command, # e.g. 'aws ec2 describe-instances --instance-ids ' @@ -205,8 +221,9 @@ def _consume_value(self, remaining_parts, option_name, # an empty list being returned. This is acceptable # for auto-completion purposes. value = [] - while len(remaining_parts) > 0 and \ - not remaining_parts == [WORD_BOUNDARY]: + while len(remaining_parts) > 0 and not remaining_parts == [ + WORD_BOUNDARY + ]: if remaining_parts[0].startswith('--'): state.current_param = None break @@ -243,8 +260,15 @@ def _split_to_parts(self, command_line, location): state.current_command = 'aws' return state, parts - def _handle_option(self, current, remaining_parts, current_args, - global_args, parsed, state): + def _handle_option( + self, + current, + remaining_parts, + current_args, + global_args, + parsed, + state, + ): if current_args is None: # If there are no arguments found for this current scope, # it usually indicates we've encounted a command we don't know. 
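The reflowed `parse` loop above walks the command line word by word: anything starting with `--` is routed to `_handle_option` (which may consume the following word as its value), everything else to `_handle_positional`. A toy sketch of that classification idea, with invented names and none of the index lookups or lineage tracking the real parser performs:

.. code-block:: python

    def classify_words(words):
        """Toy option/positional split; the real parser also consults
        the command index and tracks parser state."""
        commands, options = [], {}
        remaining = list(words)
        while remaining:
            current = remaining.pop(0)
            if current.startswith('--'):
                value = None
                # An option may consume the next word as its value,
                # unless that word is itself another option.
                if remaining and not remaining[0].startswith('--'):
                    value = remaining.pop(0)
                options[current[2:]] = value
            else:
                commands.append(current)
        return commands, options

    # classify_words(['ec2', 'describe-instances', '--instance-ids', 'i-0abc'])
    # -> (['ec2', 'describe-instances'], {'instance-ids': 'i-0abc'})
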
@@ -257,15 +281,21 @@ def _handle_option(self, current, remaining_parts, current_args, if option_name in global_args: state.current_param = option_name value = self._consume_value( - remaining_parts, option_name, lineage=[], + remaining_parts, + option_name, + lineage=[], state=state, - current_command='aws') + current_command='aws', + ) parsed.global_params[option_name] = value elif option_name in current_args: state.current_param = option_name value = self._consume_value( - remaining_parts, option_name, state.lineage, - state.current_command, state=state, + remaining_parts, + option_name, + state.lineage, + state.current_command, + state=state, ) parsed.parsed_params[option_name] = value elif self._is_last_word(remaining_parts, current): @@ -284,8 +314,10 @@ def _is_last_word(self, remaining_parts, current): return not remaining_parts and current def _is_part_of_command(self, current, command_names): - return any(command.startswith(current) and command != current - for command in command_names) + return any( + command.startswith(current) and command != current + for command in command_names + ) def _is_command_name(self, current, remaining_parts, command_names): # If _return_first_command_match is True @@ -323,16 +355,18 @@ def _handle_positional(self, current, state, remaining_parts, parsed): state.current_command = current # We also need to get the next set of command line options. current_args = self._index.arg_names( - lineage=state.lineage, - command_name=state.current_command) + lineage=state.lineage, command_name=state.current_command + ) return current_args if not command_names: # If there are no more command names check. See if the command # has a positional argument. This will require an additional # select on the argument index. positional_argname = self._get_positional_argname(state) - if (positional_argname and - positional_argname not in parsed.parsed_params): + if ( + positional_argname + and positional_argname not in parsed.parsed_params + ): # Parse the current string to be a positional argument # if the command has the a positional arg and the positional arg # has not already been parsed. @@ -347,8 +381,8 @@ def _handle_positional(self, current, state, remaining_parts, parsed): parsed.parsed_params[positional_argname] = current state.current_param = None return self._index.arg_names( - lineage=state.lineage, - command_name=state.current_command) + lineage=state.lineage, command_name=state.current_command + ) else: if not remaining_parts: # If this is the last chunk of the command line but @@ -371,7 +405,8 @@ def _handle_positional(self, current, state, remaining_parts, parsed): state.current_param = None return self._index.arg_names( lineage=state.lineage, - command_name=state.current_command) + command_name=state.current_command, + ) else: # Otherwise this is some command we don't know about # so we add it to the list of unparsed_items. @@ -382,7 +417,7 @@ def _get_positional_argname(self, state): positional_args = self._index.arg_names( lineage=state.lineage, command_name=state.current_command, - positional_arg=True + positional_arg=True, ) if positional_args: # We are assuming there is only ever one positional diff --git a/awscli/autocomplete/serverside/__init__.py b/awscli/autocomplete/serverside/__init__.py index 45438a93f8c7..2c41b9af7cd4 100644 --- a/awscli/autocomplete/serverside/__init__.py +++ b/awscli/autocomplete/serverside/__init__.py @@ -11,17 +11,17 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. +from awscli.autocomplete import db + # NOTE: This file is imported whenever a user hits TAB. There should not # be any expensive imports in this file. If necessary, use lazy imports # to ensure we only import heavyweight modules when we know we need them. -from awscli.autocomplete.serverside import servercomp -from awscli.autocomplete.serverside import model -from awscli.autocomplete import db +from awscli.autocomplete.serverside import model, servercomp def create_server_side_completer(index_filename, response_filter=None): return servercomp.ServerSideCompleter( - model.DBCompletionLookup( - db.DatabaseConnection(index_filename) - ), - servercomp.LazyClientCreator(), response_filter) + model.DBCompletionLookup(db.DatabaseConnection(index_filename)), + servercomp.LazyClientCreator(), + response_filter, + ) diff --git a/awscli/autocomplete/serverside/custom_completers/__init__.py b/awscli/autocomplete/serverside/custom_completers/__init__.py index 14b8a2435bce..9e64262dbe72 100644 --- a/awscli/autocomplete/serverside/custom_completers/__init__.py +++ b/awscli/autocomplete/serverside/custom_completers/__init__.py @@ -9,4 +9,4 @@ # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. \ No newline at end of file +# language governing permissions and limitations under the License. diff --git a/awscli/autocomplete/serverside/custom_completers/ddb/__init__.py b/awscli/autocomplete/serverside/custom_completers/ddb/__init__.py index 14b8a2435bce..9e64262dbe72 100644 --- a/awscli/autocomplete/serverside/custom_completers/ddb/__init__.py +++ b/awscli/autocomplete/serverside/custom_completers/ddb/__init__.py @@ -9,4 +9,4 @@ # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. \ No newline at end of file +# language governing permissions and limitations under the License. diff --git a/awscli/autocomplete/serverside/custom_completers/ddb/autocomplete.py b/awscli/autocomplete/serverside/custom_completers/ddb/autocomplete.py index fb9311f32e65..880ce8d168b7 100644 --- a/awscli/autocomplete/serverside/custom_completers/ddb/autocomplete.py +++ b/awscli/autocomplete/serverside/custom_completers/ddb/autocomplete.py @@ -25,7 +25,4 @@ class TableNameCompleter(servercomp.BaseCustomServerSideCompleter): def _get_remote_results(self, parsed): client = self._get_client('dynamodb', parsed) response = self._invoke_api(client, 'list_tables', {}) - return [ - table_name for table_name in response.get('TableNames', []) - ] - + return [table_name for table_name in response.get('TableNames', [])] diff --git a/awscli/autocomplete/serverside/custom_completers/logs/__init__.py b/awscli/autocomplete/serverside/custom_completers/logs/__init__.py index 14b8a2435bce..9e64262dbe72 100644 --- a/awscli/autocomplete/serverside/custom_completers/logs/__init__.py +++ b/awscli/autocomplete/serverside/custom_completers/logs/__init__.py @@ -9,4 +9,4 @@ # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. \ No newline at end of file +# language governing permissions and limitations under the License. diff --git a/awscli/autocomplete/serverside/indexer.py b/awscli/autocomplete/serverside/indexer.py index 9a6e240b7cab..0ec6dd7b3d67 100644 --- a/awscli/autocomplete/serverside/indexer.py +++ b/awscli/autocomplete/serverside/indexer.py @@ -12,11 +12,10 @@ # language governing permissions and limitations under the License. import json -from botocore.exceptions import UnknownServiceError -from botocore import xform_name - import awscli.clidriver from awscli.autocomplete.db import DatabaseConnection +from botocore import xform_name +from botocore.exceptions import UnknownServiceError def create_apicall_indexer(filename): @@ -79,7 +78,8 @@ def _construct_completion_data(self, loader, command): service_name = op_model.service_model.service_name try: completions = loader.load_service_model( - service_name, type_name='completions-1') + service_name, type_name='completions-1' + ) except UnknownServiceError: return None # The completions-1 file is for the entire service. We need @@ -96,8 +96,10 @@ def _construct_completion_data(self, loader, command): continue # At this point we know there's completion info we need. transformed = self._transform_completion_data( - completion_for_op[api_casing], completions['resources'], - service_name) + completion_for_op[api_casing], + completions['resources'], + service_name, + ) self._insert_into_db(transformed, arg_name, command) def _insert_into_db(self, transformed, arg_name, command): @@ -110,8 +112,9 @@ def _insert_into_db(self, transformed, arg_name, command): parent=parent, ) - def _transform_completion_data(self, completions_for_op, resources, - service_name): + def _transform_completion_data( + self, completions_for_op, resources, service_name + ): # The completions-1.json data is strictly model based. That is, # it's based entirely on the service API and has no mention of CLI # commands, parameters, etc. This method attempts to map @@ -133,10 +136,13 @@ def _transform_completion_data(self, completions_for_op, resources, for completion in completions_for_op['completions']: resource = resources[completion['resourceName']] jp_expr = resource['resourceIdentifier'][ - completion['resourceIdentifier']] - transformed = {'parameters': completion['parameters'], - 'service': service_name, - 'operation': xform_name(resource['operation']), - 'jp_expr': jp_expr} + completion['resourceIdentifier'] + ] + transformed = { + 'parameters': completion['parameters'], + 'service': service_name, + 'operation': xform_name(resource['operation']), + 'jp_expr': jp_expr, + } completions.append(transformed) return {'completions': completions} diff --git a/awscli/autocomplete/serverside/model.py b/awscli/autocomplete/serverside/model.py index c853fd70cb18..27db1b86e068 100644 --- a/awscli/autocomplete/serverside/model.py +++ b/awscli/autocomplete/serverside/model.py @@ -1,6 +1,5 @@ -"""Placeholder module for fleshing out the server side model interface. 
+"""Placeholder module for fleshing out the server side model interface.""" -""" import json @@ -21,8 +20,11 @@ def __init__(self, db_connection): def get_server_completion_data(self, lineage, command_name, param_name): parent = '.'.join(lineage) results = self._db_connection.execute( - self._QUERY, argname=param_name, - command=command_name, parent=parent) + self._QUERY, + argname=param_name, + command=command_name, + parent=parent, + ) match = results.fetchone() if match is not None: return json.loads(match[0]) diff --git a/awscli/autocomplete/serverside/servercomp.py b/awscli/autocomplete/serverside/servercomp.py index dd5e909b7ded..36e47c583473 100644 --- a/awscli/autocomplete/serverside/servercomp.py +++ b/awscli/autocomplete/serverside/servercomp.py @@ -12,16 +12,17 @@ # language governing permissions and limitations under the License. import logging -from awscli.autocomplete.completer import BaseCompleter, CompletionResult from awscli.autocomplete import LazyClientCreator +from awscli.autocomplete.completer import BaseCompleter, CompletionResult from awscli.autocomplete.filters import startswith_filter LOG = logging.getLogger(__name__) class ServerSideCompleter(BaseCompleter): - def __init__(self, completion_lookup, client_creator, - response_filter=None): + def __init__( + self, completion_lookup, client_creator, response_filter=None + ): self._completion_lookup = completion_lookup self._client_creator = client_creator self._jmespath = None @@ -33,6 +34,7 @@ def __init__(self, completion_lookup, client_creator, def jmespath(self): if self._jmespath is None: import jmespath + self._jmespath = jmespath return self._jmespath @@ -52,7 +54,9 @@ def complete(self, parsed): if not self._on_cli_option_value_fragment(parsed): return completion_data = self._completion_lookup.get_server_completion_data( - parsed.lineage, parsed.current_command, parsed.current_param, + parsed.lineage, + parsed.current_command, + parsed.current_param, ) if completion_data is None: return @@ -62,10 +66,11 @@ def complete(self, parsed): # list element. We'll need to update this once we support completions # with a 'parameters' key. raw_results = self._retrieve_remote_completion_data( - parsed, completion_data['completions'][0]) + parsed, completion_data['completions'][0] + ) return self._filter( parsed.current_fragment, - self._convert_to_completion_data(raw_results, parsed) + self._convert_to_completion_data(raw_results, parsed), ) def _convert_to_completion_data(self, raw_results, parsed): @@ -81,7 +86,7 @@ def _get_client(self, service_name, parsed): return self._client_creator.create_client( service_name, parsed_region=parsed.global_params.get('region'), - parsed_profile=parsed.global_params.get('profile') + parsed_profile=parsed.global_params.get('profile'), ) def _retrieve_remote_completion_data(self, parsed, completion_data): @@ -104,8 +109,12 @@ def _invoke_api(self, client, py_name, api_params): except Exception: # We don't want tracebacks to propagate back out to the user # so the best we can do is log the exception. 
- LOG.debug("Exception raised when calling %s on client %s", - client, py_name, exc_info=True) + LOG.debug( + "Exception raised when calling %s on client %s", + client, + py_name, + exc_info=True, + ) return {} def _map_command_to_api_params(self, parsed, completion_data): @@ -134,15 +143,16 @@ def complete(self, parsed): if remote_result.startswith(parsed.current_fragment): completion_results.append( CompletionResult( - remote_result, -len(parsed.current_fragment)) + remote_result, -len(parsed.current_fragment) + ) ) return completion_results def _is_value_for_param(self, parsed): return ( - parsed.lineage == self._LINEAGE and - parsed.current_command in self._COMMAND_NAMES and - parsed.current_param == self._PARAM_NAME + parsed.lineage == self._LINEAGE + and parsed.current_command in self._COMMAND_NAMES + and parsed.current_param == self._PARAM_NAME ) def _get_remote_results(self, parsed): diff --git a/awscli/autoprompt/core.py b/awscli/autoprompt/core.py index 3b0bb900e47f..81ea431d615e 100644 --- a/awscli/autoprompt/core.py +++ b/awscli/autoprompt/core.py @@ -10,17 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from botocore.exceptions import ProfileNotFound - -from awscli.customizations.exceptions import ParamValidationError -from awscli.autoprompt.prompttoolkit import PromptToolkitPrompter -from awscli.autocomplete.main import create_autocompleter from awscli.autocomplete.filters import fuzzy_filter +from awscli.autocomplete.main import create_autocompleter +from awscli.autoprompt.prompttoolkit import PromptToolkitPrompter +from awscli.customizations.exceptions import ParamValidationError from awscli.errorhandler import SilenceParamValidationMsgErrorHandler +from botocore.exceptions import ProfileNotFound class AutoPromptDriver: - _NO_PROMPT_ARGS = ['help', '--version'] _CLI_AUTO_PROMPT_OPTION = '--cli-auto-prompt' _NO_CLI_AUTO_PROMPT_OPTION = '--no-cli-auto-prompt' @@ -32,13 +30,15 @@ def __init__(self, driver, completion_source=None, prompter=None): self._driver = driver if self._completion_source is None: self._completion_source = create_autocompleter( - driver=self._driver, response_filter=fuzzy_filter) + driver=self._driver, response_filter=fuzzy_filter + ) @property def prompter(self): if self._prompter is None: - self._prompter = AutoPrompter(self._completion_source, - self._driver) + self._prompter = AutoPrompter( + self._completion_source, self._driver + ) return self._prompter def validate_auto_prompt_args_are_mutually_exclusive(self, args): @@ -84,12 +84,14 @@ class AutoPrompter: the UI prompt backend easily if needed. """ + def __init__(self, completion_source, driver, prompter=None): self._completion_source = completion_source self._driver = driver if prompter is None: - prompter = PromptToolkitPrompter(self._completion_source, - self._driver) + prompter = PromptToolkitPrompter( + self._completion_source, self._driver + ) self._prompter = prompter def prompt_for_values(self, original_args): diff --git a/awscli/autoprompt/doc.py b/awscli/autoprompt/doc.py index 12933cca0eec..38d866df50f3 100644 --- a/awscli/autoprompt/doc.py +++ b/awscli/autoprompt/doc.py @@ -11,6 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
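As the comment above notes, `_invoke_api` deliberately swallows errors: a traceback raised mid-TAB-press would garble the user's terminal, so the method logs at debug level and returns an empty result. A minimal standalone sketch of that swallow-and-log pattern (names invented for illustration):

.. code-block:: python

    import logging

    LOG = logging.getLogger(__name__)

    def invoke_quietly(api_call, **params):
        """Best-effort call: log failures at debug level, never raise."""
        try:
            return api_call(**params)
        except Exception:
            # Completion is cosmetic; a traceback here would be worse
            # than silently returning no suggestions.
            LOG.debug("Completion call %r failed", api_call, exc_info=True)
            return {}
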
import io + from docutils.core import publish_string from awscli.bcdoc import docevents, textwriter @@ -23,6 +24,7 @@ class DocsGetter: service commands and service operations. """ + def __init__(self, driver): self._driver = driver self._cache = {} @@ -37,7 +39,7 @@ def _render_docs(self, help_command): text_content = self._convert_rst_to_basic_text(original_cli_help) index = text_content.find('DESCRIPTION') if index > 0: - text_content = text_content[index + len('DESCRIPTION'):] + text_content = text_content[index + len('DESCRIPTION') :] return text_content def _convert_rst_to_basic_text(self, contents): @@ -57,8 +59,9 @@ def _convert_rst_to_basic_text(self, contents): # The report_level override is so that we don't print anything # to stdout/stderr on rendering issues. converted = publish_string( - contents, writer=BasicTextWriter(), - settings_overrides={'report_level': 5, 'halt_level': 5} + contents, + writer=BasicTextWriter(), + settings_overrides={'report_level': 5, 'halt_level': 5}, ) return converted.decode('utf-8').replace('\r', '') @@ -92,7 +95,6 @@ def get_docs(self, parsed): class FileRenderer: - def __init__(self): self._io = io.BytesIO() diff --git a/awscli/autoprompt/factory.py b/awscli/autoprompt/factory.py index 1b35f4b88d66..59cffbc6daea 100644 --- a/awscli/autoprompt/factory.py +++ b/awscli/autoprompt/factory.py @@ -15,23 +15,34 @@ from prompt_toolkit.buffer import Buffer from prompt_toolkit.document import Document from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.key_binding.bindings.focus import focus_next from prompt_toolkit.keys import Keys -from prompt_toolkit.layout import Float, FloatContainer, HSplit, Window, VSplit +from prompt_toolkit.layout import Float, FloatContainer, HSplit, VSplit, Window from prompt_toolkit.layout.controls import BufferControl from prompt_toolkit.layout.dimension import Dimension -from prompt_toolkit.layout.layout import Layout, ConditionalContainer -from prompt_toolkit.layout.menus import CompletionsMenu, MultiColumnCompletionsMenu +from prompt_toolkit.layout.layout import ConditionalContainer, Layout +from prompt_toolkit.layout.menus import ( + CompletionsMenu, + MultiColumnCompletionsMenu, +) from prompt_toolkit.layout.processors import BeforeInput from prompt_toolkit.widgets import SearchToolbar, VerticalLine -from prompt_toolkit.key_binding.bindings.focus import focus_next -from awscli.autoprompt.history import HistoryDriver, HistoryCompleter -from awscli.autoprompt.widgets import ( - HelpPanelWidget, ToolbarWidget, DebugPanelWidget, TitleLine -) from awscli.autoprompt.filters import ( - is_one_column, is_multi_column, doc_section_visible, output_section_visible, - input_buffer_has_focus, doc_window_has_focus, is_history_mode + doc_section_visible, + doc_window_has_focus, + input_buffer_has_focus, + is_history_mode, + is_multi_column, + is_one_column, + output_section_visible, +) +from awscli.autoprompt.history import HistoryCompleter, HistoryDriver +from awscli.autoprompt.widgets import ( + DebugPanelWidget, + HelpPanelWidget, + TitleLine, + ToolbarWidget, ) @@ -40,7 +51,6 @@ class PrompterKeyboardInterrupt(KeyboardInterrupt): class CLIPromptBuffer(Buffer): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._completer = self.completer @@ -73,16 +83,20 @@ def __init__(self, completer, history_driver=None): def history_driver(self): if self._history_driver is None: cache_dir = os.path.expanduser( - os.path.join('~', '.aws', 'cli', 'cache')) + os.path.join('~', '.aws', 'cli', 'cache') 
+ ) history_filename = os.path.join(cache_dir, 'prompt_history.json') self._history_driver = HistoryDriver(history_filename) return self._history_driver def create_input_buffer(self, on_text_changed_callback=None): return CLIPromptBuffer( - name='input_buffer', completer=self._completer, - history=self.history_driver, complete_while_typing=True, - on_text_changed=on_text_changed_callback) + name='input_buffer', + completer=self._completer, + history=self.history_driver, + complete_while_typing=True, + on_text_changed=on_text_changed_callback, + ) def create_doc_buffer(self): return Buffer(name='doc_buffer', read_only=True) @@ -95,12 +109,12 @@ def create_input_buffer_container(self, input_buffer): Window( BufferControl( buffer=input_buffer, - input_processors=[BeforeInput('> aws ')] + input_processors=[BeforeInput('> aws ')], ), height=Dimension( min=self.DIMENSIONS['input_buffer_height_min'] ), - wrap_lines=True + wrap_lines=True, ), [ Float( @@ -108,7 +122,7 @@ def create_input_buffer_container(self, input_buffer): ycursor=True, content=MultiColumnCompletionsMenu( extra_filter=is_multi_column - ) + ), ), Float( xcursor=True, @@ -116,41 +130,51 @@ def create_input_buffer_container(self, input_buffer): content=CompletionsMenu( extra_filter=is_one_column, max_height=self.DIMENSIONS['menu_height_max'], - scroll_offset=self.DIMENSIONS['menu_scroll_offset'] - ) - ) - ] + scroll_offset=self.DIMENSIONS['menu_scroll_offset'], + ), + ), + ], ) def create_bottom_panel(self, doc_window, output_window): - return VSplit([ - ConditionalContainer(doc_window, doc_section_visible), - ConditionalContainer(VerticalLine(), - output_section_visible & doc_section_visible), - ConditionalContainer(output_window, output_section_visible), - ]) + return VSplit( + [ + ConditionalContainer(doc_window, doc_section_visible), + ConditionalContainer( + VerticalLine(), + output_section_visible & doc_section_visible, + ), + ConditionalContainer(output_window, output_section_visible), + ] + ) def create_searchable_window(self, title, output_buffer): search_field = SearchToolbar() - return HSplit([ - TitleLine(title), - Window( - content=BufferControl( - buffer=output_buffer, - search_buffer_control=search_field.control - ), - height=Dimension( - max=self.DIMENSIONS['doc_window_height_max'], - preferred=self.DIMENSIONS['doc_window_height_pref'] + return HSplit( + [ + TitleLine(title), + Window( + content=BufferControl( + buffer=output_buffer, + search_buffer_control=search_field.control, + ), + height=Dimension( + max=self.DIMENSIONS['doc_window_height_max'], + preferred=self.DIMENSIONS['doc_window_height_pref'], + ), + wrap_lines=True, ), - wrap_lines=True - ), - search_field - ]) + search_field, + ] + ) - def create_layout(self, on_input_buffer_text_changed=None, - input_buffer_container=None, doc_window=None, - output_window=None): + def create_layout( + self, + on_input_buffer_text_changed=None, + input_buffer_container=None, + doc_window=None, + output_window=None, + ): # This is the main layout, which consists of: # - The main input buffer with completion menus floating on top of it. # - A separating line between the input buffer and the doc window. 
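The layout assembled in this factory nests prompt_toolkit containers: an input window with floating completion menus, a conditional doc/output split, and toolbar and help panels. A minimal runnable sketch of the same container nesting, reduced to an input line over a docs pane (not the CLI's actual layout):

.. code-block:: python

    from prompt_toolkit.application import Application
    from prompt_toolkit.buffer import Buffer
    from prompt_toolkit.layout import HSplit, Layout, Window
    from prompt_toolkit.layout.controls import (
        BufferControl,
        FormattedTextControl,
    )

    input_buffer = Buffer(name='input_buffer')
    root = HSplit(
        [
            # Editable command line on top...
            Window(BufferControl(buffer=input_buffer), height=1),
            # ...read-only documentation pane underneath.
            Window(FormattedTextControl('Doc panel'), wrap_lines=True),
        ]
    )
    app = Application(layout=Layout(root), full_screen=False)
    # app.run() would start the interactive session.
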
@@ -160,27 +184,34 @@ def create_layout(self, on_input_buffer_text_changed=None, # - A help panel # - A debug panel in case debug mode enabled if input_buffer_container is None: - input_buffer = \ - self.create_input_buffer(on_input_buffer_text_changed) - input_buffer_container = \ - self.create_input_buffer_container(input_buffer) + input_buffer = self.create_input_buffer( + on_input_buffer_text_changed + ) + input_buffer_container = self.create_input_buffer_container( + input_buffer + ) if doc_window is None: doc_buffer = self.create_doc_buffer() doc_window = self.create_searchable_window('Doc panel', doc_buffer) if output_window is None: output_buffer = self.create_output_buffer() output_window = self.create_searchable_window( - 'Output panel', output_buffer) + 'Output panel', output_buffer + ) bottom_panel = self.create_bottom_panel(doc_window, output_window) return Layout( - HSplit([ - VSplit([ - HSplit([input_buffer_container, bottom_panel]), - HelpPanelWidget(), - DebugPanelWidget(), - ]), - ToolbarWidget() - ]) + HSplit( + [ + VSplit( + [ + HSplit([input_buffer_container, bottom_panel]), + HelpPanelWidget(), + DebugPanelWidget(), + ] + ), + ToolbarWidget(), + ] + ) ) def create_key_bindings(self): @@ -210,14 +241,16 @@ def _(event): event.app.current_buffer.reset() updated_document = Document( text=current_document.text, - cursor_position=current_document.cursor_position) + cursor_position=current_document.cursor_position, + ) buffer.set_document(updated_document) # If prompter suggested us something ended with slash and # started with 'file://' or 'fileb://' it should be path ended # with directory then we run completion again cur_word = current_document.get_word_under_cursor(WORD=True) - if cur_word.endswith(os.sep) \ - and cur_word.startswith(('file://', 'fileb://')): + if cur_word.endswith(os.sep) and cur_word.startswith( + ('file://', 'fileb://') + ): buffer.start_completion() @self._kb.add(Keys.Escape, filter=is_history_mode) @@ -231,8 +264,10 @@ def _(event): """Exit from history mode if something was selected or just add space to the end of the text and keep suggesting""" buffer = event.app.current_buffer - if (buffer.complete_state - and buffer.complete_state.current_completion): + if ( + buffer.complete_state + and buffer.complete_state.current_completion + ): buffer.switch_history_mode() buffer.insert_text(' ') diff --git a/awscli/autoprompt/filters.py b/awscli/autoprompt/filters.py index 7600828753ac..0f71c43ddcab 100644 --- a/awscli/autoprompt/filters.py +++ b/awscli/autoprompt/filters.py @@ -68,7 +68,7 @@ def input_buffer_has_focus(): @Condition def is_history_mode(): """Only activate these key bindings if input buffer has focus - and history_mode is on """ + and history_mode is on""" buffer = get_app().current_buffer return buffer.name == 'input_buffer' and buffer.history_mode @@ -76,6 +76,6 @@ def is_history_mode(): @Condition def is_debug_mode(): """Only activate these key bindings if input buffer has focus - and history_mode is on """ + and history_mode is on""" app = get_app() return app.debug diff --git a/awscli/autoprompt/history.py b/awscli/autoprompt/history.py index 1a2c6dc6cf3f..2a8ba8932c51 100644 --- a/awscli/autoprompt/history.py +++ b/awscli/autoprompt/history.py @@ -10,17 +10,16 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
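The `@Condition` decorator used in `filters.py` above wraps a zero-argument callable into a filter that prompt_toolkit re-evaluates whenever a gated binding might fire. A small sketch of gating a key binding on such a condition (the binding and buffer name here are illustrative, not the CLI's):

.. code-block:: python

    from prompt_toolkit.application.current import get_app
    from prompt_toolkit.filters import Condition
    from prompt_toolkit.key_binding import KeyBindings

    @Condition
    def input_has_focus():
        # Re-checked each time the binding might fire.
        return get_app().current_buffer.name == 'input_buffer'

    kb = KeyBindings()

    @kb.add('escape', filter=input_has_focus)
    def _(event):
        # Only runs while the condition above evaluates to True.
        event.app.current_buffer.reset()
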
-import logging import json +import logging import os -from prompt_toolkit.completion import Completion, Completer +from prompt_toolkit.completion import Completer, Completion from prompt_toolkit.history import FileHistory from awscli.autocomplete.completer import CompletionResult from awscli.autocomplete.filters import fuzzy_filter - LOG = logging.getLogger(__name__) @@ -37,8 +36,9 @@ def load_history_strings(self): commands = json.load(f).get('commands', []) return reversed(commands) except Exception as e: - LOG.debug('Exception on loading prompt history: %s' % e, - exc_info=True) + LOG.debug( + 'Exception on loading prompt history: %s' % e, exc_info=True + ) return [] def store_string(self, string): @@ -50,7 +50,7 @@ def store_string(self, string): elif not os.path.exists(os.path.dirname(self.filename)): os.makedirs(os.path.dirname(self.filename)) history['commands'].append(string) - history['commands'] = history['commands'][-self._max_commands:] + history['commands'] = history['commands'][-self._max_commands :] with open(self.filename, 'w') as f: json.dump(history, f) except Exception: @@ -58,7 +58,6 @@ def store_string(self, string): class HistoryCompleter(Completer): - def __init__(self, buffer): self.buffer = buffer @@ -73,12 +72,16 @@ def get_completions(self, document, *args): s_line = line.strip() if s_line and s_line not in found_completions: found_completions.add(s_line) - completions.append(CompletionResult( - s_line, - starting_index=-len(current_line))) + completions.append( + CompletionResult( + s_line, starting_index=-len(current_line) + ) + ) if current_line: completions = fuzzy_filter(current_line, completions) - yield from (Completion(c.name, start_position=c.starting_index) - for c in completions) + yield from ( + Completion(c.name, start_position=c.starting_index) + for c in completions + ) except Exception: LOG.debug('Exception on loading prompt history:', exc_info=True) diff --git a/awscli/autoprompt/logger.py b/awscli/autoprompt/logger.py index 1a5653a09f75..a7eda934e5c5 100644 --- a/awscli/autoprompt/logger.py +++ b/awscli/autoprompt/logger.py @@ -12,12 +12,11 @@ # language governing permissions and limitations under the License. import logging -from prompt_toolkit.document import Document from prompt_toolkit.application import get_app +from prompt_toolkit.document import Document class PromptToolkitHandler(logging.StreamHandler): - def emit(self, record): try: app = get_app() diff --git a/awscli/autoprompt/output.py b/awscli/autoprompt/output.py index 01528ca181d9..1f71c183b90d 100644 --- a/awscli/autoprompt/output.py +++ b/awscli/autoprompt/output.py @@ -11,16 +11,15 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
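`HistoryDriver.store_string` in the hunk above persists prompt history as JSON and trims the list to the most recent `_max_commands` entries with a negative slice. A self-contained sketch of that keep-last-N pattern (function name and default cap are invented; the `{'commands': [...]}` shape follows the code above):

.. code-block:: python

    import json
    import os

    def append_history(filename, command, max_commands=100):
        """Append to a JSON history file, keeping only the newest entries."""
        history = {'commands': []}
        if os.path.exists(filename):
            with open(filename) as f:
                history = json.load(f)
        history['commands'].append(command)
        # Same negative-slice trim as store_string above.
        history['commands'] = history['commands'][-max_commands:]
        with open(filename, 'w') as f:
            json.dump(history, f)
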
import argparse -import logging import io +import logging import re import jmespath -from botocore.utils import ArgumentGenerator -from awscli.formatter import get_formatter from awscli.autocomplete.local.fetcher import CliDriverFetcher - +from awscli.formatter import get_formatter +from botocore.utils import ArgumentGenerator LOG = logging.getLogger(__name__) @@ -34,13 +33,16 @@ def __init__(self, driver): def get_output(self, parsed): operation_model = self._cli_driver_fetcher.get_operation_model( - parsed.lineage, parsed.current_command) + parsed.lineage, parsed.current_command + ) if operation_model: output_shape = getattr(operation_model, 'output_shape', None) if self._shape_has_members(output_shape): operation = ''.join( - [part.capitalize() - for part in parsed.current_command.split('-')] + [ + part.capitalize() + for part in parsed.current_command.split('-') + ] ) output, error_message = self._get_output(parsed) if error_message is not None: @@ -48,13 +50,15 @@ def get_output(self, parsed): query, error_message = self._get_query(parsed) if error_message is not None: return error_message - return self._get_display(operation, output_shape, - output, query) + return self._get_display( + operation, output_shape, output, query + ) return 'No output' def _shape_has_members(self, shape): - return shape and (getattr(shape, 'members', False) or - getattr(shape, 'member', False)) + return shape and ( + getattr(shape, 'members', False) or getattr(shape, 'member', False) + ) def _get_output(self, parsed): error_message = None @@ -65,8 +69,8 @@ def _get_output(self, parsed): output = parsed.global_params.get('output') or session_output if output not in self._output_formats: error_message = ( - "Bad value for --output: %s\n\nValid values are: %s" % - (output, ', '.join(self._output_formats)) + "Bad value for --output: %s\n\nValid values are: %s" + % (output, ', '.join(self._output_formats)) ) return output, error_message diff --git a/awscli/autoprompt/prompttoolkit.py b/awscli/autoprompt/prompttoolkit.py index b7e81f364df7..8f5f91512e37 100644 --- a/awscli/autoprompt/prompttoolkit.py +++ b/awscli/autoprompt/prompttoolkit.py @@ -13,21 +13,19 @@ import logging import shlex import sys -from contextlib import nullcontext, contextmanager +from contextlib import contextmanager, nullcontext from prompt_toolkit.application import Application -from prompt_toolkit.completion import Completer, ThreadedCompleter -from prompt_toolkit.completion import Completion +from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter from prompt_toolkit.document import Document -from awscli.logger import LOG_FORMAT, disable_crt_logging from awscli.autocomplete import parser from awscli.autocomplete.local import model from awscli.autoprompt.doc import DocsGetter -from awscli.autoprompt.output import OutputGetter from awscli.autoprompt.factory import PromptToolkitFactory from awscli.autoprompt.logger import PromptToolkitHandler - +from awscli.autoprompt.output import OutputGetter +from awscli.logger import LOG_FORMAT, disable_crt_logging LOG = logging.getLogger(__name__) @@ -54,12 +52,19 @@ def loggers_handler_switcher(): class PromptToolkitPrompter: - """Handles the actual prompting in the autoprompt workflow. 
- - """ - def __init__(self, completion_source, driver, completer=None, - factory=None, app=None, cli_parser=None, output=None, - app_input=None): + """Handles the actual prompting in the autoprompt workflow.""" + + def __init__( + self, + completion_source, + driver, + completer=None, + factory=None, + app=None, + cli_parser=None, + output=None, + app_input=None, + ): self._completion_source = completion_source self._output = output self._input = app_input @@ -73,7 +78,8 @@ def __init__(self, completion_source, driver, completer=None, self._parser = cli_parser if self._parser is None: self._parser = parser.CLIParser( - model.ModelIndex(), return_first_command_match=True) + model.ModelIndex(), return_first_command_match=True + ) self._factory = factory self.input_buffer = None self.doc_buffer = None @@ -93,33 +99,44 @@ def args(self, value): def _create_buffers(self): self.input_buffer = self._factory.create_input_buffer( - self.update_bottom_buffers_text) + self.update_bottom_buffers_text + ) self.doc_buffer = self._factory.create_doc_buffer() self.output_buffer = self._factory.create_output_buffer() def _create_containers(self): input_buffer_container = self._factory.create_input_buffer_container( - self.input_buffer) + self.input_buffer + ) doc_window = self._factory.create_searchable_window( - 'Doc panel', self.doc_buffer) + 'Doc panel', self.doc_buffer + ) output_window = self._factory.create_searchable_window( - 'Output panel', self.output_buffer) + 'Output panel', self.output_buffer + ) return input_buffer_container, doc_window, output_window def create_application(self): self._create_buffers() - input_buffer_container, \ - doc_window, output_window = self._create_containers() + input_buffer_container, doc_window, output_window = ( + self._create_containers() + ) layout = self._factory.create_layout( on_input_buffer_text_changed=self.update_bottom_buffers_text, input_buffer_container=input_buffer_container, - doc_window=doc_window, output_window=output_window + doc_window=doc_window, + output_window=output_window, ) kb_manager = self._factory.create_key_bindings() kb = kb_manager.keybindings - app = Application(layout=layout, key_bindings=kb, full_screen=False, - output=self._output, erase_when_done=True, - input=self._input) + app = Application( + layout=layout, + key_bindings=kb, + full_screen=False, + output=self._output, + erase_when_done=True, + input=self._input, + ) self._set_app_defaults(app) return app @@ -132,8 +149,7 @@ def _set_app_defaults(self, app): return app def update_bottom_buffers_text(self, *args): - parsed = self._parser.parse( - 'aws ' + self.input_buffer.document.text) + parsed = self._parser.parse('aws ' + self.input_buffer.document.text) self._update_doc_window_contents(parsed) self._update_output_window_contents(parsed) @@ -182,8 +198,8 @@ def pre_run(self): def _set_input_buffer_text(self, cmd_line_text): """If entered command line does not have trailing space and can not - be autocompleted we assume that it is a completed part of command - and add trailing space to it""" + be autocompleted we assume that it is a completed part of command + and add trailing space to it""" if cmd_line_text[-1] == ' ': return if self._can_autocomplete(cmd_line_text): @@ -240,11 +256,13 @@ class PromptToolkitCompleter(Completer): `prompt_toolkit.Completion` objects. 
""" + def __init__(self, completion_source): self._completion_source = completion_source - def _convert_to_prompt_completions(self, low_level_completions, - text_before_cursor): + def _convert_to_prompt_completions( + self, low_level_completions, text_before_cursor + ): # Converts the low-level completions from the model autocompleter # and converts them to Completion() objects that are used by # prompt_toolkit. @@ -255,21 +273,30 @@ def _convert_to_prompt_completions(self, low_level_completions, display_text = self._get_display_text(completion) display_meta = self._get_display_meta(completion) location = self._get_starting_location_of_last_word( - text_before_cursor, word_before_cursor) - yield Completion(completion.name, location, display=display_text, - display_meta=display_meta) + text_before_cursor, word_before_cursor + ) + yield Completion( + completion.name, + location, + display=display_text, + display_meta=display_meta, + ) def get_completions(self, document, complete_event): try: text_before_cursor = document.text_before_cursor text_to_autocomplete = 'aws ' + text_before_cursor completions = self._completion_source.autocomplete( - text_to_autocomplete, len(text_to_autocomplete)) + text_to_autocomplete, len(text_to_autocomplete) + ) yield from self._convert_to_prompt_completions( - completions, text_before_cursor) + completions, text_before_cursor + ) except Exception as e: - LOG.debug('Exception caught in PromptToolkitCompleter: %s' % e, - exc_info=True) + LOG.debug( + 'Exception caught in PromptToolkitCompleter: %s' % e, + exc_info=True, + ) def _strip_whitespace(self, text): word_before_cursor = '' @@ -283,17 +310,11 @@ def _prioritize_required_args(self, completions): return required_args + optional_args def _get_required_args(self, completions): - results = [ - arg for arg in completions - if arg.required - ] + results = [arg for arg in completions if arg.required] return results def _get_optional_args(self, completions): - results = [ - arg for arg in completions - if not arg.required - ] + results = [arg for arg in completions if not arg.required] return results def _get_display_text(self, completion): @@ -321,9 +342,10 @@ def _filter_completions(self, completions): def _filter_out_autoprompt_overrides(self, completions): filtered_completions = [ - completion for completion in completions - if completion.name not in ['--cli-auto-prompt', - '--no-cli-auto-prompt'] + completion + for completion in completions + if completion.name + not in ['--cli-auto-prompt', '--no-cli-auto-prompt'] ] return filtered_completions @@ -336,8 +358,9 @@ def _remove_duplicate_completions(self, completions): unique_completions.append(completion) return unique_completions - def _get_starting_location_of_last_word(self, text_before_cursor, - word_before_cursor): + def _get_starting_location_of_last_word( + self, text_before_cursor, word_before_cursor + ): if text_before_cursor and text_before_cursor[-1] == ' ': location = 0 else: diff --git a/awscli/autoprompt/widgets.py b/awscli/autoprompt/widgets.py index cfd4adefc556..7f446ff7e031 100644 --- a/awscli/autoprompt/widgets.py +++ b/awscli/autoprompt/widgets.py @@ -14,27 +14,41 @@ from functools import partial from prompt_toolkit.application import get_app +from prompt_toolkit.buffer import Buffer +from prompt_toolkit.document import Document from prompt_toolkit.filters import has_focus from prompt_toolkit.formatted_text import HTML, to_formatted_text from prompt_toolkit.formatted_text.utils import fragment_list_to_text -from prompt_toolkit.buffer 
import Buffer -from prompt_toolkit.document import Document from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.keys import Keys +from prompt_toolkit.layout import ( + ConditionalContainer, + Float, + FloatContainer, + HSplit, + VSplit, + Window, +) from prompt_toolkit.layout.controls import BufferControl from prompt_toolkit.layout.dimension import Dimension from prompt_toolkit.layout.processors import Processor, Transformation -from prompt_toolkit.layout import ( - HSplit, Window, VSplit, FloatContainer, Float, ConditionalContainer -) from prompt_toolkit.widgets import ( - Frame, HorizontalLine, Dialog, Button, TextArea, Label + Button, + Dialog, + Frame, + HorizontalLine, + Label, + TextArea, ) from prompt_toolkit.widgets.base import Border from awscli.autoprompt.filters import ( - help_section_visible, doc_window_has_focus, search_input_has_focus, - input_buffer_has_focus, is_history_mode, is_debug_mode, + doc_window_has_focus, + help_section_visible, + input_buffer_has_focus, + is_debug_mode, + is_history_mode, + search_input_has_focus, ) @@ -43,24 +57,30 @@ class FormatTextProcessor(Processor): format inside a ``prompt_toolkit.buffer.Buffer``. """ + def apply_transformation(self, text_input): # https://python-prompt-toolkit.readthedocs.io/en/master/pages/reference.html#module-prompt_toolkit.formatted_text fragments = to_formatted_text( - HTML(fragment_list_to_text(text_input.fragments))) + HTML(fragment_list_to_text(text_input.fragments)) + ) return Transformation(fragments) class TitleLine: - def __init__(self, title): fill = partial(Window, style='class:frame.border') - self.container = VSplit([ - fill(char=Border.HORIZONTAL), - fill(width=1, height=1, char='|'), - Label(title, style='class:frame.label', dont_extend_width=True), - fill(width=1, height=1, char='|'), - fill(char=Border.HORIZONTAL), - ], height=1) + self.container = VSplit( + [ + fill(char=Border.HORIZONTAL), + fill(width=1, height=1, char='|'), + Label( + title, style='class:frame.label', dont_extend_width=True + ), + fill(width=1, height=1, char='|'), + fill(char=Border.HORIZONTAL), + ], + height=1, + ) def __pt_container__(self): return self.container @@ -92,10 +112,10 @@ def create_window(self, help_buffer): content=BufferControl( buffer=help_buffer, input_processors=[FormatTextProcessor()], - focusable=self.FOCUSABLE + focusable=self.FOCUSABLE, ), wrap_lines=True, - **self.DIMENSIONS + **self.DIMENSIONS, ) @@ -106,7 +126,7 @@ class BaseHelpView(BaseHelpContainer): def create_window(self, help_buffer): return Frame( super(BaseHelpView, self).create_window(help_buffer), - title=self.TITLE + title=self.TITLE, ) @@ -203,7 +223,7 @@ def help_text(self): f'{self.STYLE}[F2] Focus on next panel{self.SPACING}' f'{self.STYLE}[F3] Hide/Show Docs{self.SPACING}' f'{self.STYLE}[F5] Hide/Show Output' - ) + ) class OutputToolbarView(BaseToolbarView): @@ -217,7 +237,7 @@ def help_text(self): f'{self.STYLE}[F2] Focus on next panel{self.SPACING}' f'{self.STYLE}[F3] Hide/Show Docs{self.SPACING}' f'{self.STYLE}[F5] Hide/Show Output' - ) + ) class DebugToolbarView(BaseToolbarView): @@ -226,9 +246,7 @@ class DebugToolbarView(BaseToolbarView): @property def help_text(self): - return ( - f'{self.STYLE}[CONTROL+S] Save log to file' - ) + return f'{self.STYLE}[CONTROL+S] Save log to file' class HistorySignToolbarView(BaseToolbarView): @@ -246,31 +264,37 @@ def help_text(self): class ToolbarWidget: - def __init__(self): - self.container = HSplit([ - ConditionalContainer(HorizontalLine(), ~help_section_visible), - VSplit([ 
- HistorySignToolbarView(), - ConditionalContainer( - VSplit([InputToolbarView(), - DocToolbarView(), - OutputToolbarView()]), - ~help_section_visible - ) - ]) - ]) + self.container = HSplit( + [ + ConditionalContainer(HorizontalLine(), ~help_section_visible), + VSplit( + [ + HistorySignToolbarView(), + ConditionalContainer( + VSplit( + [ + InputToolbarView(), + DocToolbarView(), + OutputToolbarView(), + ] + ), + ~help_section_visible, + ), + ] + ), + ] + ) def __pt_container__(self): return self.container class HelpPanelWidget: - def __init__(self): self.container = ConditionalContainer( HSplit([DocHelpView(), InputHelpView(), OutputHelpView()]), - help_section_visible + help_section_visible, ) def __pt_container__(self): @@ -289,7 +313,8 @@ def __init__(self): _kb = KeyBindings() _kb.add(Keys.ControlS, filter=is_debug_mode, is_global=True)( - self._activate_dialog) + self._activate_dialog + ) self.float_container = FloatContainer( Window( @@ -299,19 +324,21 @@ def __init__(self): wrap_lines=True, ), key_bindings=_kb, - floats=[] + floats=[], ) self.container = ConditionalContainer( Frame( - HSplit([ - self.float_container, - HorizontalLine(), - DebugToolbarView() - ]), + HSplit( + [ + self.float_container, + HorizontalLine(), + DebugToolbarView(), + ] + ), **self.DIMENSIONS, - title='Debug panel' + title='Debug panel', ), - filter=is_debug_mode + filter=is_debug_mode, ) def _activate_dialog(self, event): @@ -347,15 +374,20 @@ def ok_handler(*args, **kwargs): dialog = Dialog( title='Save logs to file', - body=HSplit([ - Label(text='Log file name', dont_extend_height=True), - textfield, - ], padding=Dimension(preferred=1, max=1)), + body=HSplit( + [ + Label(text='Log file name', dont_extend_height=True), + textfield, + ], + padding=Dimension(preferred=1, max=1), + ), buttons=[ok_button, cancel_button], - with_background=True) + with_background=True, + ) # add keybinding to save file on press Enter in textfield dialog.container.body.container.content.key_bindings.add( - Keys.Enter, filter=has_focus(textfield))(ok_handler) + Keys.Enter, filter=has_focus(textfield) + )(ok_handler) return dialog diff --git a/awscli/bcdoc/docevents.py b/awscli/bcdoc/docevents.py index 54bce6ebd9d9..e861d91ba3b5 100644 --- a/awscli/bcdoc/docevents.py +++ b/awscli/bcdoc/docevents.py @@ -31,76 +31,114 @@ 'doc-subitems-end': '.%s', 'doc-relateditems-start': '.%s', 'doc-relateditem': '.%s.%s', - 'doc-relateditems-end': '.%s' - } + 'doc-relateditems-end': '.%s', +} def generate_events(session, help_command): # Now generate the documentation events - session.emit('doc-breadcrumbs.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-title.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-description.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-synopsis-start.%s' % help_command.event_class, - help_command=help_command) + session.emit( + 'doc-breadcrumbs.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-title.%s' % help_command.event_class, help_command=help_command + ) + session.emit( + 'doc-description.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-synopsis-start.%s' % help_command.event_class, + help_command=help_command, + ) if help_command.arg_table: for arg_name in help_command.arg_table: # An argument can set an '_UNDOCUMENTED' attribute # to True to indicate a parameter that exists # but shouldn't be documented. 
This can be used # for backwards compatibility of deprecated arguments. - if getattr(help_command.arg_table[arg_name], - '_UNDOCUMENTED', False): + if getattr( + help_command.arg_table[arg_name], '_UNDOCUMENTED', False + ): continue session.emit( - 'doc-synopsis-option.%s.%s' % (help_command.event_class, - arg_name), - arg_name=arg_name, help_command=help_command) - session.emit('doc-synopsis-end.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-options-start.%s' % help_command.event_class, - help_command=help_command) + 'doc-synopsis-option.%s.%s' + % (help_command.event_class, arg_name), + arg_name=arg_name, + help_command=help_command, + ) + session.emit( + 'doc-synopsis-end.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-options-start.%s' % help_command.event_class, + help_command=help_command, + ) if help_command.arg_table: for arg_name in help_command.arg_table: - if getattr(help_command.arg_table[arg_name], - '_UNDOCUMENTED', False): + if getattr( + help_command.arg_table[arg_name], '_UNDOCUMENTED', False + ): continue - session.emit('doc-option.%s.%s' % (help_command.event_class, - arg_name), - arg_name=arg_name, help_command=help_command) - session.emit('doc-option-example.%s.%s' % - (help_command.event_class, arg_name), - arg_name=arg_name, help_command=help_command) - session.emit('doc-options-end.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-global-option.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-subitems-start.%s' % help_command.event_class, - help_command=help_command) + session.emit( + 'doc-option.%s.%s' % (help_command.event_class, arg_name), + arg_name=arg_name, + help_command=help_command, + ) + session.emit( + 'doc-option-example.%s.%s' + % (help_command.event_class, arg_name), + arg_name=arg_name, + help_command=help_command, + ) + session.emit( + 'doc-options-end.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-global-option.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-subitems-start.%s' % help_command.event_class, + help_command=help_command, + ) if help_command.command_table: for command_name in sorted(help_command.command_table.keys()): - if hasattr(help_command.command_table[command_name], - '_UNDOCUMENTED'): + if hasattr( + help_command.command_table[command_name], '_UNDOCUMENTED' + ): continue - session.emit('doc-subitem.%s.%s' - % (help_command.event_class, command_name), - command_name=command_name, - help_command=help_command) - session.emit('doc-subitems-end.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-examples.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-output.%s' % help_command.event_class, - help_command=help_command) - session.emit('doc-relateditems-start.%s' % help_command.event_class, - help_command=help_command) + session.emit( + 'doc-subitem.%s.%s' % (help_command.event_class, command_name), + command_name=command_name, + help_command=help_command, + ) + session.emit( + 'doc-subitems-end.%s' % help_command.event_class, + help_command=help_command, + ) + session.emit( + 'doc-examples.%s' % help_command.event_class, help_command=help_command + ) + session.emit( + 'doc-output.%s' % help_command.event_class, help_command=help_command + ) + session.emit( + 'doc-relateditems-start.%s' % help_command.event_class, + help_command=help_command, + ) if 
help_command.related_items: for related_item in sorted(help_command.related_items): - session.emit('doc-relateditem.%s.%s' - % (help_command.event_class, related_item), - help_command=help_command, - related_item=related_item) - session.emit('doc-relateditems-end.%s' % help_command.event_class, - help_command=help_command) + session.emit( + 'doc-relateditem.%s.%s' + % (help_command.event_class, related_item), + help_command=help_command, + related_item=related_item, + ) + session.emit( + 'doc-relateditems-end.%s' % help_command.event_class, + help_command=help_command, + ) diff --git a/awscli/bcdoc/docstringparser.py b/awscli/bcdoc/docstringparser.py index cfff547db59d..791f7d5e5021 100644 --- a/awscli/bcdoc/docstringparser.py +++ b/awscli/bcdoc/docstringparser.py @@ -57,6 +57,7 @@ class HTMLTree(object): meaning that the current_node will be the most recently opened tag. When a tag is closed, the current_node moves up to the parent node. """ + def __init__(self, doc): self.doc = doc self.head = StemNode() @@ -122,6 +123,7 @@ class TagNode(StemNode): """ A generic Tag node. It will verify that handlers exist before writing. """ + def __init__(self, tag, attrs=None, parent=None): super(TagNode, self).__init__(parent) self.attrs = attrs @@ -174,6 +176,7 @@ class DataNode(Node): """ A Node that contains only string data. """ + def __init__(self, data, parent=None): super(DataNode, self).__init__(parent) if not isinstance(data, str): diff --git a/awscli/bcdoc/restdoc.py b/awscli/bcdoc/restdoc.py index d194d0e9f0ac..62762c513adf 100644 --- a/awscli/bcdoc/restdoc.py +++ b/awscli/bcdoc/restdoc.py @@ -12,15 +12,14 @@ # language governing permissions and limitations under the License. import logging -from botocore.compat import OrderedDict from awscli.bcdoc.docstringparser import DocStringParser from awscli.bcdoc.style import ReSTStyle +from botocore.compat import OrderedDict LOG = logging.getLogger('bcdocs') class ReSTDocument(object): - def __init__(self, target='man'): self.style = ReSTStyle(self) self.target = target @@ -194,8 +193,9 @@ def add_new_section(self, name, context=None): to the document structure it was instantiated from. """ # Add a new section - section = self.__class__(name=name, target=self.target, - context=context) + section = self.__class__( + name=name, target=self.target, context=context + ) section.path = self.path + [name] # Indent the section appropriately as well section.style.indentation = self.style.indentation diff --git a/awscli/bcdoc/style.py b/awscli/bcdoc/style.py index 4470d65d3cc6..6ffbae853503 100644 --- a/awscli/bcdoc/style.py +++ b/awscli/bcdoc/style.py @@ -17,7 +17,6 @@ class BaseStyle(object): - def __init__(self, doc, indent_width=2): self.doc = doc self.indent_width = indent_width @@ -65,7 +64,6 @@ def italics(self, s): class ReSTStyle(BaseStyle): - def __init__(self, doc, indent_width=2): BaseStyle.__init__(self, doc, indent_width) self.do_p = True diff --git a/awscli/bcdoc/textwriter.py b/awscli/bcdoc/textwriter.py index 6fc171b9b475..6ccc2dab90fd 100644 --- a/awscli/bcdoc/textwriter.py +++ b/awscli/bcdoc/textwriter.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- """ - Custom docutils writer for plain text. - Based heavily on the Sphinx text writer. See copyright below. +Custom docutils writer for plain text. +Based heavily on the Sphinx text writer. See copyright below. - :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. +:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+:license: BSD, see LICENSE for details. """ + import os import re import textwrap @@ -19,10 +20,11 @@ class TextWrapper(textwrap.TextWrapper): """Custom subclass that uses a different word separator regex.""" wordsep_re = re.compile( - r'(\s+|' # any whitespace - r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start - r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words - r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash + r'(\s+|' # any whitespace + r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start + r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' + ) # em-dash MAXWIDTH = 70 @@ -81,12 +83,13 @@ def do_format(): if not toformat: return if wrap: - res = my_wrap(''.join(toformat), width=MAXWIDTH-maxindent) + res = my_wrap(''.join(toformat), width=MAXWIDTH - maxindent) else: res = ''.join(toformat).splitlines() if end: res += end result.append((indent, res)) + for itemindent, item in content: if itemindent == -1: toformat.append(item) @@ -107,9 +110,11 @@ def visit_document(self, node): def depart_document(self, node): self.end_state() - self.body = self.nl.join(line and (' '*indent + line) - for indent, lines in self.states[0] - for line in lines) + self.body = self.nl.join( + line and (' ' * indent + line) + for indent, lines in self.states[0] + for line in lines + ) # XXX header/footer? def visit_highlightlang(self, node): @@ -153,7 +158,7 @@ def depart_glossary(self, node): def visit_title(self, node): if isinstance(node.parent, nodes.Admonition): - self.add_text(node.astext()+': ') + self.add_text(node.astext() + ': ') raise nodes.SkipNode self.new_state(0) @@ -280,7 +285,7 @@ def visit_productionlist(self, node): self.add_text(production['tokenname'].ljust(maxlen) + ' ::=') lastname = production['tokenname'] else: - self.add_text('%s ' % (' '*len(lastname))) + self.add_text('%s ' % (' ' * len(lastname))) self.add_text(production.astext() + self.nl) self.end_state(wrap=False) raise nodes.SkipNode @@ -391,8 +396,9 @@ def depart_row(self, node): def visit_entry(self, node): if 'morerows' in node or 'morecols' in node: - raise NotImplementedError('Column or row spanning cells are ' - 'not implemented.') + raise NotImplementedError( + 'Column or row spanning cells are ' 'not implemented.' + ) self.new_state(0) def depart_entry(self, node): @@ -431,7 +437,7 @@ def depart_table(self, node): def writesep(char='-'): out = ['+'] for width in realwidths: - out.append(char * (width+2)) + out.append(char * (width + 2)) out.append('+') self.add_text(''.join(out) + self.nl) @@ -441,7 +447,7 @@ def writerow(row): out = ['|'] for i, cell in enumerate(line): if cell: - out.append(' ' + cell.ljust(realwidths[i]+1)) + out.append(' ' + cell.ljust(realwidths[i] + 1)) else: out.append(' ' * (realwidths[i] + 2)) out.append('|') @@ -460,7 +466,8 @@ def writerow(row): def visit_acks(self, node): self.new_state(0) self.add_text( - ', '.join(n.astext() for n in node.children[0].children) + '.') + ', '.join(n.astext() for n in node.children[0].children) + '.' + ) self.end_state() raise nodes.SkipNode @@ -516,8 +523,9 @@ def depart_list_item(self, node): self.end_state(first='%s. 
' % self.list_counter[-1], end=None) def visit_definition_list_item(self, node): - self._li_has_classifier = len(node) >= 2 and \ - isinstance(node[1], nodes.classifier) + self._li_has_classifier = len(node) >= 2 and isinstance( + node[1], nodes.classifier + ) def depart_definition_list_item(self, node): pass @@ -774,6 +782,7 @@ def _visit_admonition(self, node): def _make_depart_admonition(name): def depart_admonition(self, node): self.end_state(first=name.capitalize() + ': ') + return depart_admonition visit_attention = _visit_admonition diff --git a/awscli/clidocs.py b/awscli/clidocs.py index 9d8fb9d59159..725f76c44917 100644 --- a/awscli/clidocs.py +++ b/awscli/clidocs.py @@ -13,30 +13,33 @@ import logging import os import re -from botocore import xform_name -from botocore.model import StringShape -from botocore.utils import is_json_value_header from awscli import SCALAR_TYPES from awscli.argprocess import ParamShorthandDocGen from awscli.bcdoc.docevents import DOC_EVENTS from awscli.topictags import TopicTagDB from awscli.utils import ( - find_service_and_method_in_event_name, is_document_type, - operation_uses_document_types, is_streaming_blob_type, - is_tagged_union_type + find_service_and_method_in_event_name, + is_document_type, + is_streaming_blob_type, + is_tagged_union_type, + operation_uses_document_types, ) +from botocore import xform_name +from botocore.model import StringShape +from botocore.utils import is_json_value_header LOG = logging.getLogger(__name__) -EXAMPLES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'examples') +EXAMPLES_DIR = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 'examples' +) GLOBAL_OPTIONS_FILE = os.path.join(EXAMPLES_DIR, 'global_options.rst') -GLOBAL_OPTIONS_SYNOPSIS_FILE = os.path.join(EXAMPLES_DIR, - 'global_synopsis.rst') +GLOBAL_OPTIONS_SYNOPSIS_FILE = os.path.join( + EXAMPLES_DIR, 'global_synopsis.rst' +) class CLIDocumentEventHandler(object): - def __init__(self, help_command): self.help_command = help_command self.register(help_command.session, help_command.event_class) @@ -91,9 +94,11 @@ def unregister(self): handler method will be unregistered for the all events of that type for the specified ``event_class``. """ - self._map_handlers(self.help_command.session, - self.help_command.event_class, - self.help_command.session.unregister) + self._map_handlers( + self.help_command.session, + self.help_command.event_class, + self.help_command.session.unregister, + ) # These are default doc handlers that apply in the general case. @@ -141,15 +146,17 @@ def doc_synopsis_option(self, arg_name, help_command, **kwargs): # This arg is already documented so we can move on. return option_str = ' | '.join( - [a.cli_name for a in - self._arg_groups[argument.group_name]]) + [a.cli_name for a in self._arg_groups[argument.group_name]] + ) self._documented_arg_groups.append(argument.group_name) elif argument.cli_name.startswith('--'): option_str = '%s ' % argument.cli_name else: option_str = '<%s>' % argument.cli_name - if not (argument.required - or getattr(argument, '_DOCUMENT_AS_REQUIRED', False)): + if not ( + argument.required + or getattr(argument, '_DOCUMENT_AS_REQUIRED', False) + ): option_str = '[%s]' % option_str doc.writeln('%s' % option_str) @@ -177,13 +184,23 @@ def doc_option(self, arg_name, help_command, **kwargs): # This arg is already documented so we can move on. 
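# A brief illustrative aside (``--foo``/``--bar`` are hypothetical
# option names): when several options belong to one mutually exclusive
# group, the whole group is documented once under a combined name,
# roughly
#
#     name = '``--foo`` | ``--bar``'
#
# after which the group is recorded in self._documented_arg_groups so
# the remaining members hit the early return below.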
return name = ' | '.join( - ['``%s``' % a.cli_name for a in - self._arg_groups[argument.group_name]]) + [ + '``%s``' % a.cli_name + for a in self._arg_groups[argument.group_name] + ] + ) self._documented_arg_groups.append(argument.group_name) else: name = '``%s``' % argument.cli_name - doc.write('%s (%s)\n' % (name, self._get_argument_type_name( - argument.argument_model, argument.cli_type_name))) + doc.write( + '%s (%s)\n' + % ( + name, + self._get_argument_type_name( + argument.argument_model, argument.cli_type_name + ), + ) + ) doc.style.indent() doc.include_doc_string(argument.documentation) if is_streaming_blob_type(argument.argument_model): @@ -210,8 +227,7 @@ def doc_relateditem(self, help_command, related_item, **kwargs): doc = help_command.doc doc.write('* ') doc.style.sphinx_reference_label( - label='cli:%s' % related_item, - text=related_item + label='cli:%s' % related_item, text=related_item ) doc.write('\n') @@ -231,8 +247,9 @@ def _document_nested_structure(self, model, doc): member_type_name = getattr(model, 'type_name', None) if member_type_name == 'structure': for member_name, member_shape in model.members.items(): - self._doc_member(doc, member_name, member_shape, - stack=[model.name]) + self._doc_member( + doc, member_name, member_shape, stack=[model.name] + ) elif member_type_name == 'list': self._doc_member(doc, '', model.member, stack=[model.name]) elif member_type_name == 'map': @@ -253,15 +270,15 @@ def _doc_member(self, doc, member_name, member_shape, stack): return stack.append(member_shape.name) try: - self._do_doc_member(doc, member_name, - member_shape, stack) + self._do_doc_member(doc, member_name, member_shape, stack) finally: stack.pop() def _do_doc_member(self, doc, member_name, member_shape, stack): docs = member_shape.documentation type_name = self._get_argument_type_name( - member_shape, member_shape.type_name) + member_shape, member_shape.type_name + ) if member_name: doc.write('%s -> (%s)' % (member_name, type_name)) else: @@ -290,26 +307,27 @@ def _do_doc_member(self, doc, member_name, member_shape, stack): def _add_streaming_blob_note(self, doc): doc.style.start_note() - msg = ("This argument is of type: streaming blob. " - "Its value must be the path to a file " - "(e.g. ``path/to/file``) and must **not** " - "be prefixed with ``file://`` or ``fileb://``") + msg = ( + "This argument is of type: streaming blob. " + "Its value must be the path to a file " + "(e.g. ``path/to/file``) and must **not** " + "be prefixed with ``file://`` or ``fileb://``" + ) doc.writeln(msg) doc.style.end_note() def _add_tagged_union_note(self, shape, doc): doc.style.start_note() - members_str = ", ".join( - [f'``{key}``' for key in shape.members.keys()] + members_str = ", ".join([f'``{key}``' for key in shape.members.keys()]) + msg = ( + "This is a Tagged Union structure. Only one of the " + f"following top level keys can be set: {members_str}." ) - msg = ("This is a Tagged Union structure. Only one of the " - f"following top level keys can be set: {members_str}.") doc.writeln(msg) doc.style.end_note() class ProviderDocumentEventHandler(CLIDocumentEventHandler): - def doc_breadcrumbs(self, help_command, event_name, **kwargs): pass @@ -344,7 +362,6 @@ def doc_subitem(self, command_name, help_command, **kwargs): class ServiceDocumentEventHandler(CLIDocumentEventHandler): - # A service document has no synopsis. 
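# Sketch of the convention this class relies on: sections that do not
# apply to a service page are suppressed by overriding the inherited
# handler with a no-op, e.g. (illustrative, same shape as the real
# overrides that follow):
#
#     def doc_synopsis_end(self, help_command, **kwargs):
#         pass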
def doc_synopsis_start(self, help_command, **kwargs): pass @@ -390,7 +407,7 @@ def doc_subitem(self, command_name, help_command, **kwargs): # If the subcommand table has commands in it, # direct the subitem to the command's index because # it has more subcommands to be documented. - if (len(subcommand_table) > 0): + if len(subcommand_table) > 0: file_name = '%s/index' % command_name doc.style.tocitem(command_name, file_name=file_name) else: @@ -398,7 +415,6 @@ def doc_subitem(self, command_name, help_command, **kwargs): class OperationDocumentEventHandler(CLIDocumentEventHandler): - AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI' def doc_description(self, help_command, **kwargs): @@ -409,7 +425,6 @@ def doc_description(self, help_command, **kwargs): self._add_webapi_crosslink(help_command) self._add_note_for_document_types_if_used(help_command) - def _add_webapi_crosslink(self, help_command): doc = help_command.doc operation_model = help_command.obj @@ -422,8 +437,11 @@ def _add_webapi_crosslink(self, help_command): return doc.style.new_paragraph() doc.write("See also: ") - link = '%s/%s/%s' % (self.AWS_DOC_BASE, service_uid, - operation_model.name) + link = '%s/%s/%s' % ( + self.AWS_DOC_BASE, + service_uid, + operation_model.name, + ) doc.style.external_link(title="AWS API Documentation", link=link) doc.writeln('') @@ -439,7 +457,9 @@ def _add_note_for_document_types_if_used(self, help_command): 'not support document types.' % help_command.name ) - def _json_example_value_name(self, argument_model, include_enum_values=True): + def _json_example_value_name( + self, argument_model, include_enum_values=True + ): # If include_enum_values is True, then the valid enum values # are included as the sample JSON value. if isinstance(argument_model, StringShape): @@ -471,7 +491,10 @@ def _do_json_example(self, doc, argument_model, stack): if argument_model.type_name == 'list': doc.write('[') if argument_model.member.type_name in SCALAR_TYPES: - doc.write('%s, ...' % self._json_example_value_name(argument_model.member)) + doc.write( + '%s, ...' 
+ % self._json_example_value_name(argument_model.member) + ) else: doc.style.indent() doc.style.new_line() @@ -514,8 +537,13 @@ def _doc_input_structure_members(self, doc, argument_model, stack): member_model = members[member_name] member_type_name = member_model.type_name if member_type_name in SCALAR_TYPES: - doc.write('"%s": %s' % (member_name, - self._json_example_value_name(member_model))) + doc.write( + '"%s": %s' + % ( + member_name, + self._json_example_value_name(member_model), + ) + ) elif member_type_name == 'structure': doc.write('"%s": ' % member_name) self._json_example(doc, member_model, stack) @@ -533,8 +561,9 @@ def _doc_input_structure_members(self, doc, argument_model, stack): doc.write('}') def doc_option_example(self, arg_name, help_command, event_name, **kwargs): - service_id, operation_name = \ - find_service_and_method_in_event_name(event_name) + service_id, operation_name = find_service_and_method_in_event_name( + event_name + ) doc = help_command.doc cli_argument = help_command.arg_table[arg_name] if cli_argument.group_name in self._arg_groups: @@ -546,7 +575,8 @@ def doc_option_example(self, arg_name, help_command, event_name, **kwargs): docgen = ParamShorthandDocGen() if docgen.supports_shorthand(cli_argument.argument_model): example_shorthand_syntax = docgen.generate_shorthand_example( - cli_argument, service_id, operation_name) + cli_argument, service_id, operation_name + ) if example_shorthand_syntax is None: # If the shorthand syntax returns a value of None, # this indicates to us that there is no example @@ -560,8 +590,11 @@ def doc_option_example(self, arg_name, help_command, event_name, **kwargs): for example_line in example_shorthand_syntax.splitlines(): doc.writeln(example_line) doc.style.end_codeblock() - if argument_model is not None and argument_model.type_name == 'list' and \ - argument_model.member.type_name in SCALAR_TYPES: + if ( + argument_model is not None + and argument_model.type_name == 'list' + and argument_model.member.type_name in SCALAR_TYPES + ): # A list of scalars is special. While you *can* use # JSON ( ["foo", "bar", "baz"] ), you can also just # use the argparse behavior of space separated lists. @@ -572,7 +605,8 @@ def doc_option_example(self, arg_name, help_command, event_name, **kwargs): doc.write('Syntax') doc.style.start_codeblock() example_type = self._json_example_value_name( - member, include_enum_values=False) + member, include_enum_values=False + ) doc.write('%s %s ...' % (example_type, example_type)) if isinstance(member, StringShape) and member.enum: # If we have enum values, we can tell the user @@ -614,7 +648,8 @@ class TopicListerDocumentEventHandler(CLIDocumentEventHandler): 'the list of topics from the command line, run ``aws help topics``. ' 'To access a specific topic from the command line, run ' '``aws help [topicname]``, where ``topicname`` is the name of the ' - 'topic as it appears in the output from ``aws help topics``.') + 'topic as it appears in the output from ``aws help topics``.' + ) def __init__(self, help_command): self.help_command = help_command @@ -633,8 +668,8 @@ def doc_title(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.link_target_definition( - refname='cli:aws help %s' % self.help_command.name, - link='') + refname='cli:aws help %s' % self.help_command.name, link='' + ) doc.style.h1('AWS CLI Topic Guide') def doc_description(self, help_command, **kwargs): @@ -674,11 +709,11 @@ def doc_subitems_start(self, help_command, **kwargs): # each category. 
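# For concreteness (the topic name and description are illustrative),
# each iteration below renders one bullet of the form
#
#     * config-vars: Configuration Variables for the AWS CLI
#
# with the topic name emitted as a sphinx reference label of the form
# 'cli:aws help config-vars'.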
for topic_name in sorted(categories[category_name]): description = self._topic_tag_db.get_tag_single_value( - topic_name, 'description') + topic_name, 'description' + ) doc.write('* ') doc.style.sphinx_reference_label( - label='cli:aws help %s' % topic_name, - text=topic_name + label='cli:aws help %s' % topic_name, text=topic_name ) doc.write(': %s\n' % description) # Add a hidden toctree to make sure everything is connected in @@ -689,7 +724,6 @@ def doc_subitems_start(self, help_command, **kwargs): class TopicDocumentEventHandler(TopicListerDocumentEventHandler): - def doc_breadcrumbs(self, help_command, **kwargs): doc = help_command.doc if doc.target != 'man': @@ -697,8 +731,7 @@ def doc_breadcrumbs(self, help_command, **kwargs): doc.style.sphinx_reference_label(label='cli:aws', text='aws') doc.write(' . ') doc.style.sphinx_reference_label( - label='cli:aws help topics', - text='topics' + label='cli:aws help topics', text='topics' ) doc.write(' ]') @@ -706,16 +739,18 @@ def doc_title(self, help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.link_target_definition( - refname='cli:aws help %s' % self.help_command.name, - link='') + refname='cli:aws help %s' % self.help_command.name, link='' + ) title = self._topic_tag_db.get_tag_single_value( - help_command.name, 'title') + help_command.name, 'title' + ) doc.style.h1(title) def doc_description(self, help_command, **kwargs): doc = help_command.doc - topic_filename = os.path.join(self._topic_tag_db.topic_dir, - help_command.name + '.rst') + topic_filename = os.path.join( + self._topic_tag_db.topic_dir, help_command.name + '.rst' + ) contents = self._remove_tags_from_content(topic_filename) doc.writeln(contents) doc.style.new_paragraph() @@ -759,7 +794,8 @@ def doc_global_options(self): for arg in help_command.arg_table: argument = help_command.arg_table.get(arg) help_command.doc.writeln( - f"``{argument.cli_name}`` ({argument.cli_type_name})") + f"``{argument.cli_name}`` ({argument.cli_type_name})" + ) help_command.doc.style.indent() help_command.doc.style.new_paragraph() help_command.doc.include_doc_string(argument.documentation) diff --git a/awscli/clidriver.py b/awscli/clidriver.py index 5e6460182ef0..d4ea69fbbe76 100644 --- a/awscli/clidriver.py +++ b/awscli/clidriver.py @@ -10,69 +10,81 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import copy import json +import logging import os import platform -import sys -import copy -import logging import re +import sys import distro -import botocore.session -from botocore import xform_name -from botocore.compat import copy_kwargs, OrderedDict -from botocore.history import get_global_history_recorder -from botocore.configprovider import InstanceVarProvider -from botocore.configprovider import EnvironmentProvider -from botocore.configprovider import ScopedConfigProvider -from botocore.configprovider import ConstantProvider -from botocore.configprovider import ChainProvider +import botocore.session from awscli import __version__ +from awscli.alias import AliasCommandInjector, AliasLoader +from awscli.argparser import ( + ArgTableArgParser, + FirstPassGlobalArgParser, + MainArgParser, + ServiceArgParser, + SubCommandArgParser, +) +from awscli.argprocess import unpack_argument +from awscli.arguments import ( + BooleanArgument, + CLIArgument, + CustomArgument, + ListArgument, + UnknownArgumentError, +) +from awscli.autoprompt.core import AutoPromptDriver +from awscli.commands import CLICommand from awscli.compat import ( - default_pager, get_stderr_text_writer, get_stdout_text_writer + default_pager, + get_stderr_text_writer, + get_stdout_text_writer, +) +from awscli.constants import PARAM_VALIDATION_ERROR_RC +from awscli.errorhandler import ( + construct_cli_error_handlers_chain, + construct_entry_point_handlers_chain, ) from awscli.formatter import get_formatter -from awscli.plugin import load_plugins -from awscli.commands import CLICommand -from awscli.argparser import MainArgParser -from awscli.argparser import FirstPassGlobalArgParser -from awscli.argparser import ServiceArgParser -from awscli.argparser import ArgTableArgParser -from awscli.argparser import SubCommandArgParser -from awscli.help import ProviderHelpCommand -from awscli.help import ServiceHelpCommand -from awscli.help import OperationHelpCommand -from awscli.arguments import CustomArgument -from awscli.arguments import ListArgument -from awscli.arguments import BooleanArgument -from awscli.arguments import CLIArgument -from awscli.arguments import UnknownArgumentError -from awscli.argprocess import unpack_argument -from awscli.alias import AliasLoader -from awscli.alias import AliasCommandInjector +from awscli.help import ( + OperationHelpCommand, + ProviderHelpCommand, + ServiceHelpCommand, +) from awscli.logger import ( - set_stream_logger, remove_stream_logger, enable_crt_logging, disable_crt_logging, + enable_crt_logging, + remove_stream_logger, + set_stream_logger, ) +from awscli.plugin import load_plugins from awscli.utils import ( + IMDSRegionProvider, + OutputStreamFactory, + add_command_lineage_to_user_agent_extra, add_metadata_component_to_user_agent_extra, - add_command_lineage_to_user_agent_extra + emit_top_level_args_parsed_event, ) -from awscli.utils import emit_top_level_args_parsed_event -from awscli.utils import OutputStreamFactory -from awscli.utils import IMDSRegionProvider -from awscli.constants import PARAM_VALIDATION_ERROR_RC -from awscli.autoprompt.core import AutoPromptDriver -from awscli.errorhandler import ( - construct_cli_error_handlers_chain, construct_entry_point_handlers_chain +from botocore import xform_name +from botocore.compat import OrderedDict, copy_kwargs +from botocore.configprovider import ( + ChainProvider, + ConstantProvider, + EnvironmentProvider, + InstanceVarProvider, + ScopedConfigProvider, ) - +from botocore.history import get_global_history_recorder LOG = 
logging.getLogger('awscli.clidriver') LOG_FORMAT = ( - '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s' +) HISTORY_RECORDER = get_global_history_recorder() METADATA_FILENAME = 'metadata.json' # Don't remove this line. The idna encoding @@ -84,7 +96,7 @@ # the encodings.idna is imported and registered in the codecs registry, # which will stop the LookupErrors from happening. # See: https://bugs.python.org/issue29288 -u''.encode('idna') +''.encode('idna') def main(): @@ -99,19 +111,21 @@ def create_clidriver(args=None): debug = args.debug session = botocore.session.Session() _set_user_agent_for_session(session) - load_plugins(session.full_config.get('plugins', {}), - event_hooks=session.get_component('event_emitter')) + load_plugins( + session.full_config.get('plugins', {}), + event_hooks=session.get_component('event_emitter'), + ) error_handlers_chain = construct_cli_error_handlers_chain() - driver = CLIDriver(session=session, - error_handler=error_handlers_chain, - debug=debug) + driver = CLIDriver( + session=session, error_handler=error_handlers_chain, debug=debug + ) return driver def _get_distribution_source(): metadata_file = os.path.join( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data'), - METADATA_FILENAME + METADATA_FILENAME, ) metadata = {} if os.path.isfile(metadata_file): @@ -141,9 +155,7 @@ def _get_linux_distribution(): def _add_distribution_source_to_user_agent(session): add_metadata_component_to_user_agent_extra( - session, - 'installer', - _get_distribution_source() + session, 'installer', _get_distribution_source() ) @@ -151,7 +163,7 @@ def _add_linux_distribution_to_user_agent(session): if linux_distribution := _get_distribution(): add_metadata_component_to_user_agent_extra( session, - 'distrib', + 'distrib', linux_distribution, ) @@ -166,9 +178,7 @@ def _set_user_agent_for_session(session): def no_pager_handler(session, parsed_args, **kwargs): if parsed_args.no_cli_pager: config_store = session.get_component('config_store') - config_store.set_config_provider( - 'pager', ConstantProvider(value=None) - ) + config_store.set_config_provider('pager', ConstantProvider(value=None)) class AWSCLIEntryPoint: @@ -184,7 +194,7 @@ def main(self, args): return self._error_handler.handle_exception( e, stdout=get_stdout_text_writer(), - stderr=get_stderr_text_writer() + stderr=get_stderr_text_writer(), ) HISTORY_RECORDER.record('CLI_RC', rc, 'CLI') @@ -223,9 +233,7 @@ def _do_main(self, args): class CLIDriver(object): - - def __init__(self, session=None, error_handler=None, - debug=False): + def __init__(self, session=None, error_handler=None, debug=False): if session is None: self.session = botocore.session.get_session() _set_user_agent_for_session(self.session) @@ -245,32 +253,24 @@ def __init__(self, session=None, error_handler=None, def _update_config_chain(self): config_store = self.session.get_component('config_store') config_store.set_config_provider( - 'region', - self._construct_cli_region_chain() + 'region', self._construct_cli_region_chain() ) config_store.set_config_provider( - 'output', - self._construct_cli_output_chain() + 'output', self._construct_cli_output_chain() ) config_store.set_config_provider( - 'pager', - self._construct_cli_pager_chain() + 'pager', self._construct_cli_pager_chain() ) config_store.set_config_provider( - 'cli_binary_format', - self._construct_cli_binary_format_chain() + 'cli_binary_format', self._construct_cli_binary_format_chain() ) 
config_store.set_config_provider( - 'cli_auto_prompt', - self._construct_cli_auto_prompt_chain() + 'cli_auto_prompt', self._construct_cli_auto_prompt_chain() ) def _construct_cli_region_chain(self): providers = [ - InstanceVarProvider( - instance_var='region', - session=self.session - ), + InstanceVarProvider(instance_var='region', session=self.session), EnvironmentProvider( name='AWS_REGION', env=os.environ, @@ -344,8 +344,7 @@ def _construct_cli_auto_prompt_chain(self): env=os.environ, ), ScopedConfigProvider( - config_var_name='cli_auto_prompt', - session=self.session + config_var_name='cli_auto_prompt', session=self.session ), ConstantProvider(value='off'), ] @@ -390,24 +389,27 @@ def _build_command_table(self): """ command_table = self._build_builtin_commands(self.session) - self.session.emit('building-command-table.main', - command_table=command_table, - session=self.session, - command_object=self) + self.session.emit( + 'building-command-table.main', + command_table=command_table, + session=self.session, + command_object=self, + ) return command_table def _build_builtin_commands(self, session): commands = OrderedDict() services = session.get_available_services() for service_name in services: - commands[service_name] = ServiceCommand(cli_name=service_name, - session=self.session, - service_name=service_name) + commands[service_name] = ServiceCommand( + cli_name=service_name, + session=self.session, + service_name=service_name, + ) return commands def _add_aliases(self, command_table, parser): - injector = AliasCommandInjector( - self.session, self.alias_loader) + injector = AliasCommandInjector(self.session, self.alias_loader) injector.inject_aliases(command_table, parser) def _build_argument_table(self): @@ -420,29 +422,36 @@ def _build_argument_table(self): cli_argument.add_to_arg_table(argument_table) # Then the final step is to send out an event so handlers # can add extra arguments or modify existing arguments. 
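# A hedged sketch of a consumer (hypothetical plugin code, not part of
# this change): a handler subscribed to this event receives the table
# and can inject its own top-level argument:
#
#     def add_my_flag(argument_table, **kwargs):
#         argument_table['my-flag'] = CustomArgument(
#             'my-flag', help_text='hypothetical flag', action='store_true'
#         )
#
#     session.register('building-top-level-params', add_my_flag)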
- self.session.emit('building-top-level-params', - session=self.session, - argument_table=argument_table, - driver=self) + self.session.emit( + 'building-top-level-params', + session=self.session, + argument_table=argument_table, + driver=self, + ) return argument_table def _create_cli_argument(self, option_name, option_params): return CustomArgument( - option_name, help_text=option_params.get('help', ''), + option_name, + help_text=option_params.get('help', ''), dest=option_params.get('dest'), default=option_params.get('default'), action=option_params.get('action'), required=option_params.get('required'), choices=option_params.get('choices'), - cli_type_name=option_params.get('type')) + cli_type_name=option_params.get('type'), + ) def create_help_command(self): cli_data = self._get_cli_data() - return ProviderHelpCommand(self.session, self._get_command_table(), - self._get_argument_table(), - cli_data.get('description', None), - cli_data.get('synopsis', None), - cli_data.get('help_usage', None)) + return ProviderHelpCommand( + self.session, + self._get_command_table(), + self._get_argument_table(), + cli_data.get('description', None), + cli_data.get('synopsis', None), + cli_data.get('help_usage', None), + ) def _cli_version(self): version_string = ( @@ -452,8 +461,10 @@ def _cli_version(self): ) if 'AWS_EXECUTION_ENV' in os.environ: - version_string += f' exec-env/{os.environ.get("AWS_EXECUTION_ENV")}' - + version_string += ( + f' exec-env/{os.environ.get("AWS_EXECUTION_ENV")}' + ) + version_string += f' {_get_distribution_source()}/{platform.machine()}' if linux_distribution := _get_distribution(): @@ -466,10 +477,12 @@ def create_parser(self, command_table): command_table['help'] = self.create_help_command() cli_data = self._get_cli_data() parser = MainArgParser( - command_table, self._cli_version(), + command_table, + self._cli_version(), cli_data.get('description', None), self._get_argument_table(), - prog="aws") + prog="aws", + ) return parser def main(self, args=None): @@ -493,8 +506,7 @@ def main(self, args=None): parsed_args, remaining = parser.parse_known_args(args) self._handle_top_level_args(parsed_args) self._emit_session_event(parsed_args) - HISTORY_RECORDER.record( - 'CLI_VERSION', self._cli_version(), 'CLI') + HISTORY_RECORDER.record('CLI_VERSION', self._cli_version(), 'CLI') HISTORY_RECORDER.record('CLI_ARGUMENTS', args, 'CLI') return command_table[parsed_args.command](remaining, parsed_args) except BaseException as e: @@ -506,7 +518,7 @@ def main(self, args=None): return self._error_handler.handle_exception( e, stdout=get_stdout_text_writer(), - stderr=get_stderr_text_writer() + stderr=get_stderr_text_writer(), ) def _emit_session_event(self, parsed_args): @@ -516,8 +528,10 @@ def _emit_session_event(self, parsed_args): # session components to be reset (such as session.profile = foo) # then all the prior registered components would be removed. 
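# Sketch (hypothetical handler name): consumers subscribe with
#
#     session.register('session-initialized', on_session_init)
#
# and on_session_init(session, parsed_args, **kwargs) receives the
# keyword arguments passed to emit() below.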
self.session.emit( - 'session-initialized', session=self.session, - parsed_args=parsed_args) + 'session-initialized', + session=self.session, + parsed_args=parsed_args, + ) def _show_error(self, msg): LOG.debug(msg, exc_info=True) @@ -536,8 +550,9 @@ def _set_logging(self, debug): loggers_list = ['botocore', 'awscli', 's3transfer', 'urllib3'] if debug: for logger_name in loggers_list: - set_stream_logger(logger_name, logging.DEBUG, - format_string=LOG_FORMAT) + set_stream_logger( + logger_name, logging.DEBUG, format_string=LOG_FORMAT + ) enable_crt_logging() LOG.debug("CLI version: %s", self._cli_version()) LOG.debug("Arguments entered to CLI: %s", sys.argv[1:]) @@ -548,12 +563,10 @@ def _set_logging(self, debug): for logger_name in loggers_list: remove_stream_logger(logger_name) disable_crt_logging() - set_stream_logger(logger_name='awscli', - log_level=logging.ERROR) + set_stream_logger(logger_name='awscli', log_level=logging.ERROR) class ServiceCommand(CLICommand): - """A service command for the CLI. For example, ``aws ec2 ...`` we'd create a ServiceCommand @@ -621,7 +634,8 @@ def _get_command_table(self): def _get_service_model(self): if self._service_model is None: self._service_model = self.session.get_service_model( - self._service_name) + self._service_name + ) return self._service_model def __call__(self, args, parsed_globals): @@ -646,10 +660,12 @@ def _create_command_table(self): operation_model=operation_model, operation_caller=CLIOperationCaller(self.session), ) - self.session.emit('building-command-table.%s' % self._name, - command_table=command_table, - session=self.session, - command_object=self) + self.session.emit( + 'building-command-table.%s' % self._name, + command_table=command_table, + session=self.session, + command_object=self, + ) self._add_lineage(command_table) return command_table @@ -660,23 +676,25 @@ def _add_lineage(self, command_table): def create_help_command(self): command_table = self._get_command_table() - return ServiceHelpCommand(session=self.session, - obj=self._get_service_model(), - command_table=command_table, - arg_table=None, - event_class='.'.join(self.lineage_names), - name=self._name) + return ServiceHelpCommand( + session=self.session, + obj=self._get_service_model(), + command_table=command_table, + arg_table=None, + event_class='.'.join(self.lineage_names), + name=self._name, + ) def create_parser(self): command_table = self._get_command_table() # Also add a 'help' command. command_table['help'] = self.create_help_command() return ServiceArgParser( - operations_table=command_table, service_name=self._name) + operations_table=command_table, service_name=self._name + ) class ServiceOperation(object): - """A single operation of a service. This class represents a single operation for a service, for @@ -690,8 +708,9 @@ class ServiceOperation(object): } DEFAULT_ARG_CLASS = CLIArgument - def __init__(self, name, parent_name, operation_caller, - operation_model, session): + def __init__( + self, name, parent_name, operation_caller, operation_model, session + ): """ :type name: str @@ -780,18 +799,26 @@ def _parse_potential_subcommand(self, args, subcommand_table): def __call__(self, args, parsed_globals): # Once we know we're trying to call a particular operation # of a service we can go ahead and load the parameters. 
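# For concreteness (example command): for ``aws ec2 describe-instances``,
# self._parent_name is 'ec2' and self._name is 'describe-instances', so
# the event name below resolves to
#
#     'before-building-argument-table-parser.ec2.describe-instances'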
- event = 'before-building-argument-table-parser.%s.%s' % \ - (self._parent_name, self._name) - self._emit(event, argument_table=self.arg_table, args=args, - session=self._session) + event = 'before-building-argument-table-parser.%s.%s' % ( + self._parent_name, + self._name, + ) + self._emit( + event, + argument_table=self.arg_table, + args=args, + session=self._session, + ) subcommand_table = self.subcommand_table maybe_parsed_subcommand = self._parse_potential_subcommand( - args, subcommand_table) + args, subcommand_table + ) if maybe_parsed_subcommand is not None: new_args, subcommand_name = maybe_parsed_subcommand return subcommand_table[subcommand_name](new_args, parsed_globals) operation_parser = self._create_operation_parser( - self.arg_table, subcommand_table) + self.arg_table, subcommand_table + ) self._add_help(operation_parser) parsed_args, remaining = operation_parser.parse_known_args(args) if parsed_args.help == 'help': @@ -801,20 +828,21 @@ def __call__(self, args, parsed_globals): remaining.append(parsed_args.help) if remaining: raise UnknownArgumentError( - "Unknown options: %s" % ', '.join(remaining)) - event = 'operation-args-parsed.%s.%s' % (self._parent_name, - self._name) - self._emit(event, parsed_args=parsed_args, - parsed_globals=parsed_globals) + "Unknown options: %s" % ', '.join(remaining) + ) + event = 'operation-args-parsed.%s.%s' % (self._parent_name, self._name) + self._emit( + event, parsed_args=parsed_args, parsed_globals=parsed_globals + ) call_parameters = self._build_call_parameters( - parsed_args, self.arg_table) - event = 'calling-command.%s.%s' % (self._parent_name, - self._name) + parsed_args, self.arg_table + ) + event = 'calling-command.%s.%s' % (self._parent_name, self._name) override = self._emit_first_non_none_response( event, call_parameters=call_parameters, parsed_args=parsed_args, - parsed_globals=parsed_globals + parsed_globals=parsed_globals, ) # There are two possible values for override. 
It can be some type # of exception that will be raised if detected or it can represent @@ -837,14 +865,18 @@ def __call__(self, args, parsed_globals): return self._operation_caller.invoke( self._operation_model.service_model.service_name, self._operation_model.name, - call_parameters, parsed_globals) + call_parameters, + parsed_globals, + ) def create_help_command(self): return OperationHelpCommand( self._session, operation_model=self._operation_model, arg_table=self.arg_table, - name=self._name, event_class='.'.join(self.lineage_names)) + name=self._name, + event_class='.'.join(self.lineage_names), + ) def _add_help(self, parser): # The 'help' output is processed a little differently from @@ -874,8 +906,9 @@ def _unpack_arg(self, cli_argument, value): service_name = self._operation_model.service_model.endpoint_prefix operation_name = xform_name(self._name, '-') - return unpack_argument(session, service_name, operation_name, - cli_argument, value) + return unpack_argument( + session, service_name, operation_name, cli_argument, value + ) def _create_argument_table(self): argument_table = OrderedDict() @@ -887,8 +920,9 @@ def _create_argument_table(self): arg_dict = input_shape.members for arg_name, arg_shape in arg_dict.items(): cli_arg_name = xform_name(arg_name, '-') - arg_class = self.ARG_TYPES.get(arg_shape.type_name, - self.DEFAULT_ARG_CLASS) + arg_class = self.ARG_TYPES.get( + arg_shape.type_name, self.DEFAULT_ARG_CLASS + ) is_token = arg_shape.metadata.get('idempotencyToken', False) is_required = arg_name in required_arguments and not is_token event_emitter = self._session.get_component('event_emitter') @@ -898,34 +932,36 @@ def _create_argument_table(self): is_required=is_required, operation_model=self._operation_model, serialized_name=arg_name, - event_emitter=event_emitter) + event_emitter=event_emitter, + ) arg_object.add_to_arg_table(argument_table) LOG.debug(argument_table) - self._emit('building-argument-table.%s.%s' % (self._parent_name, - self._name), - operation_model=self._operation_model, - session=self._session, - command=self, - argument_table=argument_table) + self._emit( + 'building-argument-table.%s.%s' % (self._parent_name, self._name), + operation_model=self._operation_model, + session=self._session, + command=self, + argument_table=argument_table, + ) return argument_table def _emit(self, name, **kwargs): return self._session.emit(name, **kwargs) def _emit_first_non_none_response(self, name, **kwargs): - return self._session.emit_first_non_none_response( - name, **kwargs) + return self._session.emit_first_non_none_response(name, **kwargs) def _create_operation_parser(self, arg_table, subcommand_table): parser = ArgTableArgParser(arg_table, subcommand_table) return parser def _add_customization_to_user_agent(self): - add_command_lineage_to_user_agent_extra(self._session, self.lineage_names) + add_command_lineage_to_user_agent_extra( + self._session, self.lineage_names + ) class CLIOperationCaller(object): - """Call an AWS operation and format the response.""" def __init__(self, session): @@ -957,27 +993,31 @@ def invoke(self, service_name, operation_name, parameters, parsed_globals): """ client = self._session.create_client( - service_name, region_name=parsed_globals.region, + service_name, + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) response = self._make_client_call( - client, operation_name, parameters, parsed_globals) + client, operation_name, 
parameters, parsed_globals + ) self._display_response(operation_name, response, parsed_globals) return 0 - def _make_client_call(self, client, operation_name, parameters, - parsed_globals): + def _make_client_call( + self, client, operation_name, parameters, parsed_globals + ): py_operation_name = xform_name(operation_name) if client.can_paginate(py_operation_name) and parsed_globals.paginate: paginator = client.get_paginator(py_operation_name) response = paginator.paginate(**parameters) else: response = getattr(client, xform_name(operation_name))( - **parameters) + **parameters + ) return response - def _display_response(self, command_name, response, - parsed_globals): + def _display_response(self, command_name, response, parsed_globals): output = parsed_globals.output if output is None: output = self._session.get_config_variable('output') diff --git a/awscli/commands.py b/awscli/commands.py index c0c9b4477ed2..c6d7a0b96813 100644 --- a/awscli/commands.py +++ b/awscli/commands.py @@ -13,7 +13,6 @@ class CLICommand(object): - """Interface for a CLI command. This class represents a top level CLI command diff --git a/awscli/compat.py b/awscli/compat.py index b6ae89818c90..105ef1120a9f 100644 --- a/awscli/compat.py +++ b/awscli/compat.py @@ -6,31 +6,31 @@ # http://aws.amazon.com/apache2.0/ +import collections.abc as collections_abc +import contextlib +import io +import locale +import os +import os.path +import platform +import queue +import re +import shlex +import signal + # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys -import re -import shlex -import os -import os.path -import platform +import urllib.parse as urlparse import zipfile -import signal -import contextlib -import collections.abc as collections_abc -import locale -import queue -import io -from urllib.request import urlopen from configparser import RawConfigParser from functools import partial -import urllib.parse as urlparse from urllib.error import URLError +from urllib.request import urlopen -from botocore.compat import six -from botocore.compat import OrderedDict +from botocore.compat import OrderedDict, six # Backwards compatible definitions from six PY3 = sys.version_info[0] == 3 @@ -46,6 +46,7 @@ # package the files in a zip container. try: import zlib + ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED except ImportError: ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED @@ -74,14 +75,12 @@ class StdinMissingError(Exception): def __init__(self): - message = ( - 'stdin is required for this operation, but is not available.' - ) + message = 'stdin is required for this operation, but is not available.' super(StdinMissingError, self).__init__(message) class NonTranslatedStdout(object): - """ This context manager sets the line-end translation mode for stdout. + """This context manager sets the line-end translation mode for stdout. It is deliberately set to binary mode so that `\r` does not get added to the line ending. 
This can be useful when printing commands where a @@ -91,13 +90,16 @@ class NonTranslatedStdout(object): def __enter__(self): if sys.platform == "win32": import msvcrt - self.previous_mode = msvcrt.setmode(sys.stdout.fileno(), - os.O_BINARY) + + self.previous_mode = msvcrt.setmode( + sys.stdout.fileno(), os.O_BINARY + ) return sys.stdout def __exit__(self, type, value, traceback): if sys.platform == "win32": import msvcrt + msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) @@ -330,12 +332,12 @@ def ignore_user_entered_signals(): from platform import linux_distribution except ImportError: _UNIXCONFDIR = '/etc' - def _dist_try_harder(distname, version, id): - """ Tries some special tricks to get the distribution - information in case the default method fails. - Currently supports older SuSE Linux, Caldera OpenLinux and - Slackware Linux distributions. + def _dist_try_harder(distname, version, id): + """Tries some special tricks to get the distribution + information in case the default method fails. + Currently supports older SuSE Linux, Caldera OpenLinux and + Slackware Linux distributions. """ if os.path.exists('/var/adm/inst-log/info'): # SuSE Linux stores distribution information in that file @@ -367,7 +369,7 @@ def _dist_try_harder(distname, version, id): if os.path.isdir('/usr/lib/setup'): # Check for slackware version tag file (thanks to Greg Andruk) verfiles = os.listdir('/usr/lib/setup') - for n in range(len(verfiles)-1, -1, -1): + for n in range(len(verfiles) - 1, -1, -1): if verfiles[n][:14] != 'slack-version-': del verfiles[n] if verfiles: @@ -379,14 +381,13 @@ def _dist_try_harder(distname, version, id): return distname, version, id _release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII) - _lsb_release_version = re.compile(r'(.+)' - r' release ' - r'([\d.]+)' - r'[^(]*(?:\((.+)\))?', re.ASCII) - _release_version = re.compile(r'([^0-9]+)' - r'(?: release )?' - r'([\d.]+)' - r'[^(]*(?:\((.+)\))?', re.ASCII) + _lsb_release_version = re.compile( + r'(.+)' r' release ' r'([\d.]+)' r'[^(]*(?:\((.+)\))?', re.ASCII + ) + _release_version = re.compile( + r'([^0-9]+)' r'(?: release )?' r'([\d.]+)' r'[^(]*(?:\((.+)\))?', + re.ASCII, + ) # See also http://www.novell.com/coolsolutions/feature/11251.html # and http://linuxmafia.com/faq/Admin/release-files.html @@ -394,12 +395,24 @@ def _dist_try_harder(distname, version, id): # and http://www.die.net/doc/linux/man/man1/lsb_release.1.html _supported_dists = ( - 'SuSE', 'debian', 'fedora', 'redhat', 'centos', - 'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo', - 'UnitedLinux', 'turbolinux', 'arch', 'mageia') + 'SuSE', + 'debian', + 'fedora', + 'redhat', + 'centos', + 'mandrake', + 'mandriva', + 'rocks', + 'slackware', + 'yellowdog', + 'gentoo', + 'UnitedLinux', + 'turbolinux', + 'arch', + 'mageia', + ) def _parse_release_file(firstline): - # Default to empty 'version' and 'id' strings. Both defaults are used # when 'firstline' is empty. 'id' defaults to empty when an id can not # be deduced. 
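The reflowed ``_lsb_release_version`` and ``_release_version`` patterns above rely on Python's implicit concatenation of adjacent string literals, so the joined regexes are byte-for-byte identical to the pre-format ones. A small self-contained sketch (sample input invented here for illustration) of what the LSB-style pattern extracts from a release file's first line:

.. code-block:: python

    import re

    # Same pattern as the reflowed _lsb_release_version, written as one literal.
    _lsb_release_version = re.compile(
        r'(.+) release ([\d.]+)[^(]*(?:\((.+)\))?', re.ASCII
    )

    match = _lsb_release_version.match('CentOS release 6.10 (Final)')
    if match is not None:
        # Groups are (distname, version, codename); codename may be None.
        distname, version, codename = match.groups()
        # -> ('CentOS', '6.10', 'Final')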
@@ -429,28 +442,33 @@ def _parse_release_file(firstline): _release_file_re = re.compile(r"(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I) _codename_file_re = re.compile(r"(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I) - def linux_distribution(distname='', version='', id='', - supported_dists=_supported_dists, - full_distribution_name=1): - return _linux_distribution(distname, version, id, supported_dists, - full_distribution_name) - - def _linux_distribution(distname, version, id, supported_dists, - full_distribution_name): - - """ Tries to determine the name of the Linux OS distribution name. - The function first looks for a distribution release file in - /etc and then reverts to _dist_try_harder() in case no - suitable files are found. - supported_dists may be given to define the set of Linux - distributions to look for. It defaults to a list of currently - supported Linux distributions identified by their release file - name. - If full_distribution_name is true (default), the full - distribution read from the OS is returned. Otherwise the short - name taken from supported_dists is used. - Returns a tuple (distname, version, id) which default to the - args given as parameters. + def linux_distribution( + distname='', + version='', + id='', + supported_dists=_supported_dists, + full_distribution_name=1, + ): + return _linux_distribution( + distname, version, id, supported_dists, full_distribution_name + ) + + def _linux_distribution( + distname, version, id, supported_dists, full_distribution_name + ): + """Tries to determine the name of the Linux OS distribution name. + The function first looks for a distribution release file in + /etc and then reverts to _dist_try_harder() in case no + suitable files are found. + supported_dists may be given to define the set of Linux + distributions to look for. It defaults to a list of currently + supported Linux distributions identified by their release file + name. + If full_distribution_name is true (default), the full + distribution read from the OS is returned. Otherwise the short + name taken from supported_dists is used. + Returns a tuple (distname, version, id) which default to the + args given as parameters. """ # check for the Debian/Ubuntu /etc/lsb-release file first, needed so # that the distribution doesn't get identified as Debian. @@ -470,7 +488,7 @@ def _linux_distribution(distname, version, id, supported_dists, if _u_distname and _u_version: return (_u_distname, _u_version, _u_id) except (EnvironmentError, UnboundLocalError): - pass + pass try: etc = os.listdir(_UNIXCONFDIR) @@ -489,8 +507,12 @@ def _linux_distribution(distname, version, id, supported_dists, return _dist_try_harder(distname, version, id) # Read the first line - with open(os.path.join(_UNIXCONFDIR, file), 'r', - encoding='utf-8', errors='surrogateescape') as f: + with open( + os.path.join(_UNIXCONFDIR, file), + 'r', + encoding='utf-8', + errors='surrogateescape', + ) as f: firstline = f.readline() _distname, _version, _id = _parse_release_file(firstline) diff --git a/awscli/customizations/addexamples.py b/awscli/customizations/addexamples.py index db34371adfdd..49a9ed65b55c 100644 --- a/awscli/customizations/addexamples.py +++ b/awscli/customizations/addexamples.py @@ -26,36 +26,38 @@ For example, ``examples/ec2/ec2-create-key-pair.rst``. 
""" -import os -import logging +import logging +import os LOG = logging.getLogger(__name__) def add_examples(help_command, **kwargs): doc_path = os.path.join( - os.path.dirname( - os.path.dirname( - os.path.abspath(__file__))), 'examples') - doc_path = os.path.join(doc_path, - help_command.event_class.replace('.', os.path.sep)) + os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'examples' + ) + doc_path = os.path.join( + doc_path, help_command.event_class.replace('.', os.path.sep) + ) doc_path = doc_path + '.rst' LOG.debug("Looking for example file at: %s", doc_path) if os.path.isfile(doc_path): help_command.doc.style.h2('Examples') help_command.doc.style.start_note() - msg = ("

To use the following examples, you must have the AWS " - "CLI installed and configured. See the " - "" - "Getting started guide in the AWS CLI User Guide " - "for more information.

" - "

Unless otherwise stated, all examples have unix-like " - "quotation rules. These examples will need to be adapted " - "to your terminal's quoting rules. See " - "" - "Using quotation marks with strings " - "in the AWS CLI User Guide.

") + msg = ( + "

To use the following examples, you must have the AWS " + "CLI installed and configured. See the " + "" + "Getting started guide in the AWS CLI User Guide " + "for more information.

" + "

Unless otherwise stated, all examples have unix-like " + "quotation rules. These examples will need to be adapted " + "to your terminal's quoting rules. See " + "" + "Using quotation marks with strings " + "in the AWS CLI User Guide.

" + ) help_command.doc.include_doc_string(msg) help_command.doc.style.end_note() fp = open(doc_path) diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py index eb905df769a9..ac87215c2f00 100644 --- a/awscli/customizations/argrename.py +++ b/awscli/customizations/argrename.py @@ -10,12 +10,10 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -""" -""" +""" """ from awscli.customizations import utils - ARGUMENT_RENAMES = { # Mapping of original arg to renamed arg. # The key is ..argname @@ -76,8 +74,7 @@ 'stepfunctions.send-task-success.output': 'task-output', 'clouddirectory.publish-schema.version': 'schema-version', 'mturk.list-qualification-types.query': 'types-query', - 'workdocs.create-notification-subscription.endpoint': - 'notification-endpoint', + 'workdocs.create-notification-subscription.endpoint': 'notification-endpoint', 'workdocs.describe-users.query': 'user-query', 'lex-models.delete-bot.version': 'bot-version', 'lex-models.delete-intent.version': 'intent-version', @@ -117,36 +114,41 @@ # This is useful when you need to change the name of an argument but you # still need to support the old argument. HIDDEN_ALIASES = { - 'mgn.*.replication-servers-security-groups-ids': - 'replication-servers-security-groups-i-ds', + 'mgn.*.replication-servers-security-groups-ids': 'replication-servers-security-groups-i-ds', 'mgn.*.source-server-ids': 'source-server-i-ds', - 'mgn.*.replication-configuration-template-ids': - 'replication-configuration-template-i-ds', - 'elasticache.create-replication-group.preferred-cache-cluster-azs': - 'preferred-cache-cluster-a-zs' + 'mgn.*.replication-configuration-template-ids': 'replication-configuration-template-i-ds', + 'elasticache.create-replication-group.preferred-cache-cluster-azs': 'preferred-cache-cluster-a-zs', } def register_arg_renames(cli): for original, new_name in ARGUMENT_RENAMES.items(): event_portion, original_arg_name = original.rsplit('.', 1) - cli.register('building-argument-table.%s' % event_portion, - rename_arg(original_arg_name, new_name)) + cli.register( + 'building-argument-table.%s' % event_portion, + rename_arg(original_arg_name, new_name), + ) for original, new_name in HIDDEN_ALIASES.items(): event_portion, original_arg_name = original.rsplit('.', 1) - cli.register('building-argument-table.%s' % event_portion, - hidden_alias(original_arg_name, new_name)) + cli.register( + 'building-argument-table.%s' % event_portion, + hidden_alias(original_arg_name, new_name), + ) def rename_arg(original_arg_name, new_name): def _rename_arg(argument_table, **kwargs): if original_arg_name in argument_table: utils.rename_argument(argument_table, original_arg_name, new_name) + return _rename_arg def hidden_alias(original_arg_name, alias_name): def _alias_arg(argument_table, **kwargs): if original_arg_name in argument_table: - utils.make_hidden_alias(argument_table, original_arg_name, alias_name) + utils.make_hidden_alias( + argument_table, original_arg_name, alias_name + ) + return _alias_arg diff --git a/awscli/customizations/arguments.py b/awscli/customizations/arguments.py index 43ec260a7aa4..768c9f20bbec 100644 --- a/awscli/customizations/arguments.py +++ b/awscli/customizations/arguments.py @@ -13,10 +13,11 @@ import os import re +import jmespath + from awscli.arguments import CustomArgument from awscli.compat import compat_open from 
awscli.customizations.exceptions import ParamValidationError -import jmespath def resolve_given_outfile_path(path): @@ -62,8 +63,10 @@ def __init__(self, session): super(OverrideRequiredArgsArgument, self).__init__(**self.ARG_DATA) def _register_argument_action(self): - self._session.register('before-building-argument-table-parser', - self.override_required_args) + self._session.register( + 'before-building-argument-table-parser', + self.override_required_args, + ) def override_required_args(self, argument_table, args, **kwargs): name_in_cmdline = '--' + self.name @@ -93,16 +96,19 @@ def value(self): class QueryOutFileArgument(StatefulArgument): """An argument that writes a JMESPath query result to a file""" - def __init__(self, session, name, query, after_call_event, perm, - *args, **kwargs): + def __init__( + self, session, name, query, after_call_event, perm, *args, **kwargs + ): self._session = session self._query = query self._after_call_event = after_call_event self._perm = perm # Generate default help_text if text was not provided. if 'help_text' not in kwargs: - kwargs['help_text'] = ('Saves the command output contents of %s ' - 'to the given filename' % self.query) + kwargs['help_text'] = ( + 'Saves the command output contents of %s ' + 'to the given filename' % self.query + ) super(QueryOutFileArgument, self).__init__(name, *args, **kwargs) @property @@ -129,7 +135,8 @@ def save_query(self, parsed, **kwargs): if is_parsed_result_successful(parsed): contents = jmespath.search(self.query, parsed) with compat_open( - self.value, 'w', access_permissions=self.perm) as fp: + self.value, 'w', access_permissions=self.perm + ) as fp: # Don't write 'None' to a file -- write ''. if contents is None: fp.write('') @@ -152,8 +159,14 @@ class NestedBlobArgumentHoister(object): requiring the hoist.
""" - def __init__(self, source_arg, source_arg_blob_member, - new_arg, new_arg_doc_string, doc_string_addendum): + def __init__( + self, + source_arg, + source_arg_blob_member, + new_arg, + new_arg_doc_string, + doc_string_addendum, + ): self._source_arg = source_arg self._source_arg_blob_member = source_arg_blob_member self._new_arg = new_arg @@ -163,8 +176,7 @@ def __init__(self, source_arg, source_arg_blob_member, def __call__(self, session, argument_table, **kwargs): if not self._valid_target(argument_table): return - self._update_arg( - argument_table, self._source_arg, self._new_arg) + self._update_arg(argument_table, self._source_arg, self._new_arg) def _valid_target(self, argument_table): # Find the source argument and check that it has a member of @@ -173,16 +185,18 @@ def _valid_target(self, argument_table): arg = argument_table[self._source_arg] input_model = arg.argument_model member = input_model.members.get(self._source_arg_blob_member) - if (member is not None and - member.type_name == 'blob'): + if member is not None and member.type_name == 'blob': return True return False def _update_arg(self, argument_table, source_arg, new_arg): argument_table[new_arg] = _NestedBlobArgumentParamOverwrite( - new_arg, source_arg, self._source_arg_blob_member, + new_arg, + source_arg, + self._source_arg_blob_member, help_text=self._new_arg_doc_string, - cli_type_name='blob') + cli_type_name='blob', + ) argument_table[source_arg].required = False argument_table[source_arg].documentation += self._doc_string_addendum @@ -190,7 +204,8 @@ def _update_arg(self, argument_table, source_arg, new_arg): class _NestedBlobArgumentParamOverwrite(CustomArgument): def __init__(self, new_arg, source_arg, source_arg_blob_member, **kwargs): super(_NestedBlobArgumentParamOverwrite, self).__init__( - new_arg, **kwargs) + new_arg, **kwargs + ) self._param_to_overwrite = _reverse_xform_name(source_arg) self._source_arg_blob_member = source_arg_blob_member diff --git a/awscli/customizations/assumerole.py b/awscli/customizations/assumerole.py index b25a80b2def6..760918a62a70 100644 --- a/awscli/customizations/assumerole.py +++ b/awscli/customizations/assumerole.py @@ -1,17 +1,19 @@ -import os import logging +import os -from botocore.exceptions import ProfileNotFound from botocore.credentials import JSONFileCache +from botocore.exceptions import ProfileNotFound LOG = logging.getLogger(__name__) CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache')) def register_assume_role_provider(event_handlers): - event_handlers.register('session-initialized', - inject_assume_role_provider_cache, - unique_id='inject_assume_role_cred_provider_cache') + event_handlers.register( + 'session-initialized', + inject_assume_role_provider_cache, + unique_id='inject_assume_role_cred_provider_cache', + ) def inject_assume_role_provider_cache(session, **kwargs): @@ -33,9 +35,11 @@ def inject_assume_role_provider_cache(session, **kwargs): # immediately return. If it's invalid something else # up the stack will raise ProfileNotFound, otherwise # the configure (and other) commands will work as expected. - LOG.debug("ProfileNotFound caught when trying to inject " - "assume-role cred provider cache. Not configuring " - "JSONFileCache for assume-role.") + LOG.debug( + "ProfileNotFound caught when trying to inject " + "assume-role cred provider cache. Not configuring " + "JSONFileCache for assume-role." 
+ ) return assume_role_provider = cred_chain.get_provider('assume-role') assume_role_provider.cache = JSONFileCache(CACHE_DIR) diff --git a/awscli/customizations/awslambda.py b/awscli/customizations/awslambda.py index 121488ef18ed..a1dad12163d3 100644 --- a/awscli/customizations/awslambda.py +++ b/awscli/customizations/awslambda.py @@ -10,18 +10,18 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import zipfile import copy +import zipfile from contextlib import closing -from awscli.arguments import CustomArgument, CLIArgument -from awscli.customizations.exceptions import ParamValidationError +from awscli.arguments import CLIArgument, CustomArgument from awscli.compat import BytesIO - +from awscli.customizations.exceptions import ParamValidationError ERROR_MSG = ( "--zip-file must be a zip file with the fileb:// prefix.\n" - "Example usage: --zip-file fileb://path/to/file.zip") + "Example usage: --zip-file fileb://path/to/file.zip" +) ZIP_DOCSTRING = ( '
The path to the zip file of the {param_type} you are uploading. ' @@ -31,14 +31,21 @@ def register_lambda_create_function(cli): - cli.register('building-argument-table.lambda.create-function', - ZipFileArgumentHoister('Code').hoist) - cli.register('building-argument-table.lambda.publish-layer-version', - ZipFileArgumentHoister('Content').hoist) - cli.register('building-argument-table.lambda.update-function-code', - _modify_zipfile_docstring) - cli.register('process-cli-arg.lambda.update-function-code', - validate_is_zip_file) + cli.register( + 'building-argument-table.lambda.create-function', + ZipFileArgumentHoister('Code').hoist, + ) + cli.register( + 'building-argument-table.lambda.publish-layer-version', + ZipFileArgumentHoister('Content').hoist, + ) + cli.register( + 'building-argument-table.lambda.update-function-code', + _modify_zipfile_docstring, + ) + cli.register( + 'process-cli-arg.lambda.update-function-code', validate_is_zip_file + ) def validate_is_zip_file(cli_argument, value, **kwargs): @@ -55,6 +62,7 @@ class ZipFileArgumentHoister(object): ReplacedZipFileArgument to prevent its usage and recommend the new top-level injected parameter. """ + def __init__(self, serialized_name): self._serialized_name = serialized_name self._name = serialized_name.lower() @@ -62,8 +70,10 @@ def __init__(self, serialized_name): def hoist(self, session, argument_table, **kwargs): help_text = ZIP_DOCSTRING.format(param_type=self._name) argument_table['zip-file'] = ZipFileArgument( - 'zip-file', help_text=help_text, cli_type_name='blob', - serialized_name=self._serialized_name + 'zip-file', + help_text=help_text, + cli_type_name='blob', + serialized_name=self._serialized_name, ) argument = argument_table[self._name] model = copy.deepcopy(argument.argument_model) @@ -107,6 +117,7 @@ class ZipFileArgument(CustomArgument): --zip-file foo.zip winds up being serialized as { 'Code': { 'ZipFile': } }. """ + def __init__(self, *args, **kwargs): self._param_to_replace = kwargs.pop('serialized_name') super(ZipFileArgument, self).__init__(*args, **kwargs) @@ -131,6 +142,7 @@ class ReplacedZipFileArgument(CLIArgument): contents. And the argument class can inject those bytes into the correct serialization name. """ + def __init__(self, *args, **kwargs): super(ReplacedZipFileArgument, self).__init__(*args, **kwargs) self._cli_name = '--%s' % kwargs['name'] diff --git a/awscli/customizations/binaryformat.py b/awscli/customizations/binaryformat.py index 501284df0a8d..28156ed2dac4 100644 --- a/awscli/customizations/binaryformat.py +++ b/awscli/customizations/binaryformat.py @@ -13,9 +13,8 @@ import base64 import binascii -from botocore.exceptions import ProfileNotFound - from awscli.shorthand import ModelVisitor +from botocore.exceptions import ProfileNotFound def add_binary_formatter(session, parsed_args, **kwargs): diff --git a/awscli/customizations/binaryhoist.py b/awscli/customizations/binaryhoist.py index 94a7d5d6cdd7..247ae4e4a63b 100644 --- a/awscli/customizations/binaryhoist.py +++ b/awscli/customizations/binaryhoist.py @@ -11,10 +11,10 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
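To keep the hoisting changes above concrete: as the ``ZipFileArgument`` docstring notes, a top-level ``--zip-file`` value ends up nested back under the serialized parameter it replaced (``Code`` or ``Content``). A minimal sketch of that fold, with names simplified here; the real class also validates the ``fileb://`` prefix and unpacks the blob first:

.. code-block:: python

    def add_to_params(parameters, zip_contents, param_to_replace='Code'):
        # --zip-file fileb://foo.zip  ->  {'Code': {'ZipFile': b'...'}}
        if zip_contents is None:
            return
        parameters.setdefault(param_to_replace, {})
        parameters[param_to_replace]['ZipFile'] = zip_contents

    params = {}
    add_to_params(params, b'PK\x03\x04')  # bytes read from the zip file
    assert params == {'Code': {'ZipFile': b'PK\x03\x04'}}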
import copy - from dataclasses import dataclass from typing import Optional -from awscli.arguments import CustomArgument, CLIArgument + +from awscli.arguments import CLIArgument, CustomArgument from awscli.customizations.exceptions import ParamValidationError diff --git a/awscli/customizations/cliinput.py b/awscli/customizations/cliinput.py index 9b8aa2e254f7..d145e967eac2 100644 --- a/awscli/customizations/cliinput.py +++ b/awscli/customizations/cliinput.py @@ -15,9 +15,9 @@ from ruamel.yaml import YAML from ruamel.yaml.error import YAMLError -from awscli.paramfile import get_paramfile, LOCAL_PREFIX_MAP from awscli.argprocess import ParamError, ParamSyntaxError from awscli.customizations.arguments import OverrideRequiredArgsArgument +from awscli.paramfile import LOCAL_PREFIX_MAP, get_paramfile def register_cli_input_args(cli): @@ -49,6 +49,7 @@ class CliInputArgument(OverrideRequiredArgsArgument): The parameters in the file will be overwritten by any arguments specified on the command line. """ + def _register_argument_action(self): self._session.register( 'calling-command.*', self.add_to_call_parameters @@ -65,7 +66,7 @@ def add_to_call_parameters(self, call_parameters, parsed_args, **kwargs): raise ParamError( self.cli_name, "Invalid type: expecting map, " - "received %s" % type(loaded_params) + "received %s" % type(loaded_params), ) self._update_call_parameters(call_parameters, loaded_params) @@ -75,7 +76,8 @@ def _get_arg_value(self, parsed_args): return cli_input_args = [ - k for k, v in vars(parsed_args).items() + k + for k, v in vars(parsed_args).items() if v is not None and k.startswith('cli_input') ] if len(cli_input_args) != 1: @@ -109,6 +111,7 @@ class CliInputJSONArgument(CliInputArgument): generated by ``--generate-cli-skeleton``. The items in the JSON string will not clobber other arguments entered into the command line. """ + ARG_DATA = { 'name': 'cli-input-json', 'group_name': 'cli_input', @@ -120,7 +123,7 @@ class CliInputJSONArgument(CliInputArgument): 'pass arbitrary binary values using a JSON-provided value as the ' 'string will be taken literally. This may not be specified along ' 'with ``--cli-input-yaml``.' - ) + ), } def _load_parameters(self, arg_value): @@ -141,7 +144,7 @@ class CliInputYAMLArgument(CliInputArgument): 'If other arguments are provided on the command line, those ' 'values will override the YAML-provided values. This may not be ' 'specified along with ``--cli-input-json``.' - ) + ), } def _load_parameters(self, arg_value): diff --git a/awscli/customizations/cloudformation/__init__.py b/awscli/customizations/cloudformation/__init__.py index 409966b73e1d..20b8153d41a4 100644 --- a/awscli/customizations/cloudformation/__init__.py +++ b/awscli/customizations/cloudformation/__init__.py @@ -10,8 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
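As the ``CliInputArgument`` docstring a few hunks above states, values loaded through ``--cli-input-json``/``--cli-input-yaml`` never clobber parameters given explicitly on the command line. A toy illustration of that merge order (a hypothetical helper, not the class's actual ``_update_call_parameters`` implementation):

.. code-block:: python

    def merge_file_parameters(call_parameters, loaded_params):
        # File-supplied values only fill keys the command line left unset.
        for key, value in loaded_params.items():
            if key not in call_parameters:
                call_parameters[key] = value

    params = {'Bucket': 'set-on-command-line'}
    merge_file_parameters(params, {'Bucket': 'from-file', 'Key': 'from-file'})
    assert params == {'Bucket': 'set-on-command-line', 'Key': 'from-file'}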
-from awscli.customizations.cloudformation.package import PackageCommand from awscli.customizations.cloudformation.deploy import DeployCommand +from awscli.customizations.cloudformation.package import PackageCommand def initialize(cli): diff --git a/awscli/customizations/cloudformation/artifact_exporter.py b/awscli/customizations/cloudformation/artifact_exporter.py index 8a4c18a28460..884749e87ef8 100644 --- a/awscli/customizations/cloudformation/artifact_exporter.py +++ b/awscli/customizations/cloudformation/artifact_exporter.py @@ -11,23 +11,24 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import contextlib import logging import os +import shutil import tempfile -import zipfile -import contextlib import uuid -import shutil -from botocore.utils import set_value_from_jmespath - -from awscli.compat import urlparse +import zipfile from contextlib import contextmanager -from awscli.compat import compat_open -from awscli.customizations.cloudformation import exceptions -from awscli.customizations.cloudformation.yamlhelper import yaml_dump, \ - yaml_parse + import jmespath +from awscli.compat import compat_open, urlparse +from awscli.customizations.cloudformation import exceptions +from awscli.customizations.cloudformation.yamlhelper import ( + yaml_dump, + yaml_parse, +) +from botocore.utils import set_value_from_jmespath LOG = logging.getLogger(__name__) @@ -60,19 +61,16 @@ def is_local_file(path): def is_zip_file(path): - return ( - is_path_value_valid(path) and - zipfile.is_zipfile(path)) - + return is_path_value_valid(path) and zipfile.is_zipfile(path) -def parse_s3_url(url, - bucket_name_property="Bucket", - object_key_property="Key", - version_property=None): - - if isinstance(url, str) \ - and url.startswith("s3://"): +def parse_s3_url( + url, + bucket_name_property="Bucket", + object_key_property="Key", + version_property=None, +): + if isinstance(url, str) and url.startswith("s3://"): # Python < 2.7.10 don't parse query parameters from URI with custom # scheme such as s3://blah/blah. As a workaround, remove scheme # altogether to trigger the parser "s3://foo/bar?v=1" =>"//foo/bar?v=1" @@ -86,19 +84,25 @@ def parse_s3_url(url, # If there is a query string that has a single versionId field, # set the object version and return - if version_property is not None \ - and 'versionId' in query \ - and len(query['versionId']) == 1: + if ( + version_property is not None + and 'versionId' in query + and len(query['versionId']) == 1 + ): result[version_property] = query['versionId'][0] return result - raise ValueError("URL given to the parse method is not a valid S3 url " - "{0}".format(url)) + raise ValueError( + "URL given to the parse method is not a valid S3 url " "{0}".format( + url + ) + ) -def upload_local_artifacts(resource_id, resource_dict, property_name, - parent_dir, uploader): +def upload_local_artifacts( + resource_id, resource_dict, property_name, parent_dir, uploader +): """ Upload local artifacts referenced by the property at given resource and return S3 URL of the uploaded object. 
It is the responsibility of callers @@ -134,8 +138,11 @@ def upload_local_artifacts(resource_id, resource_dict, property_name, # This check is supporting the case where your resource does not # refer to local artifacts # Nothing to do if property value is an S3 URL - LOG.debug("Property {0} of {1} is already a S3 URL" - .format(property_name, resource_id)) + LOG.debug( + "Property {0} of {1} is already a S3 URL".format( + property_name, resource_id + ) + ) return local_path local_path = make_abs_path(parent_dir, local_path) @@ -149,14 +156,15 @@ def upload_local_artifacts(resource_id, resource_dict, property_name, return uploader.upload_with_dedup(local_path) raise exceptions.InvalidLocalPathError( - resource_id=resource_id, - property_name=property_name, - local_path=local_path) + resource_id=resource_id, + property_name=property_name, + local_path=local_path, + ) def zip_and_upload(local_path, uploader): with zip_folder(local_path) as zipfile: - return uploader.upload_with_dedup(zipfile) + return uploader.upload_with_dedup(zipfile) @contextmanager @@ -169,8 +177,7 @@ def zip_folder(folder_path): :return: Name of the zipfile """ - filename = os.path.join( - tempfile.gettempdir(), "data-" + uuid.uuid4().hex) + filename = os.path.join(tempfile.gettempdir(), "data-" + uuid.uuid4().hex) zipfile_name = make_zip(filename, folder_path) try: @@ -189,8 +196,7 @@ def make_zip(filename, source_root): for root, dirs, files in os.walk(source_root, followlinks=True): for filename in files: full_path = os.path.join(root, filename) - relative_path = os.path.relpath( - full_path, source_root) + relative_path = os.path.relpath(full_path, source_root) zf.write(full_path, relative_path) return zipfile_name @@ -241,17 +247,25 @@ def export(self, resource_id, resource_dict, parent_dir): return if isinstance(property_value, dict): - LOG.debug("Property {0} of {1} resource is not a URL" - .format(self.PROPERTY_NAME, resource_id)) + LOG.debug( + "Property {0} of {1} resource is not a URL".format( + self.PROPERTY_NAME, resource_id + ) + ) return # If property is a file but not a zip file, place file in temp # folder and send the temp folder to be zipped temp_dir = None - if is_local_file(property_value) and not \ - is_zip_file(property_value) and self.FORCE_ZIP: + if ( + is_local_file(property_value) + and not is_zip_file(property_value) + and self.FORCE_ZIP + ): temp_dir = copy_to_temp_dir(property_value) - set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, temp_dir) + set_value_from_jmespath( + resource_dict, self.PROPERTY_NAME, temp_dir + ) try: self.do_export(resource_id, resource_dict, parent_dir) @@ -259,10 +273,11 @@ def export(self, resource_id, resource_dict, parent_dir): except Exception as ex: LOG.debug("Unable to export", exc_info=ex) raise exceptions.ExportFailedError( - resource_id=resource_id, - property_name=self.PROPERTY_NAME, - property_value=property_value, - ex=ex) + resource_id=resource_id, + property_name=self.PROPERTY_NAME, + property_value=property_value, + ex=ex, + ) finally: if temp_dir: shutil.rmtree(temp_dir) @@ -272,10 +287,16 @@ def do_export(self, resource_id, resource_dict, parent_dir): Default export action is to upload artifacts and set the property to S3 URL of the uploaded object """ - uploaded_url = upload_local_artifacts(resource_id, resource_dict, - self.PROPERTY_NAME, - parent_dir, self.uploader) - set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, uploaded_url) + uploaded_url = upload_local_artifacts( + resource_id, + resource_dict, + self.PROPERTY_NAME, + 
parent_dir, + self.uploader, + ) + set_value_from_jmespath( + resource_dict, self.PROPERTY_NAME, uploaded_url + ) class ResourceWithS3UrlDict(Resource): @@ -297,16 +318,20 @@ def do_export(self, resource_id, resource_dict, parent_dir): of the uploaded object """ - artifact_s3_url = \ - upload_local_artifacts(resource_id, resource_dict, - self.PROPERTY_NAME, - parent_dir, self.uploader) + artifact_s3_url = upload_local_artifacts( + resource_id, + resource_dict, + self.PROPERTY_NAME, + parent_dir, + self.uploader, + ) parsed_url = parse_s3_url( - artifact_s3_url, - bucket_name_property=self.BUCKET_NAME_PROPERTY, - object_key_property=self.OBJECT_KEY_PROPERTY, - version_property=self.VERSION_PROPERTY) + artifact_s3_url, + bucket_name_property=self.BUCKET_NAME_PROPERTY, + object_key_property=self.OBJECT_KEY_PROPERTY, + version_property=self.VERSION_PROPERTY, + ) set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, parsed_url) @@ -440,6 +465,7 @@ class CloudFormationStackResource(Resource): Represents CloudFormation::Stack resource that can refer to a nested stack template via TemplateURL property. """ + RESOURCE_TYPE = "AWS::CloudFormation::Stack" PROPERTY_NAME = "TemplateURL" @@ -455,21 +481,26 @@ def do_export(self, resource_id, resource_dict, parent_dir): template_path = resource_dict.get(self.PROPERTY_NAME, None) - if template_path is None or is_s3_url(template_path) or \ - template_path.startswith("http://") or \ - template_path.startswith("https://"): + if ( + template_path is None + or is_s3_url(template_path) + or template_path.startswith("http://") + or template_path.startswith("https://") + ): # Nothing to do return abs_template_path = make_abs_path(parent_dir, template_path) if not is_local_file(abs_template_path): raise exceptions.InvalidTemplateUrlParameterError( - property_name=self.PROPERTY_NAME, - resource_id=resource_id, - template_path=abs_template_path) + property_name=self.PROPERTY_NAME, + resource_id=resource_id, + template_path=abs_template_path, + ) - exported_template_dict = \ - Template(template_path, parent_dir, self.uploader).export() + exported_template_dict = Template( + template_path, parent_dir, self.uploader + ).export() exported_template_str = yaml_dump(exported_template_dict) @@ -478,13 +509,17 @@ def do_export(self, resource_id, resource_dict, parent_dir): temporary_file.flush() url = self.uploader.upload_with_dedup( - temporary_file.name, "template") + temporary_file.name, "template" + ) # TemplateUrl property requires S3 URL to be in path-style format parts = parse_s3_url(url, version_property="Version") s3_path_url = self.uploader.to_path_style_s3_url( - parts["Key"], parts.get("Version", None)) - set_value_from_jmespath(resource_dict, self.PROPERTY_NAME, s3_path_url) + parts["Key"], parts.get("Version", None) + ) + set_value_from_jmespath( + resource_dict, self.PROPERTY_NAME, s3_path_url + ) class ServerlessApplicationResource(CloudFormationStackResource): @@ -492,15 +527,16 @@ class ServerlessApplicationResource(CloudFormationStackResource): Represents Serverless::Application resource that can refer to a nested app template via Location property. """ + RESOURCE_TYPE = "AWS::Serverless::Application" PROPERTY_NAME = "Location" - class GlueJobCommandScriptLocationResource(Resource): """ Represents Glue::Job resource. """ + RESOURCE_TYPE = "AWS::Glue::Job" # Note the PROPERTY_NAME includes a '.' implying it's nested. 
PROPERTY_NAME = "Command.ScriptLocation" @@ -510,6 +546,7 @@ class CodeCommitRepositoryS3Resource(ResourceWithS3UrlDict): """ Represents CodeCommit::Repository resource. """ + RESOURCE_TYPE = "AWS::CodeCommit::Repository" PROPERTY_NAME = "Code.S3" BUCKET_NAME_PROPERTY = "Bucket" @@ -538,12 +575,12 @@ class CodeCommitRepositoryS3Resource(ResourceWithS3UrlDict): GlueJobCommandScriptLocationResource, StepFunctionsStateMachineDefinitionResource, ServerlessStateMachineDefinitionResource, - CodeCommitRepositoryS3Resource + CodeCommitRepositoryS3Resource, ] METADATA_EXPORT_LIST = [ ServerlessRepoApplicationReadme, - ServerlessRepoApplicationLicense + ServerlessRepoApplicationLicense, ] @@ -551,29 +588,34 @@ def include_transform_export_handler(template_dict, uploader, parent_dir): if template_dict.get("Name", None) != "AWS::Include": return template_dict - include_location = template_dict.get("Parameters", {}).get("Location", None) - if not include_location \ - or not is_path_value_valid(include_location) \ - or is_s3_url(include_location): + include_location = template_dict.get("Parameters", {}).get( + "Location", None + ) + if ( + not include_location + or not is_path_value_valid(include_location) + or is_s3_url(include_location) + ): # `include_location` is either empty, or not a string, or an S3 URI return template_dict # We are confident at this point that `include_location` is a string containing the local path abs_include_location = os.path.join(parent_dir, include_location) if is_local_file(abs_include_location): - template_dict["Parameters"]["Location"] = uploader.upload_with_dedup(abs_include_location) + template_dict["Parameters"]["Location"] = uploader.upload_with_dedup( + abs_include_location + ) else: raise exceptions.InvalidLocalPathError( resource_id="AWS::Include", property_name="Location", - local_path=abs_include_location) + local_path=abs_include_location, + ) return template_dict -GLOBAL_EXPORT_DICT = { - "Fn::Transform": include_transform_export_handler -} +GLOBAL_EXPORT_DICT = {"Fn::Transform": include_transform_export_handler} class Template(object): @@ -581,17 +623,23 @@ class Template(object): Class to export a CloudFormation template """ - def __init__(self, template_path, parent_dir, uploader, - resources_to_export=RESOURCES_EXPORT_LIST, - metadata_to_export=METADATA_EXPORT_LIST): + def __init__( + self, + template_path, + parent_dir, + uploader, + resources_to_export=RESOURCES_EXPORT_LIST, + metadata_to_export=METADATA_EXPORT_LIST, + ): """ Reads the template and makes it ready for export """ if not (is_local_folder(parent_dir) and os.path.isabs(parent_dir)): - raise ValueError("parent_dir parameter must be " - "an absolute path to a folder {0}" - .format(parent_dir)) + raise ValueError( + "parent_dir parameter must be " + "an absolute path to a folder {0}".format(parent_dir) + ) abs_template_path = make_abs_path(parent_dir, template_path) template_dir = os.path.dirname(abs_template_path) @@ -614,7 +662,9 @@ def export_global_artifacts(self, template_dict): """ for key, val in template_dict.items(): if key in GLOBAL_EXPORT_DICT: - template_dict[key] = GLOBAL_EXPORT_DICT[key](val, self.uploader, self.template_dir) + template_dict[key] = GLOBAL_EXPORT_DICT[key]( + val, self.uploader, self.template_dir + ) elif isinstance(val, dict): self.export_global_artifacts(val) elif isinstance(val, list): @@ -640,7 +690,9 @@ def export_metadata(self, template_dict): continue exporter = exporter_class(self.uploader) - exporter.export(metadata_type, metadata_dict, self.template_dir) 
+ exporter.export( + metadata_type, metadata_dict, self.template_dir + ) return template_dict @@ -665,10 +717,11 @@ def export(self): def export_resources(self, resource_dict): for resource_id, resource in resource_dict.items(): - if resource_id.startswith("Fn::ForEach::"): if not isinstance(resource, list) or len(resource) != 3: - raise exceptions.InvalidForEachIntrinsicFunctionError(resource_id=resource_id) + raise exceptions.InvalidForEachIntrinsicFunctionError( + resource_id=resource_id + ) self.export_resources(resource[2]) continue diff --git a/awscli/customizations/cloudformation/deploy.py b/awscli/customizations/cloudformation/deploy.py index 7e6d9228c467..e94851362582 100644 --- a/awscli/customizations/cloudformation/deploy.py +++ b/awscli/customizations/cloudformation/deploy.py @@ -13,22 +13,19 @@ import functools import json +import logging import os import sys -import logging - -from botocore.client import Config -from awscli.compat import compat_open -from awscli.customizations.exceptions import ParamValidationError +from awscli.compat import compat_open, get_stdout_text_writer from awscli.customizations.cloudformation import exceptions from awscli.customizations.cloudformation.deployer import Deployer -from awscli.customizations.s3uploader import S3Uploader from awscli.customizations.cloudformation.yamlhelper import yaml_parse - from awscli.customizations.commands import BasicCommand -from awscli.compat import get_stdout_text_writer +from awscli.customizations.exceptions import ParamValidationError +from awscli.customizations.s3uploader import S3Uploader from awscli.utils import write_exception +from botocore.client import Config LOG = logging.getLogger(__name__) @@ -61,8 +58,10 @@ def parse(self, data): class CloudFormationLikeParameterOverrideParser(BaseParameterOverrideParser): def can_parse(self, data): for param_pair in data: - if ('ParameterKey' not in param_pair or - 'ParameterValue' not in param_pair): + if ( + 'ParameterKey' not in param_pair + or 'ParameterValue' not in param_pair + ): return False if len(param_pair.keys()) > 2: return False @@ -76,8 +75,7 @@ def parse(self, data): # "ParameterValue": "string", # }] return { - param['ParameterKey']: param['ParameterValue'] - for param in data + param['ParameterKey']: param['ParameterValue'] for param in data } @@ -98,14 +96,14 @@ def parse(self, data): class DeployCommand(BasicCommand): - - MSG_NO_EXECUTE_CHANGESET = \ - ("Changeset created successfully. Run the following command to " - "review changes:" - "\n" - "aws cloudformation describe-change-set --change-set-name " - "{changeset_id}" - "\n") + MSG_NO_EXECUTE_CHANGESET = ( + "Changeset created successfully. Run the following command to " + "review changes:" + "\n" + "aws cloudformation describe-change-set --change-set-name " + "{changeset_id}" + "\n" + ) MSG_EXECUTE_SUCCESS = "Successfully created/updated stack - {stack_name}\n" @@ -113,8 +111,9 @@ class DeployCommand(BasicCommand): TAGS_CMD = "tags" NAME = 'deploy' - DESCRIPTION = BasicCommand.FROM_FILE("cloudformation", - "_deploy_description.rst") + DESCRIPTION = BasicCommand.FROM_FILE( + "cloudformation", "_deploy_description.rst" + ) ARG_TABLE = [ { @@ -123,7 +122,7 @@ class DeployCommand(BasicCommand): 'help_text': ( 'The path where your AWS CloudFormation' ' template is located.' - ) + ), }, { 'name': 'stack-name', @@ -133,7 +132,7 @@ class DeployCommand(BasicCommand): 'The name of the AWS CloudFormation stack you\'re deploying to.' ' If you specify an existing stack, the command updates the' ' stack. 
If you specify a new stack, the command creates it.' - ) + ), }, { 'name': 's3-bucket', @@ -142,7 +141,7 @@ 'help_text': ( 'The name of the S3 bucket where this command uploads your ' 'CloudFormation template. This is required for deployments of ' 'templates sized greater than 51,200 bytes' - ) + ), }, { "name": "force-upload", @@ -151,7 +150,7 @@ 'help_text': ( 'Indicates whether to override existing files in the S3 bucket.' ' Specify this flag to upload artifacts even if they ' 'match existing artifacts in the S3 bucket.' - ) + ), }, { 'name': 's3-prefix', @@ -160,15 +159,14 @@ ' artifacts\' name when it uploads them to the S3 bucket.' ' The prefix name is a path name (folder name) for' ' the S3 bucket.' - ) + ), }, - { 'name': 'kms-key-id', 'help_text': ( 'The ID of an AWS KMS key that the command uses' ' to encrypt artifacts that are at rest in the S3 bucket.' - ) + ), }, { 'name': PARAMETER_OVERRIDE_CMD, @@ -184,7 +182,7 @@ ' parameters that don\'t have a default value.' ' Syntax: ParameterKey1=ParameterValue1' ' ParameterKey2=ParameterValue2 ... or JSON file (see Examples)' - ) + ), }, { 'name': 'capabilities', @@ -194,11 +192,8 @@ 'type': 'array', 'items': { 'type': 'string', - 'enum': [ - 'CAPABILITY_IAM', - 'CAPABILITY_NAMED_IAM' - ] - } + 'enum': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'], + }, }, 'default': [], 'help_text': ( @@ -215,8 +210,7 @@ ' custom names, you must specify CAPABILITY_NAMED_IAM. If you' ' don\'t specify this parameter, this action returns an' ' InsufficientCapabilities error.' - ) - + ), }, { 'name': 'no-execute-changeset', @@ -230,7 +224,7 @@ ' AWS CloudFormation change set and then exits without' ' executing the change set. After you view the change set,' ' execute it to implement your changes.' - ) + ), }, { 'name': 'disable-rollback', @@ -242,7 +236,7 @@ 'help_text': ( 'Preserve the state of previously provisioned resources when ' 'the execute-change-set operation fails.' - ) + ), }, { 'name': 'no-disable-rollback', @@ -254,7 +248,7 @@ 'help_text': ( 'Roll back all resource changes when the execute-change-set ' 'operation fails.' - ) + ), }, { 'name': 'role-arn', @@ -263,21 +257,16 @@ 'The Amazon Resource Name (ARN) of an AWS Identity and Access ' 'Management (IAM) role that AWS CloudFormation assumes when ' 'executing the change set.' - ) + ), }, { 'name': 'notification-arns', 'required': False, - 'schema': { - 'type': 'array', - 'items': { - 'type': 'string' - } - }, + 'schema': {'type': 'array', 'items': {'type': 'string'}}, 'help_text': ( 'Amazon Simple Notification Service topic Amazon Resource Names' ' (ARNs) that AWS CloudFormation associates with the stack.' - ) + ), }, { 'name': 'fail-on-empty-changeset', @@ -290,7 +279,7 @@ 'Specify if the CLI should return a non-zero exit code if ' 'there are no changes to be made to the stack. The default ' 'behavior is to return a zero exit code.' - ) + ), }, { 'name': 'no-fail-on-empty-changeset', @@ -302,46 +291,43 @@ 'help_text': ( 'Causes the CLI to return an exit code of 0 if there are no ' 'changes to be made to the stack.'
- ) + ), }, { 'name': TAGS_CMD, 'action': 'store', 'required': False, - 'schema': { - 'type': 'array', - 'items': { - 'type': 'string' - } - }, + 'schema': {'type': 'array', 'items': {'type': 'string'}}, 'default': [], 'help_text': ( 'A list of tags to associate with the stack that is created' ' or updated. AWS CloudFormation also propagates these tags' ' to resources in the stack if the resource supports it.' ' Syntax: TagKey1=TagValue1 TagKey2=TagValue2 ...' - ) - } + ), + }, ] def _run_main(self, parsed_args, parsed_globals): - cloudformation_client = \ - self._session.create_client( - 'cloudformation', region_name=parsed_globals.region, - endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + cloudformation_client = self._session.create_client( + 'cloudformation', + region_name=parsed_globals.region, + endpoint_url=parsed_globals.endpoint_url, + verify=parsed_globals.verify_ssl, + ) template_dict, template_str, template_size = self.load_template_file( - parsed_args.template_file) + parsed_args.template_file + ) stack_name = parsed_args.stack_name parameter_overrides = self.parse_parameter_overrides( parsed_args.parameter_overrides ) tags_dict = self.parse_key_value_arg(parsed_args.tags, self.TAGS_CMD) - tags = [{"Key": key, "Value": value} - for key, value in tags_dict.items()] - + tags = [ + {"Key": key, "Value": value} for key, value in tags_dict.items() + ] parameters = self.merge_parameters(template_dict, parameter_overrides) @@ -354,39 +340,62 @@ def _run_main(self, parsed_args, parsed_globals): "s3", config=Config(signature_version='s3v4'), region_name=parsed_globals.region, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) - s3_uploader = S3Uploader(s3_client, - bucket, - parsed_args.s3_prefix, - parsed_args.kms_key_id, - parsed_args.force_upload) + s3_uploader = S3Uploader( + s3_client, + bucket, + parsed_args.s3_prefix, + parsed_args.kms_key_id, + parsed_args.force_upload, + ) else: s3_uploader = None deployer = Deployer(cloudformation_client) - return self.deploy(deployer, stack_name, template_str, - parameters, parsed_args.capabilities, - parsed_args.execute_changeset, parsed_args.role_arn, - parsed_args.notification_arns, s3_uploader, - tags, parsed_args.fail_on_empty_changeset, - parsed_args.disable_rollback) + return self.deploy( + deployer, + stack_name, + template_str, + parameters, + parsed_args.capabilities, + parsed_args.execute_changeset, + parsed_args.role_arn, + parsed_args.notification_arns, + s3_uploader, + tags, + parsed_args.fail_on_empty_changeset, + parsed_args.disable_rollback, + ) def load_template_file(self, template_file): template_path = os.path.expanduser(template_file) if not os.path.isfile(template_path): raise exceptions.InvalidTemplatePathError( - template_path=template_path) + template_path=template_path + ) with compat_open(template_path, "r") as handle: template_str = handle.read() template_dict = yaml_parse(template_str) template_size = os.path.getsize(template_path) return template_dict, template_str, template_size - def deploy(self, deployer, stack_name, template_str, - parameters, capabilities, execute_changeset, role_arn, - notification_arns, s3_uploader, tags, - fail_on_empty_changeset=False, disable_rollback=False): + def deploy( + self, + deployer, + stack_name, + template_str, + parameters, + capabilities, + execute_changeset, + role_arn, + notification_arns, + s3_uploader, + tags, + fail_on_empty_changeset=False, + disable_rollback=False, + ): try: result = 
deployer.create_and_wait_for_changeset( stack_name=stack_name, @@ -396,7 +405,7 @@ def deploy(self, deployer, stack_name, template_str, role_arn=role_arn, notification_arns=notification_arns, s3_uploader=s3_uploader, - tags=tags + tags=tags, ) except exceptions.ChangeEmptyError as ex: if fail_on_empty_changeset: @@ -405,14 +414,19 @@ def deploy(self, deployer, stack_name, template_str, return 0 if execute_changeset: - deployer.execute_changeset(result.changeset_id, stack_name, - disable_rollback) + deployer.execute_changeset( + result.changeset_id, stack_name, disable_rollback + ) deployer.wait_for_execute(stack_name, result.changeset_type) - sys.stdout.write(self.MSG_EXECUTE_SUCCESS.format( - stack_name=stack_name)) + sys.stdout.write( + self.MSG_EXECUTE_SUCCESS.format(stack_name=stack_name) + ) else: - sys.stdout.write(self.MSG_NO_EXECUTE_CHANGESET.format( - changeset_id=result.changeset_id)) + sys.stdout.write( + self.MSG_NO_EXECUTE_CHANGESET.format( + changeset_id=result.changeset_id + ) + ) sys.stdout.flush() return 0 @@ -434,10 +448,7 @@ def merge_parameters(self, template_dict, parameter_overrides): return parameter_values for key, value in template_dict["Parameters"].items(): - - obj = { - "ParameterKey": key - } + obj = {"ParameterKey": key} if key in parameter_overrides: obj["ParameterValue"] = parameter_overrides[key] @@ -466,7 +477,7 @@ def parse_parameter_overrides(self, arg_value): parsers = [ CloudFormationLikeParameterOverrideParser(), CodePipelineLikeParameterOverrideParser(), - StringEqualsParameterOverrideParser() + StringEqualsParameterOverrideParser(), ] for parser in parsers: if parser.can_parse(data): @@ -475,13 +486,13 @@ def parse_parameter_overrides(self, arg_value): 'JSON passed to --parameter-overrides must be one of ' 'the formats: ["Key1=Value1","Key2=Value2", ...] , ' '[{"ParameterKey": "Key1", "ParameterValue": "Value1"}, ...] , ' - '["Parameters": {"Key1": "Value1", "Key2": "Value2", ...}]') + '["Parameters": {"Key1": "Value1", "Key2": "Value2", ...}]' + ) else: # In case it was in deploy command format # and was input via command line return self.parse_key_value_arg( - arg_value, - self.PARAMETER_OVERRIDE_CMD + arg_value, self.PARAMETER_OVERRIDE_CMD ) def parse_key_value_arg(self, arg_value, argname): @@ -496,14 +507,13 @@ def parse_key_value_arg(self, arg_value, argname): """ result = {} for data in arg_value: - # Split at first '=' from left key_value_pair = data.split("=", 1) if len(key_value_pair) != 2: raise exceptions.InvalidKeyValuePairArgumentError( - argname=argname, - value=key_value_pair) + argname=argname, value=key_value_pair + ) result[key_value_pair[0]] = key_value_pair[1] diff --git a/awscli/customizations/cloudformation/deployer.py b/awscli/customizations/cloudformation/deployer.py index 3733c55ebc8a..cc80202681d4 100644 --- a/awscli/customizations/cloudformation/deployer.py +++ b/awscli/customizations/cloudformation/deployer.py @@ -11,27 +11,32 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
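The ``Deployer`` whose diff follows drives the ``deploy`` flow shown above: create a changeset, wait for it to materialize, execute it, then wait on the stack waiter. A condensed sketch of the same lifecycle against a plain boto3 CloudFormation client (stack and file names invented; error handling, ``UsePreviousValue`` filtering, and the S3 fallback for large templates omitted):

.. code-block:: python

    import boto3

    cfn = boto3.client('cloudformation')
    with open('template.yaml') as f:  # hypothetical local template
        template_str = f.read()

    resp = cfn.create_change_set(
        StackName='my-stack',
        TemplateBody=template_str,
        ChangeSetName='awscli-cloudformation-package-deploy-example',
        ChangeSetType='CREATE',  # 'UPDATE' when the stack already exists
        Capabilities=['CAPABILITY_IAM'],
    )

    # Changeset creation is fast, so poll every 5 seconds,
    # as Deployer.wait_for_changeset does.
    cfn.get_waiter('change_set_create_complete').wait(
        ChangeSetName=resp['Id'], StackName='my-stack',
        WaiterConfig={'Delay': 5},
    )

    cfn.execute_change_set(ChangeSetName=resp['Id'], StackName='my-stack')

    # Poll every 30 seconds to stay clear of DescribeStacks rate limits,
    # as Deployer.wait_for_execute does.
    cfn.get_waiter('stack_create_complete').wait(
        StackName='my-stack', WaiterConfig={'Delay': 30},
    )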
+import collections +import logging import sys import time -import logging -import botocore -import collections +from datetime import datetime +import botocore from awscli.customizations.cloudformation import exceptions -from awscli.customizations.cloudformation.artifact_exporter import mktempfile, parse_s3_url - -from datetime import datetime +from awscli.customizations.cloudformation.artifact_exporter import ( + mktempfile, + parse_s3_url, +) LOG = logging.getLogger(__name__) ChangeSetResult = collections.namedtuple( - "ChangeSetResult", ["changeset_id", "changeset_type"]) + "ChangeSetResult", ["changeset_id", "changeset_type"] +) class Deployer(object): - - def __init__(self, cloudformation_client, - changeset_prefix="awscli-cloudformation-package-deploy-"): + def __init__( + self, + cloudformation_client, + changeset_prefix="awscli-cloudformation-package-deploy-", + ): self._client = cloudformation_client self.changeset_prefix = changeset_prefix @@ -63,17 +68,26 @@ def has_stack(self, stack_name): msg = str(e) if "Stack with id {0} does not exist".format(stack_name) in msg: - LOG.debug("Stack with id {0} does not exist".format( - stack_name)) + LOG.debug( + "Stack with id {0} does not exist".format(stack_name) + ) return False else: # We don't know anything about this exception. Don't handle LOG.debug("Unable to get stack details.", exc_info=e) raise e - def create_changeset(self, stack_name, cfn_template, - parameter_values, capabilities, role_arn, - notification_arns, s3_uploader, tags): + def create_changeset( + self, + stack_name, + cfn_template, + parameter_values, + capabilities, + role_arn, + notification_arns, + s3_uploader, + tags, + ): """ Call Cloudformation to create a changeset and wait for it to complete @@ -96,17 +110,27 @@ def create_changeset(self, stack_name, cfn_template, # When creating a new stack, UsePreviousValue=True is invalid. # For such parameters, users should either override with new value, # or set a Default value in template to successfully create a stack. 
- parameter_values = [x for x in parameter_values - if not x.get("UsePreviousValue", False)] + parameter_values = [ + x + for x in parameter_values + if not x.get("UsePreviousValue", False) + ] else: changeset_type = "UPDATE" # UsePreviousValue not valid if parameter is new summary = self._client.get_template_summary(StackName=stack_name) - existing_parameters = [parameter['ParameterKey'] for parameter in \ - summary['Parameters']] - parameter_values = [x for x in parameter_values - if not (x.get("UsePreviousValue", False) and \ - x["ParameterKey"] not in existing_parameters)] + existing_parameters = [ + parameter['ParameterKey'] + for parameter in summary['Parameters'] + ] + parameter_values = [ + x + for x in parameter_values + if not ( + x.get("UsePreviousValue", False) + and x["ParameterKey"] not in existing_parameters + ) + ] kwargs = { 'ChangeSetName': changeset_name, @@ -126,10 +150,13 @@ def create_changeset(self, stack_name, cfn_template, temporary_file.write(kwargs.pop('TemplateBody')) temporary_file.flush() url = s3_uploader.upload_with_dedup( - temporary_file.name, "template") + temporary_file.name, "template" + ) # TemplateUrl property requires S3 URL to be in path-style format parts = parse_s3_url(url, version_property="Version") - kwargs['TemplateURL'] = s3_uploader.to_path_style_s3_url(parts["Key"], parts.get("Version", None)) + kwargs['TemplateURL'] = s3_uploader.to_path_style_s3_url( + parts["Key"], parts.get("Version", None) + ) # don't set these arguments if not specified to use existing values if role_arn is not None: @@ -159,8 +186,11 @@ def wait_for_changeset(self, changeset_id, stack_name): # Poll every 5 seconds. Changeset creation should be fast waiter_config = {'Delay': 5} try: - waiter.wait(ChangeSetName=changeset_id, StackName=stack_name, - WaiterConfig=waiter_config) + waiter.wait( + ChangeSetName=changeset_id, + StackName=stack_name, + WaiterConfig=waiter_config, + ) except botocore.exceptions.WaiterError as ex: LOG.debug("Create changeset waiter exception", exc_info=ex) @@ -168,17 +198,22 @@ def wait_for_changeset(self, changeset_id, stack_name): status = resp["Status"] reason = resp["StatusReason"] - if status == "FAILED" and \ - "The submitted information didn't contain changes." in reason or \ - "No updates are to be performed" in reason: - raise exceptions.ChangeEmptyError(stack_name=stack_name) - - raise RuntimeError("Failed to create the changeset: {0} " - "Status: {1}. Reason: {2}" - .format(ex, status, reason)) - - def execute_changeset(self, changeset_id, stack_name, - disable_rollback=False): + if ( + status == "FAILED" + and "The submitted information didn't contain changes." + in reason + or "No updates are to be performed" in reason + ): + raise exceptions.ChangeEmptyError(stack_name=stack_name) + + raise RuntimeError( + "Failed to create the changeset: {0} " + "Status: {1}. 
Reason: {2}".format(ex, status, reason) + ) + + def execute_changeset( + self, changeset_id, stack_name, disable_rollback=False + ): """ Calls CloudFormation to execute changeset @@ -188,12 +223,12 @@ def execute_changeset(self, changeset_id, stack_name, :return: Response from execute-change-set call """ return self._client.execute_change_set( - ChangeSetName=changeset_id, - StackName=stack_name, - DisableRollback=disable_rollback) + ChangeSetName=changeset_id, + StackName=stack_name, + DisableRollback=disable_rollback, + ) def wait_for_execute(self, stack_name, changeset_type): - sys.stdout.write("Waiting for stack create/update to complete\n") sys.stdout.flush() @@ -203,8 +238,9 @@ def wait_for_execute(self, stack_name, changeset_type): elif changeset_type == "UPDATE": waiter = self._client.get_waiter("stack_update_complete") else: - raise RuntimeError("Invalid changeset type {0}" - .format(changeset_type)) + raise RuntimeError( + "Invalid changeset type {0}".format(changeset_type) + ) # Poll every 30 seconds. Polling too frequently risks hitting rate limits # on CloudFormation's DescribeStacks API @@ -220,13 +256,27 @@ def wait_for_execute(self, stack_name, changeset_type): raise exceptions.DeployFailedError(stack_name=stack_name) - def create_and_wait_for_changeset(self, stack_name, cfn_template, - parameter_values, capabilities, role_arn, - notification_arns, s3_uploader, tags): - + def create_and_wait_for_changeset( + self, + stack_name, + cfn_template, + parameter_values, + capabilities, + role_arn, + notification_arns, + s3_uploader, + tags, + ): result = self.create_changeset( - stack_name, cfn_template, parameter_values, capabilities, - role_arn, notification_arns, s3_uploader, tags) + stack_name, + cfn_template, + parameter_values, + capabilities, + role_arn, + notification_arns, + s3_uploader, + tags, + ) self.wait_for_changeset(result.changeset_id, stack_name) return result diff --git a/awscli/customizations/cloudformation/exceptions.py b/awscli/customizations/cloudformation/exceptions.py index b2625cdd27f9..223d0dbd1a72 100644 --- a/awscli/customizations/cloudformation/exceptions.py +++ b/awscli/customizations/cloudformation/exceptions.py @@ -1,4 +1,3 @@ - class CloudFormationCommandError(Exception): fmt = 'An unspecified error occurred' @@ -17,42 +16,50 @@ class ChangeEmptyError(CloudFormationCommandError): class InvalidLocalPathError(CloudFormationCommandError): - fmt = ("Parameter {property_name} of resource {resource_id} refers " - "to a file or folder that does not exist {local_path}") + fmt = ( + "Parameter {property_name} of resource {resource_id} refers " + "to a file or folder that does not exist {local_path}" + ) class InvalidTemplateUrlParameterError(CloudFormationCommandError): - fmt = ("{property_name} parameter of {resource_id} resource is invalid. " - "It must be a S3 URL or path to CloudFormation " - "template file. Actual: {template_path}") + fmt = ( + "{property_name} parameter of {resource_id} resource is invalid. " + "It must be a S3 URL or path to CloudFormation " + "template file. Actual: {template_path}" + ) class ExportFailedError(CloudFormationCommandError): - fmt = ("Unable to upload artifact {property_value} referenced " - "by {property_name} parameter of {resource_id} resource." - "\n" - "{ex}") + fmt = ( + "Unable to upload artifact {property_value} referenced " + "by {property_name} parameter of {resource_id} resource." 
+ "\n" + "{ex}" + ) class InvalidKeyValuePairArgumentError(CloudFormationCommandError): - fmt = ("{value} value passed to --{argname} must be of format " - "Key=Value") + fmt = "{value} value passed to --{argname} must be of format " "Key=Value" class DeployFailedError(CloudFormationCommandError): - fmt = \ - ("Failed to create/update the stack. Run the following command" - "\n" - "to fetch the list of events leading up to the failure" - "\n" - "aws cloudformation describe-stack-events --stack-name {stack_name}") + fmt = ( + "Failed to create/update the stack. Run the following command" + "\n" + "to fetch the list of events leading up to the failure" + "\n" + "aws cloudformation describe-stack-events --stack-name {stack_name}" + ) + class DeployBucketRequiredError(CloudFormationCommandError): - fmt = \ - ("Templates with a size greater than 51,200 bytes must be deployed " - "via an S3 Bucket. Please add the --s3-bucket parameter to your " - "command. The local template will be copied to that S3 bucket and " - "then deployed.") + fmt = ( + "Templates with a size greater than 51,200 bytes must be deployed " + "via an S3 Bucket. Please add the --s3-bucket parameter to your " + "command. The local template will be copied to that S3 bucket and " + "then deployed." + ) class InvalidForEachIntrinsicFunctionError(CloudFormationCommandError): diff --git a/awscli/customizations/cloudformation/package.py b/awscli/customizations/cloudformation/package.py index 9bc7464d442e..4f542442ee05 100644 --- a/awscli/customizations/cloudformation/package.py +++ b/awscli/customizations/cloudformation/package.py @@ -11,25 +11,22 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import os +import json import logging +import os import sys -import json - -from botocore.client import Config - +from awscli.customizations.cloudformation import exceptions from awscli.customizations.cloudformation.artifact_exporter import Template from awscli.customizations.cloudformation.yamlhelper import yaml_dump -from awscli.customizations.cloudformation import exceptions from awscli.customizations.commands import BasicCommand from awscli.customizations.s3uploader import S3Uploader +from botocore.client import Config LOG = logging.getLogger(__name__) class PackageCommand(BasicCommand): - MSG_PACKAGED_TEMPLATE_WRITTEN = ( "Successfully packaged artifacts and wrote output template " "to file {output_file_name}." @@ -38,12 +35,14 @@ class PackageCommand(BasicCommand): "\n" "aws cloudformation deploy --template-file {output_file_path} " "--stack-name " - "\n") + "\n" + ) NAME = "package" - DESCRIPTION = BasicCommand.FROM_FILE("cloudformation", - "_package_description.rst") + DESCRIPTION = BasicCommand.FROM_FILE( + "cloudformation", "_package_description.rst" + ) ARG_TABLE = [ { @@ -52,18 +51,16 @@ class PackageCommand(BasicCommand): 'help_text': ( 'The path where your AWS CloudFormation' ' template is located.' - ) + ), }, - { 'name': 's3-bucket', 'required': True, 'help_text': ( 'The name of the S3 bucket where this command uploads' ' the artifacts that are referenced in your template.' - ) + ), }, - { 'name': 's3-prefix', 'help_text': ( @@ -71,17 +68,15 @@ class PackageCommand(BasicCommand): ' artifacts\' name when it uploads them to the S3 bucket.' ' The prefix name is a path name (folder name) for' ' the S3 bucket.' 
- ) + ), }, - { 'name': 'kms-key-id', 'help_text': ( 'The ID of an AWS KMS key that the command uses' ' to encrypt artifacts that are at rest in the S3 bucket.' - ) + ), }, - { "name": "output-template-file", "help_text": ( @@ -89,18 +84,16 @@ class PackageCommand(BasicCommand): " output AWS CloudFormation template. If you don't specify" " a path, the command writes the template to the standard" " output." - ) + ), }, - { "name": "use-json", "action": "store_true", "help_text": ( "Indicates whether to use JSON as the format for the output AWS" " CloudFormation template. YAML is used by default." - ) + ), }, - { "name": "force-upload", "action": "store_true", @@ -108,7 +101,7 @@ class PackageCommand(BasicCommand): 'Indicates whether to override existing files in the S3 bucket.' ' Specify this flag to upload artifacts even if they ' ' match existing artifacts in the S3 bucket.' - ) + ), }, { "name": "metadata", @@ -116,11 +109,11 @@ class PackageCommand(BasicCommand): "schema": { "type": "map", "key": {"type": "string"}, - "value": {"type": "string"} + "value": {"type": "string"}, }, "help_text": "A map of metadata to attach to *ALL* the artifacts that" - " are referenced in your template." - } + " are referenced in your template.", + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -128,20 +121,24 @@ def _run_main(self, parsed_args, parsed_globals): "s3", config=Config(signature_version='s3v4'), region_name=parsed_globals.region, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) template_path = parsed_args.template_file if not os.path.isfile(template_path): raise exceptions.InvalidTemplatePathError( - template_path=template_path) + template_path=template_path + ) bucket = parsed_args.s3_bucket - self.s3_uploader = S3Uploader(s3_client, - bucket, - parsed_args.s3_prefix, - parsed_args.kms_key_id, - parsed_args.force_upload) + self.s3_uploader = S3Uploader( + s3_client, + bucket, + parsed_args.s3_prefix, + parsed_args.kms_key_id, + parsed_args.force_upload, + ) # attach the given metadata to the artifacts to be uploaded self.s3_uploader.artifact_metadata = parsed_args.metadata @@ -154,8 +151,9 @@ def _run_main(self, parsed_args, parsed_globals): if output_file: msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format( - output_file_name=output_file, - output_file_path=os.path.abspath(output_file)) + output_file_name=output_file, + output_file_path=os.path.abspath(output_file), + ) sys.stdout.write(msg) sys.stdout.flush() @@ -166,7 +164,9 @@ def _export(self, template_path, use_json): exported_template = template.export() if use_json: - exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False) + exported_str = json.dumps( + exported_template, indent=4, ensure_ascii=False + ) else: exported_str = yaml_dump(exported_template) diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py index 4d5f641768e2..f9e8b2d8a4db 100644 --- a/awscli/customizations/cloudformation/yamlhelper.py +++ b/awscli/customizations/cloudformation/yamlhelper.py @@ -14,10 +14,9 @@ import ruamel.yaml from ruamel.yaml.resolver import ScalarNode, SequenceNode -from botocore.compat import json -from botocore.compat import OrderedDict from awscli.utils import dump_yaml_to_str +from botocore.compat import OrderedDict, json def intrinsics_multi_constructor(loader, tag_prefix, node): @@ -73,9 +72,10 @@ def _add_yaml_1_1_boolean_resolvers(resolver_cls): '|true|True|TRUE|false|False|FALSE' '|on|On|ON|off|Off|OFF)$' ) - 
boolean_first_chars = list(u'yYnNtTfFoO') + boolean_first_chars = list('yYnNtTfFoO') resolver_cls.add_implicit_resolver( - 'tag:yaml.org,2002:bool', boolean_regex, boolean_first_chars) + 'tag:yaml.org,2002:bool', boolean_regex, boolean_first_chars + ) def yaml_dump(dict_to_dump): @@ -111,9 +111,11 @@ def yaml_parse(yamlstr): yaml = ruamel.yaml.YAML(typ="safe", pure=True) yaml.Constructor.add_constructor( ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - _dict_constructor) + _dict_constructor, + ) yaml.Constructor.add_multi_constructor( - "!", intrinsics_multi_constructor) + "!", intrinsics_multi_constructor + ) _add_yaml_1_1_boolean_resolvers(yaml.Resolver) return yaml.load(yamlstr) diff --git a/awscli/customizations/cloudfront.py b/awscli/customizations/cloudfront.py index 6c02bf260dbe..0e6abfce5e80 100644 --- a/awscli/customizations/cloudfront.py +++ b/awscli/customizations/cloudfront.py @@ -11,18 +11,17 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import hashlib +import random import sys import time -import random from awscrt.crypto import RSA, RSASignatureAlgorithm -from botocore.utils import parse_to_aware_datetime -from botocore.signers import CloudFrontSigner - from awscli.arguments import CustomArgument -from awscli.customizations.utils import validate_mutually_exclusive_handler from awscli.customizations.commands import BasicCommand +from awscli.customizations.utils import validate_mutually_exclusive_handler +from botocore.signers import CloudFrontSigner +from botocore.utils import parse_to_aware_datetime def register(event_handler): @@ -30,37 +29,52 @@ def register(event_handler): # Provides a simpler --paths for ``aws cloudfront create-invalidation`` event_handler.register( - 'building-argument-table.cloudfront.create-invalidation', _add_paths) + 'building-argument-table.cloudfront.create-invalidation', _add_paths + ) event_handler.register( 'operation-args-parsed.cloudfront.create-invalidation', - validate_mutually_exclusive_handler(['invalidation_batch'], ['paths'])) + validate_mutually_exclusive_handler(['invalidation_batch'], ['paths']), + ) event_handler.register( 'operation-args-parsed.cloudfront.create-distribution', validate_mutually_exclusive_handler( ['default_root_object', 'origin_domain_name'], - ['distribution_config'])) + ['distribution_config'], + ), + ) event_handler.register( 'building-argument-table.cloudfront.create-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( - 'origin-domain-name', OriginDomainName(argument_table))) + 'origin-domain-name', OriginDomainName(argument_table) + ), + ) event_handler.register( 'building-argument-table.cloudfront.create-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( - 'default-root-object', CreateDefaultRootObject(argument_table))) + 'default-root-object', CreateDefaultRootObject(argument_table) + ), + ) context = {} event_handler.register( - 'top-level-args-parsed', context.update, unique_id='cloudfront') + 'top-level-args-parsed', context.update, unique_id='cloudfront' + ) event_handler.register( 'operation-args-parsed.cloudfront.update-distribution', validate_mutually_exclusive_handler( - ['default_root_object'], ['distribution_config'])) + ['default_root_object'], ['distribution_config'] + ), + ) event_handler.register( 'building-argument-table.cloudfront.update-distribution', lambda argument_table, **kwargs: argument_table.__setitem__( - 'default-root-object', 
UpdateDefaultRootObject( - context=context, argument_table=argument_table))) + 'default-root-object', + UpdateDefaultRootObject( + context=context, argument_table=argument_table + ), + ), + ) def unique_string(prefix='cli'): @@ -73,7 +87,6 @@ def _add_paths(argument_table, **kwargs): class PathsArgument(CustomArgument): - def __init__(self): doc = ( 'The space-separated paths to be invalidated.' @@ -86,17 +99,23 @@ def add_to_params(self, parameters, value): parameters['InvalidationBatch'] = { "CallerReference": unique_string(), "Paths": {"Quantity": len(value), "Items": value}, - } + } class ExclusiveArgument(CustomArgument): DOC = '%s This argument and --%s are mutually exclusive.' - def __init__(self, name, argument_table, - exclusive_to='distribution-config', help_text=''): + def __init__( + self, + name, + argument_table, + exclusive_to='distribution-config', + help_text='', + ): argument_table[exclusive_to].required = False super(ExclusiveArgument, self).__init__( - name, help_text=self.DOC % (help_text, exclusive_to)) + name, help_text=self.DOC % (help_text, exclusive_to) + ) def distribution_config_template(self): return { @@ -108,12 +127,9 @@ def distribution_config_template(self): "QueryString": False, "Cookies": {"Forward": "none"}, }, - "TrustedSigners": { - "Enabled": False, - "Quantity": 0 - }, + "TrustedSigners": {"Enabled": False, "Quantity": 0}, "ViewerProtocolPolicy": "allow-all", - "MinTTL": 0 + "MinTTL": 0, }, "Enabled": True, "Comment": "", @@ -123,14 +139,17 @@ def distribution_config_template(self): class OriginDomainName(ExclusiveArgument): def __init__(self, argument_table): super(OriginDomainName, self).__init__( - 'origin-domain-name', argument_table, - help_text='The domain name for your origin.') + 'origin-domain-name', + argument_table, + help_text='The domain name for your origin.', + ) def add_to_params(self, parameters, value): if value is None: return parameters.setdefault( - 'DistributionConfig', self.distribution_config_template()) + 'DistributionConfig', self.distribution_config_template() + ) origin_id = unique_string(prefix=value) item = {"Id": origin_id, "DomainName": value, "OriginPath": ''} if item['DomainName'].endswith('.s3.amazonaws.com'): @@ -140,36 +159,50 @@ def add_to_params(self, parameters, value): item["S3OriginConfig"] = {"OriginAccessIdentity": ""} else: item["CustomOriginConfig"] = { - 'HTTPPort': 80, 'HTTPSPort': 443, - 'OriginProtocolPolicy': 'http-only'} + 'HTTPPort': 80, + 'HTTPSPort': 443, + 'OriginProtocolPolicy': 'http-only', + } parameters['DistributionConfig']['Origins'] = { - "Quantity": 1, "Items": [item]} + "Quantity": 1, + "Items": [item], + } parameters['DistributionConfig']['DefaultCacheBehavior'][ - 'TargetOriginId'] = origin_id + 'TargetOriginId' + ] = origin_id class CreateDefaultRootObject(ExclusiveArgument): def __init__(self, argument_table, help_text=''): super(CreateDefaultRootObject, self).__init__( - 'default-root-object', argument_table, help_text=help_text or ( + 'default-root-object', + argument_table, + help_text=help_text + or ( 'The object that you want CloudFront to return (for example, ' - 'index.html) when a viewer request points to your root URL.')) + 'index.html) when a viewer request points to your root URL.' 
+ ), + ) def add_to_params(self, parameters, value): if value is not None: parameters.setdefault( - 'DistributionConfig', self.distribution_config_template()) + 'DistributionConfig', self.distribution_config_template() + ) parameters['DistributionConfig']['DefaultRootObject'] = value class UpdateDefaultRootObject(CreateDefaultRootObject): def __init__(self, context, argument_table): super(UpdateDefaultRootObject, self).__init__( - argument_table, help_text=( + argument_table, + help_text=( 'The object that you want CloudFront to return (for example, ' 'index.html) when a viewer request points to your root URL. ' 'CLI will automatically make a get-distribution-config call ' - 'to load and preserve your other settings.')) + 'to load and preserve your other settings.' + ), + ) self.context = context def add_to_params(self, parameters, value): @@ -178,7 +211,8 @@ def add_to_params(self, parameters, value): 'cloudfront', region_name=self.context['parsed_args'].region, endpoint_url=self.context['parsed_args'].endpoint_url, - verify=self.context['parsed_args'].verify_ssl) + verify=self.context['parsed_args'].verify_ssl, + ) response = client.get_distribution_config(Id=parameters['Id']) parameters['IfMatch'] = response['ETag'] parameters['DistributionConfig'] = response['DistributionConfig'] @@ -210,7 +244,8 @@ class SignCommand(BasicCommand): 'required': True, 'help_text': ( "The active CloudFront key pair Id for the key pair " - "that you're using to generate the signature."), + "that you're using to generate the signature." + ), }, { 'name': 'private-key', @@ -218,39 +253,49 @@ class SignCommand(BasicCommand): 'help_text': 'file://path/to/your/private-key.pem', }, { - 'name': 'date-less-than', 'required': True, - 'help_text': - 'The expiration date and time for the URL. ' + DATE_FORMAT, + 'name': 'date-less-than', + 'required': True, + 'help_text': 'The expiration date and time for the URL. ' + + DATE_FORMAT, }, { 'name': 'date-greater-than', - 'help_text': - 'An optional start date and time for the URL. ' + DATE_FORMAT, + 'help_text': 'An optional start date and time for the URL. ' + + DATE_FORMAT, }, { 'name': 'ip-address', 'help_text': ( 'An optional IP address or IP address range to allow client ' - 'making the GET request from. Format: x.x.x.x/x or x.x.x.x'), + 'making the GET request from. 
Format: x.x.x.x/x or x.x.x.x' + ), }, ] def _run_main(self, args, parsed_globals): signer = CloudFrontSigner( - args.key_pair_id, RSASigner(args.private_key).sign) + args.key_pair_id, RSASigner(args.private_key).sign + ) date_less_than = parse_to_aware_datetime(args.date_less_than) date_greater_than = args.date_greater_than if date_greater_than is not None: date_greater_than = parse_to_aware_datetime(date_greater_than) if date_greater_than is not None or args.ip_address is not None: policy = signer.build_policy( - args.url, date_less_than, date_greater_than=date_greater_than, - ip_address=args.ip_address) - sys.stdout.write(signer.generate_presigned_url( - args.url, policy=policy)) + args.url, + date_less_than, + date_greater_than=date_greater_than, + ip_address=args.ip_address, + ) + sys.stdout.write( + signer.generate_presigned_url(args.url, policy=policy) + ) else: - sys.stdout.write(signer.generate_presigned_url( - args.url, date_less_than=date_less_than)) + sys.stdout.write( + signer.generate_presigned_url( + args.url, date_less_than=date_less_than + ) + ) return 0 @@ -261,6 +306,5 @@ def __init__(self, private_key): def sign(self, message): return self.priv_key.sign( - RSASignatureAlgorithm.PKCS1_5_SHA1, - hashlib.sha1(message).digest() + RSASignatureAlgorithm.PKCS1_5_SHA1, hashlib.sha1(message).digest() ) diff --git a/awscli/customizations/cloudsearch.py b/awscli/customizations/cloudsearch.py index 8ea8f0a5f265..141e91c432ff 100644 --- a/awscli/customizations/cloudsearch.py +++ b/awscli/customizations/cloudsearch.py @@ -13,8 +13,8 @@ import logging -from awscli.customizations.flatten import FlattenArguments, SEP from awscli.customizations.exceptions import ParamValidationError +from awscli.customizations.flatten import SEP, FlattenArguments from botocore.compat import OrderedDict LOG = logging.getLogger(__name__) @@ -23,7 +23,7 @@ 'Int': int, 'Double': float, 'IntArray': int, - 'DoubleArray': float + 'DoubleArray': float, } @@ -71,13 +71,16 @@ def index_hydrate(params, container, cli_type, key, value): "define-expression": { "expression": { "keep": False, - "flatten": OrderedDict([ - # Order is crucial here! We're - # flattening ExpressionValue to be "expression", - # but this is the name ("expression") of the our parent - # key, the top level nested param. - ("ExpressionName", {"name": "name"}), - ("ExpressionValue", {"name": "expression"}),]), + "flatten": OrderedDict( + [ + # Order is crucial here! We're + # flattening ExpressionValue to be "expression", + # but this is the name ("expression") of the our parent + # key, the top level nested param. + ("ExpressionName", {"name": "name"}), + ("ExpressionValue", {"name": "expression"}), + ] + ), } }, "define-index-field": { @@ -85,30 +88,57 @@ def index_hydrate(params, container, cli_type, key, value): "keep": False, # We use an ordered dict because `type` needs to be parsed before # any of the Options values. 
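The comment above stresses that ordering is crucial; here is a simplified stand-in (not the FlattenArguments internals) for why the flatten config is an OrderedDict: entries are processed in insertion order, so the type entry must be handled before the option entries whose hydration depends on it.

.. code-block:: python

    from collections import OrderedDict

    processed = []

    def hydrate(key, value):
        # Stand-in for index_hydrate: record the order keys are handled in.
        processed.append(key)

    config = OrderedDict(
        [
            ("IndexFieldType", {"name": "type"}),  # must be parsed first
            ("IntOptions.DefaultValue", {"name": "default-value"}),
        ]
    )

    for key, value in config.items():
        hydrate(key, value)

    assert processed == ["IndexFieldType", "IntOptions.DefaultValue"]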
- "flatten": OrderedDict([ - ("IndexFieldName", {"name": "name"}), - ("IndexFieldType", {"name": "type"}), - ("IntOptions.DefaultValue", {"name": "default-value", - "type": "string", - "hydrate": index_hydrate}), - ("IntOptions.FacetEnabled", {"name": "facet-enabled", - "hydrate": index_hydrate }), - ("IntOptions.SearchEnabled", {"name": "search-enabled", - "hydrate": index_hydrate}), - ("IntOptions.ReturnEnabled", {"name": "return-enabled", - "hydrate": index_hydrate}), - ("IntOptions.SortEnabled", {"name": "sort-enabled", - "hydrate": index_hydrate}), - ("IntOptions.SourceField", {"name": "source-field", - "type": "string", - "hydrate": index_hydrate }), - ("TextOptions.HighlightEnabled", {"name": "highlight-enabled", - "hydrate": index_hydrate}), - ("TextOptions.AnalysisScheme", {"name": "analysis-scheme", - "hydrate": index_hydrate}) - ]) + "flatten": OrderedDict( + [ + ("IndexFieldName", {"name": "name"}), + ("IndexFieldType", {"name": "type"}), + ( + "IntOptions.DefaultValue", + { + "name": "default-value", + "type": "string", + "hydrate": index_hydrate, + }, + ), + ( + "IntOptions.FacetEnabled", + {"name": "facet-enabled", "hydrate": index_hydrate}, + ), + ( + "IntOptions.SearchEnabled", + {"name": "search-enabled", "hydrate": index_hydrate}, + ), + ( + "IntOptions.ReturnEnabled", + {"name": "return-enabled", "hydrate": index_hydrate}, + ), + ( + "IntOptions.SortEnabled", + {"name": "sort-enabled", "hydrate": index_hydrate}, + ), + ( + "IntOptions.SourceField", + { + "name": "source-field", + "type": "string", + "hydrate": index_hydrate, + }, + ), + ( + "TextOptions.HighlightEnabled", + { + "name": "highlight-enabled", + "hydrate": index_hydrate, + }, + ), + ( + "TextOptions.AnalysisScheme", + {"name": "analysis-scheme", "hydrate": index_hydrate}, + ), + ] + ), } - } + }, } diff --git a/awscli/customizations/cloudsearchdomain.py b/awscli/customizations/cloudsearchdomain.py index 9a6b16610a6e..27ac41d1457c 100644 --- a/awscli/customizations/cloudsearchdomain.py +++ b/awscli/customizations/cloudsearchdomain.py @@ -17,11 +17,14 @@ * Add validation that --endpoint-url is required. """ + from awscli.customizations.exceptions import ParamValidationError + def register_cloudsearchdomain(cli): - cli.register_last('calling-command.cloudsearchdomain', - validate_endpoint_url) + cli.register_last( + 'calling-command.cloudsearchdomain', validate_endpoint_url + ) def validate_endpoint_url(parsed_globals, **kwargs): diff --git a/awscli/customizations/cloudtrail/__init__.py b/awscli/customizations/cloudtrail/__init__.py index 6be90d3d6f5e..fda5a0e29cf6 100644 --- a/awscli/customizations/cloudtrail/__init__.py +++ b/awscli/customizations/cloudtrail/__init__.py @@ -28,4 +28,6 @@ def inject_commands(command_table, session, **kwargs): must not collide with existing low-level API call names. """ command_table['validate-logs'] = CloudTrailValidateLogs(session) - command_table['verify-query-results'] = CloudTrailVerifyQueryResult(session) + command_table['verify-query-results'] = CloudTrailVerifyQueryResult( + session + ) diff --git a/awscli/customizations/cloudtrail/utils.py b/awscli/customizations/cloudtrail/utils.py index 995b32fcace4..8273a5278ac8 100644 --- a/awscli/customizations/cloudtrail/utils.py +++ b/awscli/customizations/cloudtrail/utils.py @@ -11,9 +11,10 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from dateutil import tz, parser from datetime import timedelta +from dateutil import parser, tz + DATE_FORMAT = "%Y%m%dT%H%M%SZ" @@ -94,5 +95,6 @@ def get_public_key(self, signature_generate_time, public_key_fingerprint): return key["Value"] raise RuntimeError( - "No public keys found for key with fingerprint: %s" % public_key_fingerprint + "No public keys found for key with fingerprint: %s" + % public_key_fingerprint ) diff --git a/awscli/customizations/cloudtrail/validation.py b/awscli/customizations/cloudtrail/validation.py index aabce6c70f43..92144ee2d3fd 100644 --- a/awscli/customizations/cloudtrail/validation.py +++ b/awscli/customizations/cloudtrail/validation.py @@ -12,25 +12,27 @@ # language governing permissions and limitations under the License. import base64 import binascii -import json import hashlib +import json import logging import re import sys import zlib -from zlib import error as ZLibError from datetime import datetime, timedelta -from dateutil import tz, parser +from zlib import error as ZLibError from awscrt.crypto import RSA, RSASignatureAlgorithm +from dateutil import parser, tz -from awscli.customizations.cloudtrail.utils import get_trail_by_arn, \ - get_account_id_from_arn, PublicKeyProvider +from awscli.customizations.cloudtrail.utils import ( + PublicKeyProvider, + get_account_id_from_arn, + get_trail_by_arn, +) from awscli.customizations.commands import BasicCommand from awscli.customizations.exceptions import ParamValidationError from botocore.exceptions import ClientError - LOG = logging.getLogger(__name__) DATE_FORMAT = '%Y%m%dT%H%M%SZ' DISPLAY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' @@ -81,11 +83,19 @@ def assert_cloudtrail_arn_is_valid(trail_arn): ) -def create_digest_traverser(cloudtrail_client, organization_client, - s3_client_provider, trail_arn, - trail_source_region=None, on_invalid=None, - on_gap=None, on_missing=None, bucket=None, - prefix=None, account_id=None): +def create_digest_traverser( + cloudtrail_client, + organization_client, + s3_client_provider, + trail_arn, + trail_source_region=None, + on_invalid=None, + on_gap=None, + on_missing=None, + bucket=None, + prefix=None, + account_id=None, +): """Creates a CloudTrail DigestTraverser and its object graph. :type cloudtrail_client: botocore.client.CloudTrail @@ -134,9 +144,11 @@ def create_digest_traverser(cloudtrail_client, organization_client, if not account_id: raise ParamValidationError( "Missing required parameter for organization " - "trail: '--account-id'") + "trail: '--account-id'" + ) organization_id = organization_client.describe_organization()[ - 'Organization']['Id'] + 'Organization' + ]['Id'] # Determine the region from the ARN (e.g., arn:aws:cloudtrail:REGION:...) 
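A quick illustration of the ARN split performed below, using a hypothetical trail ARN.

.. code-block:: python

    # ARN layout: arn:partition:service:region:account-id:resource
    trail_arn = "arn:aws:cloudtrail:us-east-1:123456789012:trail/my-trail"
    parts = trail_arn.split(":")
    assert parts[3] == "us-east-1"  # region
    assert parts[4] == "123456789012"  # account id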
trail_region = trail_arn.split(':')[3] @@ -147,16 +159,22 @@ def create_digest_traverser(cloudtrail_client, organization_client, account_id = get_account_id_from_arn(trail_arn) digest_provider = DigestProvider( - account_id=account_id, trail_name=trail_name, + account_id=account_id, + trail_name=trail_name, s3_client_provider=s3_client_provider, trail_source_region=trail_source_region, trail_home_region=trail_region, - organization_id=organization_id) + organization_id=organization_id, + ) return DigestTraverser( - digest_provider=digest_provider, starting_bucket=bucket, - starting_prefix=prefix, on_invalid=on_invalid, on_gap=on_gap, + digest_provider=digest_provider, + starting_bucket=bucket, + starting_prefix=prefix, + on_invalid=on_invalid, + on_gap=on_gap, on_missing=on_missing, - public_key_provider=PublicKeyProvider(cloudtrail_client)) + public_key_provider=PublicKeyProvider(cloudtrail_client), + ) class S3ClientProvider(object): @@ -165,6 +183,7 @@ class S3ClientProvider(object): This class will cache the location constraints of previously requested buckets and cache previously created clients for the same region. """ + def __init__(self, session, get_bucket_location_region='us-east-1'): self._session = session self._get_bucket_location_region = get_bucket_location_region @@ -196,22 +215,29 @@ def _create_client(self, region_name): class DigestError(ValueError): """Exception raised when a digest fails to validate""" + pass class DigestSignatureError(DigestError): """Exception raised when a digest signature is invalid""" + def __init__(self, bucket, key): - message = ('Digest file\ts3://%s/%s\tINVALID: signature verification ' - 'failed') % (bucket, key) + message = ( + 'Digest file\ts3://%s/%s\tINVALID: signature verification ' + 'failed' + ) % (bucket, key) super(DigestSignatureError, self).__init__(message) class InvalidDigestFormat(DigestError): """Exception raised when a digest has an invalid format""" + def __init__(self, bucket, key): - message = 'Digest file\ts3://%s/%s\tINVALID: invalid format' % (bucket, - key) + message = 'Digest file\ts3://%s/%s\tINVALID: invalid format' % ( + bucket, + key, + ) super(InvalidDigestFormat, self).__init__(message) @@ -224,9 +250,16 @@ class DigestProvider(object): dict. This class is not responsible for validation or iterating from one digest to the next. """ - def __init__(self, s3_client_provider, account_id, trail_name, - trail_home_region, trail_source_region=None, - organization_id=None): + + def __init__( + self, + s3_client_provider, + account_id, + trail_name, + trail_home_region, + trail_source_region=None, + organization_id=None, + ): self._client_provider = s3_client_provider self.trail_name = trail_name self.account_id = account_id @@ -254,7 +287,8 @@ def load_digest_keys_in_range(self, bucket, prefix, start_date, end_date): target_start_date = format_date(normalize_date(start_date)) # Add one hour to the end_date to get logs that spilled over to next. target_end_date = format_date( - normalize_date(end_date + timedelta(hours=1))) + normalize_date(end_date + timedelta(hours=1)) + ) # Ensure digests are from the same trail. 
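A small check of the one-hour spill-over padding applied above when computing the digest scan range; strftime stands in here for the module's normalize_date/format_date helpers.

.. code-block:: python

    from datetime import datetime, timedelta

    DATE_FORMAT = "%Y%m%dT%H%M%SZ"

    end_date = datetime(2015, 1, 8, 23, 30)
    # Digests delivered shortly after the requested window can still cover
    # log files inside it, so the range is padded by one hour.
    target_end_date = (end_date + timedelta(hours=1)).strftime(DATE_FORMAT)
    assert target_end_date == "20150109T003000Z"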
digest_key_regex = re.compile(self._create_digest_key_regex(prefix)) for key in key_filter: @@ -276,19 +310,23 @@ def fetch_digest(self, bucket, key): client = self._client_provider.get_client(bucket) result = client.get_object(Bucket=bucket, Key=key) try: - digest = zlib.decompress(result['Body'].read(), - zlib.MAX_WBITS | 16) + digest = zlib.decompress( + result['Body'].read(), zlib.MAX_WBITS | 16 + ) digest_data = json.loads(digest.decode()) except (ValueError, ZLibError): # Cannot gzip decode or JSON parse. raise InvalidDigestFormat(bucket, key) # Add the expected digest signature and algorithm to the dict. - if 'signature' not in result['Metadata'] \ - or 'signature-algorithm' not in result['Metadata']: + if ( + 'signature' not in result['Metadata'] + or 'signature-algorithm' not in result['Metadata'] + ): raise DigestSignatureError(bucket, key) digest_data['_signature'] = result['Metadata']['signature'] - digest_data['_signature_algorithm'] = \ - result['Metadata']['signature-algorithm'] + digest_data['_signature_algorithm'] = result['Metadata'][ + 'signature-algorithm' + ] return digest_data, digest def _create_digest_key(self, start_date, key_prefix): @@ -310,7 +348,7 @@ def _create_digest_key(self, start_date, key_prefix): 'ymd': date.strftime('%Y/%m/%d'), 'source_region': self.trail_source_region, 'home_region': self.trail_home_region, - 'name': self.trail_name + 'name': self.trail_name, } if self.organization_id: template += '{organization_id}/' @@ -332,7 +370,7 @@ def _create_digest_key_regex(self, key_prefix): 'account_id': re.escape(self.account_id), 'source_region': re.escape(self.trail_source_region), 'home_region': re.escape(self.trail_home_region), - 'name': re.escape(self.trail_name) + 'name': re.escape(self.trail_name), } if self.organization_id: template += '{organization_id}/' @@ -350,15 +388,29 @@ def _create_digest_key_regex(self, key_prefix): class DigestTraverser(object): """Retrieves and validates digests within a date range.""" + # These keys are required to be present before validating the contents # of a digest. - required_digest_keys = ['digestPublicKeyFingerprint', 'digestS3Bucket', - 'digestS3Object', 'previousDigestSignature', - 'digestEndTime', 'digestStartTime'] + required_digest_keys = [ + 'digestPublicKeyFingerprint', + 'digestS3Bucket', + 'digestS3Object', + 'previousDigestSignature', + 'digestEndTime', + 'digestStartTime', + ] - def __init__(self, digest_provider, starting_bucket, starting_prefix, - public_key_provider, digest_validator=None, - on_invalid=None, on_gap=None, on_missing=None): + def __init__( + self, + digest_provider, + starting_bucket, + starting_prefix, + public_key_provider, + digest_validator=None, + on_invalid=None, + on_gap=None, + on_missing=None, + ): """ :type digest_provider: DigestProvider :param digest_provider: DigestProvider object @@ -409,57 +461,92 @@ def traverse(self, start_date, end_date=None): while key and start_date <= last_start_date: try: digest, end_date = self._load_and_validate_digest( - public_keys, bucket, key) + public_keys, bucket, key + ) last_start_date = normalize_date( - parse_date(digest['digestStartTime'])) + parse_date(digest['digestStartTime']) + ) previous_bucket = digest.get('previousDigestS3Bucket', None) yield digest if previous_bucket is None: # The chain is broken, so find next in digest store. 
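A toy sketch of the chain walking in this method, using an in-memory stand-in for the digest store (the real code fetches and validates each digest from S3): every digest names its predecessor, and traversal follows those links until one is missing.

.. code-block:: python

    digests = {
        "digest-3": {"previousDigestS3Object": "digest-2"},
        "digest-2": {"previousDigestS3Object": "digest-1"},
        "digest-1": {"previousDigestS3Object": None},  # oldest digest
    }

    key, visited = "digest-3", []
    while key is not None:
        visited.append(key)
        key = digests[key]["previousDigestS3Object"]

    assert visited == ["digest-3", "digest-2", "digest-1"]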
key, end_date = self._find_next_digest( - digests=digests, bucket=bucket, last_key=key, - last_start_date=last_start_date, cb=self._on_gap, - is_cb_conditional=True) + digests=digests, + bucket=bucket, + last_key=key, + last_start_date=last_start_date, + cb=self._on_gap, + is_cb_conditional=True, + ) else: key = digest['previousDigestS3Object'] if previous_bucket != bucket: bucket = previous_bucket # The bucket changed so reload the digest list. digests = self._load_digests( - bucket, prefix, start_date, end_date) + bucket, prefix, start_date, end_date + ) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchKey': raise e key, end_date = self._find_next_digest( - digests=digests, bucket=bucket, last_key=key, - last_start_date=last_start_date, cb=self._on_missing, - message=str(e)) + digests=digests, + bucket=bucket, + last_key=key, + last_start_date=last_start_date, + cb=self._on_missing, + message=str(e), + ) except DigestError as e: key, end_date = self._find_next_digest( - digests=digests, bucket=bucket, last_key=key, - last_start_date=last_start_date, cb=self._on_invalid, - message=str(e)) + digests=digests, + bucket=bucket, + last_key=key, + last_start_date=last_start_date, + cb=self._on_invalid, + message=str(e), + ) except Exception as e: # Any other unexpected errors. key, end_date = self._find_next_digest( - digests=digests, bucket=bucket, last_key=key, - last_start_date=last_start_date, cb=self._on_invalid, + digests=digests, + bucket=bucket, + last_key=key, + last_start_date=last_start_date, + cb=self._on_invalid, message='Digest file\ts3://%s/%s\tINVALID: %s' - % (bucket, key, str(e))) + % (bucket, key, str(e)), + ) def _load_digests(self, bucket, prefix, start_date, end_date): return self.digest_provider.load_digest_keys_in_range( - bucket=bucket, prefix=prefix, - start_date=start_date, end_date=end_date) + bucket=bucket, + prefix=prefix, + start_date=start_date, + end_date=end_date, + ) - def _find_next_digest(self, digests, bucket, last_key, last_start_date, - cb=None, is_cb_conditional=False, message=None): + def _find_next_digest( + self, + digests, + bucket, + last_key, + last_start_date, + cb=None, + is_cb_conditional=False, + message=None, + ): """Finds the next digest in the bucket and invokes any callback.""" next_key, next_end_date = self._get_last_digest(digests, last_key) if cb and (not is_cb_conditional or next_key): - cb(bucket=bucket, next_key=next_key, last_key=last_key, - next_end_date=next_end_date, last_start_date=last_start_date, - message=message) + cb( + bucket=bucket, + next_key=next_key, + last_key=last_key, + next_end_date=next_end_date, + last_start_date=last_start_date, + message=message, + ) return next_key, next_end_date def _get_last_digest(self, digests, before_key=None): @@ -474,14 +561,16 @@ def _get_last_digest(self, digests, before_key=None): elif before_key is None: next_key = digests.pop() next_key_date = normalize_date( - parse_date(extract_digest_key_date(next_key))) + parse_date(extract_digest_key_date(next_key)) + ) return next_key, next_key_date # find a key before the given key. 
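A simplified version of the backward scan below (the real code derives each key's timestamp with extract_digest_key_date; the list is assumed sorted oldest to newest): keys are popped newest-first until one dated strictly before the reference is found.

.. code-block:: python

    from datetime import datetime

    # Hypothetical digest keys paired with their delivery timestamps.
    digests = [
        ("key-a", datetime(2015, 1, 8, 9)),
        ("key-b", datetime(2015, 1, 8, 10)),
        ("key-c", datetime(2015, 1, 8, 11)),
    ]

    before, next_key = datetime(2015, 1, 8, 11), None
    while digests:
        key, when = digests.pop()
        if when < before:
            next_key = key
            break

    assert next_key == "key-b"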
before_key_date = parse_date(extract_digest_key_date(before_key)) while digests: next_key = digests.pop() next_key_date = normalize_date( - parse_date(extract_digest_key_date(next_key))) + parse_date(extract_digest_key_date(next_key)) + ) if next_key_date < before_key_date: LOG.debug("Next found key: %s", next_key) return next_key, next_key_date @@ -499,33 +588,51 @@ def _load_and_validate_digest(self, public_keys, bucket, key): if required_key not in digest_data: raise InvalidDigestFormat(bucket, key) # Ensure the bucket and key are the same as what's expected. - if digest_data['digestS3Bucket'] != bucket \ - or digest_data['digestS3Object'] != key: + if ( + digest_data['digestS3Bucket'] != bucket + or digest_data['digestS3Object'] != key + ): raise DigestError( - ('Digest file\ts3://%s/%s\tINVALID: has been moved from its ' - 'original location') % (bucket, key)) + ( + 'Digest file\ts3://%s/%s\tINVALID: has been moved from its ' + 'original location' + ) + % (bucket, key) + ) # Get the public keys in the given time range. fingerprint = digest_data['digestPublicKeyFingerprint'] if fingerprint not in public_keys: raise DigestError( - ('Digest file\ts3://%s/%s\tINVALID: public key not found in ' - 'region %s for fingerprint %s') % - (bucket, key, self.digest_provider.trail_home_region, - fingerprint)) + ( + 'Digest file\ts3://%s/%s\tINVALID: public key not found in ' + 'region %s for fingerprint %s' + ) + % ( + bucket, + key, + self.digest_provider.trail_home_region, + fingerprint, + ) + ) public_key_hex = public_keys[fingerprint]['Value'] self._digest_validator.validate( - bucket, key, public_key_hex, digest_data, digest) + bucket, key, public_key_hex, digest_data, digest + ) end_date = normalize_date(parse_date(digest_data['digestEndTime'])) return digest_data, end_date def _load_public_keys(self, start_date, end_date): public_keys = self._public_key_provider.get_public_keys( - start_date, end_date) + start_date, end_date + ) if not public_keys: raise RuntimeError( - 'No public keys found between %s and %s' % - (format_display_date(start_date), - format_display_date(end_date))) + 'No public keys found between %s and %s' + % ( + format_display_date(start_date), + format_display_date(end_date), + ) + ) return public_keys @@ -554,17 +661,20 @@ def validate(self, bucket, key, public_key, digest_data, inflated_digest): public_key = RSA.new_public_key_from_der_data(decoded_key) except RuntimeError: raise DigestError( - ('Digest file\ts3://%s/%s\tINVALID: Unable to load PKCS #1 key' - ' with fingerprint %s') - % (bucket, key, digest_data['digestPublicKeyFingerprint'])) + ( + 'Digest file\ts3://%s/%s\tINVALID: Unable to load PKCS #1 key' + ' with fingerprint %s' + ) + % (bucket, key, digest_data['digestPublicKeyFingerprint']) + ) to_sign = self._create_string_to_sign(digest_data, inflated_digest) signature_bytes = binascii.unhexlify(digest_data['_signature']) result = public_key.verify( - signature_algorithm=RSASignatureAlgorithm.PKCS1_5_SHA256, - digest=hashlib.sha256(to_sign).digest(), - signature=signature_bytes + signature_algorithm=RSASignatureAlgorithm.PKCS1_5_SHA256, + digest=hashlib.sha256(to_sign).digest(), + signature=signature_bytes, ) if not result: # The previous implementation caught a cryptography.exceptions.InvalidSignature @@ -582,7 +692,8 @@ def _create_string_to_sign(self, digest_data, inflated_digest): digest_data['digestS3Bucket'], digest_data['digestS3Object'], hashlib.sha256(inflated_digest).hexdigest(), - previous_signature) + previous_signature, + ) LOG.debug('Digest 
string to sign: %s', string_to_sign) return string_to_sign.encode() @@ -591,6 +702,7 @@ class CloudTrailValidateLogs(BasicCommand): """ Validates log digests and log files, optionally saving them to disk. """ + NAME = 'validate-logs' DESCRIPTION = """ Validates CloudTrail logs for a given period of time. @@ -637,34 +749,67 @@ class CloudTrailValidateLogs(BasicCommand): """ ARG_TABLE = [ - {'name': 'trail-arn', 'required': True, 'cli_type_name': 'string', - 'help_text': 'Specifies the ARN of the trail to be validated'}, - {'name': 'start-time', 'required': True, 'cli_type_name': 'string', - 'help_text': ('Specifies that log files delivered on or after the ' - 'specified UTC timestamp value will be validated. ' - 'Example: "2015-01-08T05:21:42Z".')}, - {'name': 'end-time', 'cli_type_name': 'string', - 'help_text': ('Optionally specifies that log files delivered on or ' - 'before the specified UTC timestamp value will be ' - 'validated. The default value is the current time. ' - 'Example: "2015-01-08T12:31:41Z".')}, - {'name': 's3-bucket', 'cli_type_name': 'string', - 'help_text': ('Optionally specifies the S3 bucket where the digest ' - 'files are stored. If a bucket name is not specified, ' - 'the CLI will retrieve it by calling describe_trails')}, - {'name': 's3-prefix', 'cli_type_name': 'string', - 'help_text': ('Optionally specifies the optional S3 prefix where the ' - 'digest files are stored. If not specified, the CLI ' - 'will determine the prefix automatically by calling ' - 'describe_trails.')}, - {'name': 'account-id', 'cli_type_name': 'string', - 'help_text': ('Optionally specifies the account for validating logs. ' - 'This parameter is needed for organization trails ' - 'for validating logs for specific account inside an ' - 'organization')}, - {'name': 'verbose', 'cli_type_name': 'boolean', - 'action': 'store_true', - 'help_text': 'Display verbose log validation information'} + { + 'name': 'trail-arn', + 'required': True, + 'cli_type_name': 'string', + 'help_text': 'Specifies the ARN of the trail to be validated', + }, + { + 'name': 'start-time', + 'required': True, + 'cli_type_name': 'string', + 'help_text': ( + 'Specifies that log files delivered on or after the ' + 'specified UTC timestamp value will be validated. ' + 'Example: "2015-01-08T05:21:42Z".' + ), + }, + { + 'name': 'end-time', + 'cli_type_name': 'string', + 'help_text': ( + 'Optionally specifies that log files delivered on or ' + 'before the specified UTC timestamp value will be ' + 'validated. The default value is the current time. ' + 'Example: "2015-01-08T12:31:41Z".' + ), + }, + { + 'name': 's3-bucket', + 'cli_type_name': 'string', + 'help_text': ( + 'Optionally specifies the S3 bucket where the digest ' + 'files are stored. If a bucket name is not specified, ' + 'the CLI will retrieve it by calling describe_trails' + ), + }, + { + 'name': 's3-prefix', + 'cli_type_name': 'string', + 'help_text': ( + 'Optionally specifies the optional S3 prefix where the ' + 'digest files are stored. If not specified, the CLI ' + 'will determine the prefix automatically by calling ' + 'describe_trails.' + ), + }, + { + 'name': 'account-id', + 'cli_type_name': 'string', + 'help_text': ( + 'Optionally specifies the account for validating logs. 
' 'This parameter is needed for organization trails '
 'for validating logs for a specific account inside an '
 'organization'
 ),
 },
 {
 'name': 'verbose',
 'cli_type_name': 'boolean',
 'action': 'store_true',
 'help_text': 'Display verbose log validation information',
 },
 ] def __init__(self, session): @@ -722,26 +867,36 @@ def setup_services(self, parsed_globals): self._source_region = parsed_globals.region # Use the same region as the CLI to get bucket locations. self.s3_client_provider = S3ClientProvider( - self._session, self._source_region) - client_args = {'region_name': parsed_globals.region, - 'verify': parsed_globals.verify_ssl} + self._session, self._source_region + ) + client_args = { + 'region_name': parsed_globals.region, + 'verify': parsed_globals.verify_ssl, + } self.organization_client = self._session.create_client( - 'organizations', **client_args) + 'organizations', **client_args + ) if parsed_globals.endpoint_url is not None: client_args['endpoint_url'] = parsed_globals.endpoint_url self.cloudtrail_client = self._session.create_client( - 'cloudtrail', **client_args) + 'cloudtrail', **client_args + ) def _call(self): traverser = create_digest_traverser( - trail_arn=self.trail_arn, cloudtrail_client=self.cloudtrail_client, + trail_arn=self.trail_arn, + cloudtrail_client=self.cloudtrail_client, organization_client=self.organization_client, trail_source_region=self._source_region, - s3_client_provider=self.s3_client_provider, bucket=self.s3_bucket, - prefix=self.s3_prefix, on_missing=self._on_missing_digest, - on_invalid=self._on_invalid_digest, on_gap=self._on_digest_gap, - account_id=self.account_id) + s3_client_provider=self.s3_client_provider, + bucket=self.s3_bucket, + prefix=self.s3_prefix, + on_missing=self._on_missing_digest, + on_invalid=self._on_invalid_digest, + on_gap=self._on_digest_gap, + account_id=self.account_id, + ) self._write_startup_text() digests = traverser.traverse(self.start_time, self.end_time) for digest in digests: @@ -751,7 +906,8 @@ def _call(self): self._valid_digests += 1 self._write_status( 'Digest file\ts3://%s/%s\tvalid' - % (digest['digestS3Bucket'], digest['digestS3Object'])) + % (digest['digestS3Bucket'], digest['digestS3Object']) + ) if not digest['logFiles']: continue for log in digest['logFiles']: @@ -771,12 +927,13 @@ def _track_found_times(self, digest): self._found_end_time = min(digest_end_time, self.end_time) def _download_log(self, log): - """ Download a log, decompress, and compare SHA256 checksums""" + """Download a log, decompress, and compare SHA256 checksums""" try: # Create a client that can work with this bucket. 
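The _download_log body that follows streams the gzipped log through zlib while updating a rolling SHA-256; a self-contained sketch of that pattern, with an in-memory stand-in for the S3 response body.

.. code-block:: python

    import gzip
    import hashlib
    import io
    import zlib

    # Stand-in for response['Body']: a gzip-compressed log file.
    body = io.BytesIO(gzip.compress(b'{"Records": []}'))

    # MAX_WBITS | 16 tells zlib to expect a gzip wrapper.
    inflater = zlib.decompressobj(zlib.MAX_WBITS | 16)
    rolling_hash = hashlib.sha256()
    for chunk in iter(lambda: body.read(2048), b""):
        rolling_hash.update(inflater.decompress(chunk))
    rolling_hash.update(inflater.flush())

    expected = hashlib.sha256(b'{"Records": []}').hexdigest()
    assert rolling_hash.hexdigest() == expected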
client = self.s3_client_provider.get_client(log['s3Bucket']) response = client.get_object( - Bucket=log['s3Bucket'], Key=log['s3Object']) + Bucket=log['s3Bucket'], Key=log['s3Object'] + ) gzip_inflater = zlib.decompressobj(zlib.MAX_WBITS | 16) rolling_hash = hashlib.sha256() for chunk in iter(lambda: response['Body'].read(2048), b""): @@ -790,8 +947,12 @@ def _download_log(self, log): self._on_log_invalid(log) else: self._valid_logs += 1 - self._write_status(('Log file\ts3://%s/%s\tvalid' - % (log['s3Bucket'], log['s3Object']))) + self._write_status( + ( + 'Log file\ts3://%s/%s\tvalid' + % (log['s3Bucket'], log['s3Object']) + ) + ) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchKey': raise @@ -813,24 +974,36 @@ def _write_status(self, message, is_error=False): def _write_startup_text(self): sys.stdout.write( 'Validating log files for trail %s between %s and %s\n\n' - % (self.trail_arn, format_display_date(self.start_time), - format_display_date(self.end_time))) + % ( + self.trail_arn, + format_display_date(self.start_time), + format_display_date(self.end_time), + ) + ) def _write_summary_text(self): if not self._is_last_status_double_space: sys.stdout.write('\n') - sys.stdout.write('Results requested for %s to %s\n' - % (format_display_date(self.start_time), - format_display_date(self.end_time))) + sys.stdout.write( + 'Results requested for %s to %s\n' + % ( + format_display_date(self.start_time), + format_display_date(self.end_time), + ) + ) if not self._valid_digests and not self._invalid_digests: sys.stdout.write('No digests found\n') return if not self._found_start_time or not self._found_end_time: sys.stdout.write('No valid digests found in range\n') else: - sys.stdout.write('Results found for %s to %s:\n' - % (format_display_date(self._found_start_time), - format_display_date(self._found_end_time))) + sys.stdout.write( + 'Results found for %s to %s:\n' + % ( + format_display_date(self._found_start_time), + format_display_date(self._found_end_time), + ) + ) self._write_ratio(self._valid_digests, self._invalid_digests, 'digest') self._write_ratio(self._valid_logs, self._invalid_logs, 'log') sys.stdout.write('\n') @@ -840,19 +1013,26 @@ def _write_ratio(self, valid, invalid, name): if total > 0: sys.stdout.write('\n%d/%d %s files valid' % (valid, total, name)) if invalid > 0: - sys.stdout.write(', %d/%d %s files INVALID' % (invalid, total, - name)) + sys.stdout.write( + ', %d/%d %s files INVALID' % (invalid, total, name) + ) def _on_missing_digest(self, bucket, last_key, **kwargs): self._invalid_digests += 1 - self._write_status('Digest file\ts3://%s/%s\tINVALID: not found' - % (bucket, last_key), True) + self._write_status( + 'Digest file\ts3://%s/%s\tINVALID: not found' % (bucket, last_key), + True, + ) def _on_digest_gap(self, **kwargs): self._write_status( 'No log files were delivered by CloudTrail between %s and %s' - % (format_display_date(kwargs['next_end_date']), - format_display_date(kwargs['last_start_date'])), True) + % ( + format_display_date(kwargs['next_end_date']), + format_display_date(kwargs['last_start_date']), + ), + True, + ) def _on_invalid_digest(self, message, **kwargs): self._invalid_digests += 1 @@ -861,17 +1041,25 @@ def _on_invalid_digest(self, message, **kwargs): def _on_invalid_log_format(self, log_data): self._invalid_logs += 1 self._write_status( - ('Log file\ts3://%s/%s\tINVALID: invalid format' - % (log_data['s3Bucket'], log_data['s3Object'])), True) + ( + 'Log file\ts3://%s/%s\tINVALID: invalid format' + % (log_data['s3Bucket'], 
log_data['s3Object']) ), True, ) def _on_log_invalid(self, log_data): self._invalid_logs += 1 self._write_status( "Log file\ts3://%s/%s\tINVALID: hash value doesn't match" - % (log_data['s3Bucket'], log_data['s3Object']), True) + % (log_data['s3Bucket'], log_data['s3Object']), + True, + ) def _on_missing_log(self, log_data): self._invalid_logs += 1 self._write_status( 'Log file\ts3://%s/%s\tINVALID: not found' - % (log_data['s3Bucket'], log_data['s3Object']), True) + % (log_data['s3Bucket'], log_data['s3Object']), + True, + ) diff --git a/awscli/customizations/cloudtrail/verifyqueryresults.py b/awscli/customizations/cloudtrail/verifyqueryresults.py index ba23329f234d..6b1a45b4ea67 100644 --- a/awscli/customizations/cloudtrail/verifyqueryresults.py +++ b/awscli/customizations/cloudtrail/verifyqueryresults.py @@ -1,17 +1,19 @@ import base64 import binascii -import json import hashlib +import json import sys from abc import ABC, abstractmethod from os import path -from awscli.customizations.exceptions import ParamValidationError - from awscrt.crypto import RSA, RSASignatureAlgorithm +from awscli.customizations.cloudtrail.utils import ( + PublicKeyProvider, + parse_date, +) from awscli.customizations.commands import BasicCommand -from awscli.customizations.cloudtrail.utils import parse_date, PublicKeyProvider +from awscli.customizations.exceptions import ParamValidationError SIGN_FILE_NAME = "result_sign.json" @@ -65,8 +67,10 @@ def validate(self, public_key_base64, sign_file): signature_bytes = binascii.unhexlify(sign_file["hashSignature"]) result = public_key.verify( signature_algorithm=RSASignatureAlgorithm.PKCS1_5_SHA256, - digest=hashlib.sha256(self._create_string_to_sign(sign_file)).digest(), - signature=signature_bytes + digest=hashlib.sha256( + self._create_string_to_sign(sign_file) + ).digest(), + signature=signature_bytes, ) if not result: # The previous implementation caught a cryptography.exceptions.InvalidSignature @@ -187,7 +191,6 @@ def __init__( s3_bucket=None, s3_path_prefix=None, ): - self._s3_client = s3_client self._s3_bucket = s3_bucket self._s3_path_prefix = s3_path_prefix @@ -197,9 +200,13 @@ def validate_export_files(self, sign_file): for file_info in sign_file["files"]: key = self._s3_path_prefix + file_info["fileName"] - response = self._s3_client.get_object(Bucket=self._s3_bucket, Key=key) + response = self._s3_client.get_object( + Bucket=self._s3_bucket, Key=key + ) self._validate_hash_value( - response["Body"], file_info["fileName"], file_info["fileHashValue"] + response["Body"], + file_info["fileName"], + file_info["fileHashValue"], ) @@ -218,7 +225,9 @@ def validate_export_files(self, sign_file): path.join(self.local_path_prefix, file_info["fileName"]), "rb" ) as export_file: self._validate_hash_value( - export_file, file_info["fileName"], file_info["fileHashValue"] + export_file, + file_info["fileName"], + file_info["fileHashValue"], ) @@ -250,7 +259,7 @@ class CloudTrailVerifyQueryResult(BasicCommand): CloudTrail delivered them. .. note:: - For verify export file from S3, this command requires that the user or + To verify an export file from S3, this command requires that the user or role executing the command has permission to call GetObject and GetBucketLocation for the bucket that stores the export file. 
""" @@ -358,7 +367,9 @@ def _call(self, args, parsed_globals): ) signature_validator.validate(public_key, sign_file) self._return_code = 0 - sys.stdout.write("Successfully validated sign and query result files\n") + sys.stdout.write( + "Successfully validated sign and query result files\n" + ) def _initialize_components( self, diff --git a/awscli/customizations/codeartifact/login.py b/awscli/customizations/codeartifact/login.py index 884c31d9f5c5..6376bf55540d 100644 --- a/awscli/customizations/codeartifact/login.py +++ b/awscli/customizations/codeartifact/login.py @@ -1,26 +1,23 @@ import errno import os import platform -import sys -import subprocess import re - +import subprocess +import sys from configparser import RawConfigParser +from datetime import datetime from io import StringIO from urllib.parse import urlsplit -from datetime import datetime -from dateutil.tz import tzutc from dateutil.relativedelta import relativedelta -from botocore.utils import parse_timestamp +from dateutil.tz import tzutc -from awscli.compat import ( - is_windows, urlparse, get_stderr_encoding, is_macos -) +from awscli.compat import get_stderr_encoding, is_macos, is_windows, urlparse from awscli.customizations import utils as cli_utils from awscli.customizations.commands import BasicCommand -from awscli.utils import original_ld_library_path from awscli.customizations.utils import uni_print +from awscli.utils import original_ld_library_path +from botocore.utils import parse_timestamp def get_relative_expiration_time(remaining): @@ -45,7 +42,7 @@ class CommandFailedError(Exception): def __init__(self, called_process_error, auth_token): msg = str(called_process_error).replace(auth_token, '******') if called_process_error.stderr is not None: - msg +=( + msg += ( f' Stderr from command:\n' f'{called_process_error.stderr.decode(get_stderr_encoding())}' ) @@ -55,8 +52,16 @@ def __init__(self, called_process_error, auth_token): class BaseLogin: _TOOL_NOT_FOUND_MESSAGE = '%s was not found. Please verify installation.' - def __init__(self, auth_token, expiration, repository_endpoint, - domain, repository, subprocess_utils, namespace=None): + def __init__( + self, + auth_token, + expiration, + repository_endpoint, + domain, + repository, + subprocess_utils, + namespace=None, + ): self.auth_token = auth_token self.expiration = expiration self.repository_endpoint = repository_endpoint @@ -79,15 +84,22 @@ def _write_success_message(self, tool): # for some corner case # e.g. 11 hours 59 minutes 31 seconds should output --> 12 hours. 
remaining = relativedelta( - self.expiration, datetime.now(tzutc())) + relativedelta(seconds=30) + self.expiration, datetime.now(tzutc()) + ) + relativedelta(seconds=30) expiration_message = get_relative_expiration_time(remaining) - sys.stdout.write('Successfully configured {} to use ' - 'AWS CodeArtifact repository {} ' - .format(tool, self.repository_endpoint)) + sys.stdout.write( + 'Successfully configured {} to use ' + 'AWS CodeArtifact repository {} '.format( + tool, self.repository_endpoint + ) + ) sys.stdout.write(os.linesep) - sys.stdout.write('Login expires in {} at {}'.format( - expiration_message, self.expiration)) + sys.stdout.write( + 'Login expires in {} at {}'.format( + expiration_message, self.expiration + ) + ) sys.stdout.write(os.linesep) def _run_commands(self, tool, commands, dry_run=False): @@ -113,9 +125,7 @@ def _run_command(self, tool, command, *, ignore_errors=False): raise CommandFailedError(ex, self.auth_token) except OSError as ex: if ex.errno == errno.ENOENT: - raise ValueError( - self._TOOL_NOT_FOUND_MESSAGE % tool - ) + raise ValueError(self._TOOL_NOT_FOUND_MESSAGE % tool) raise ex @classmethod @@ -124,18 +134,15 @@ def get_commands(cls, endpoint, auth_token, **kwargs): class SwiftLogin(BaseLogin): + DEFAULT_NETRC_FMT = 'machine {hostname} login token password {auth_token}' - DEFAULT_NETRC_FMT = \ - u'machine {hostname} login token password {auth_token}' - - NETRC_REGEX_FMT = \ - r'(?P<prefix>\bmachine\s+{escaped_hostname}\s+login\s+\S+\s+password\s+)' \ + NETRC_REGEX_FMT = ( + r'(?P<prefix>\bmachine\s+{escaped_hostname}\s+login\s+\S+\s+password\s+)' r'(?P<token>\S+)' + ) def login(self, dry_run=False): - scope = self.get_scope( - self.namespace - ) + scope = self.get_scope(self.namespace) commands = self.get_commands( self.repository_endpoint, self.auth_token, scope=scope ) @@ -143,13 +150,14 @@ def login(self, dry_run=False): if not is_macos: hostname = urlparse.urlparse(self.repository_endpoint).hostname new_entry = self.DEFAULT_NETRC_FMT.format( - hostname=hostname, - auth_token=self.auth_token + hostname=hostname, auth_token=self.auth_token ) if dry_run: self._display_new_netrc_entry(new_entry, self.get_netrc_path()) else: - self._update_netrc_entry(hostname, new_entry, self.get_netrc_path()) + self._update_netrc_entry( + hostname, new_entry, self.get_netrc_path() + ) self._run_commands('swift', commands, dry_run) @@ -171,7 +179,7 @@ def _display_new_netrc_entry(self, new_entry, netrc_path): def _update_netrc_entry(self, hostname, new_entry, netrc_path): pattern = re.compile( self.NETRC_REGEX_FMT.format(escaped_hostname=re.escape(hostname)), - re.M + re.M, ) if not os.path.isfile(netrc_path): self._create_netrc_file(netrc_path, new_entry) @@ -180,13 +188,13 @@ def _update_netrc_entry(self, hostname, new_entry, netrc_path): contents = f.read() escaped_auth_token = self.auth_token.replace('\\', r'\\') new_contents = re.sub( - pattern, - rf"\g<prefix>{escaped_auth_token}", - contents + pattern, rf"\g<prefix>{escaped_auth_token}", contents ) if new_contents == contents: - new_contents = self._append_netrc_entry(new_contents, new_entry) + new_contents = self._append_netrc_entry( + new_contents, new_entry + ) with open(netrc_path, 'w') as f: f.write(new_contents) @@ -195,8 +203,9 @@ def _create_netrc_file(self, netrc_path, new_entry): dirname = os.path.split(netrc_path)[0] if not os.path.isdir(dirname): os.makedirs(dirname) - with os.fdopen(os.open(netrc_path, - os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: + with os.fdopen( + os.open(netrc_path, os.O_WRONLY | os.O_CREAT, 0o600), 'w' + ) as f:
f.write(new_entry + '\n') def _append_netrc_entry(self, contents, new_entry): @@ -234,9 +243,7 @@ def get_commands(cls, endpoint, auth_token, **kwargs): scope = kwargs.get('scope') # Set up the codeartifact repository as the swift registry. - set_registry_command = [ - 'swift', 'package-registry', 'set', endpoint - ] + set_registry_command = ['swift', 'package-registry', 'set', endpoint] if scope is not None: set_registry_command.extend(['--scope', scope]) commands.append(set_registry_command) @@ -245,7 +252,10 @@ def get_commands(cls, endpoint, auth_token, **kwargs): # We will write token to .netrc for Linux and Windows # MacOS will store the token from command line option to Keychain login_registry_command = [ - 'swift', 'package-registry', 'login', f'{endpoint}login' + 'swift', + 'package-registry', + 'login', + f'{endpoint}login', ] if is_macos: login_registry_command.extend(['--token', auth_token]) @@ -293,7 +303,9 @@ def login(self, dry_run=False): ) source_configured_message = self._SOURCE_UPDATED_MESSAGE else: - command = self._get_configure_command('add', nuget_index_url, source_name) + command = self._get_configure_command( + 'add', nuget_index_url, source_name + ) source_configured_message = self._SOURCE_ADDED_MESSAGE if dry_run: @@ -305,9 +317,7 @@ def login(self, dry_run=False): try: with original_ld_library_path(): self.subprocess_utils.run( - command, - capture_output=True, - check=True + command, capture_output=True, check=True ) except subprocess.CalledProcessError as e: uni_print('Failed to update the NuGet.Config\n') @@ -339,8 +349,7 @@ def _get_source_to_url_dict(self): """ with original_ld_library_path(): response = self.subprocess_utils.check_output( - self._get_list_command(), - stderr=self.subprocess_utils.PIPE + self._get_list_command(), stderr=self.subprocess_utils.PIPE ) lines = response.decode(os.device_encoding(1) or "utf-8").splitlines() @@ -350,8 +359,9 @@ def _get_source_to_url_dict(self): for i in range(len(lines)): result = self._SOURCE_REGEX.match(lines[i].strip()) if result: - source_to_url_dict[result["source_name"].strip()] = \ - lines[i + 1].strip() + source_to_url_dict[result["source_name"].strip()] = lines[ + i + 1 + ].strip() return source_to_url_dict @@ -387,7 +397,6 @@ def _get_configure_command(self, operation, nuget_index_url, source_name): class NuGetLogin(NuGetBaseLogin): - def _get_tool_name(self): return 'nuget' @@ -396,16 +405,21 @@ def _get_list_command(self): def _get_configure_command(self, operation, nuget_index_url, source_name): return [ - 'nuget', 'sources', operation, - '-name', source_name, - '-source', nuget_index_url, - '-username', 'aws', - '-password', self.auth_token + 'nuget', + 'sources', + operation, + '-name', + source_name, + '-source', + nuget_index_url, + '-username', + 'aws', + '-password', + self.auth_token, ] class DotNetLogin(NuGetBaseLogin): - def _get_tool_name(self): return 'dotnet' @@ -422,10 +436,7 @@ def _get_configure_command(self, operation, nuget_index_url, source_name): command.append(source_name) command += ['--source', nuget_index_url] - command += [ - '--username', 'aws', - '--password', self.auth_token - ] + command += ['--username', 'aws', '--password', self.auth_token] # Encryption is not supported on non-Windows platforms. if not is_windows: @@ -435,15 +446,12 @@ def _get_configure_command(self, operation, nuget_index_url, source_name): class NpmLogin(BaseLogin): - # On Windows we need to be explicit about the .cmd file to execute # (unless we execute through the shell, i.e. with shell=True). 
NPM_CMD = 'npm.cmd' if platform.system().lower() == 'windows' else 'npm' def login(self, dry_run=False): - scope = self.get_scope( - self.namespace - ) + scope = self.get_scope(self.namespace) commands = self.get_commands( self.repository_endpoint, self.auth_token, scope=scope ) @@ -484,9 +492,7 @@ def get_commands(cls, endpoint, auth_token, **kwargs): registry = '{}:registry'.format(scope) if scope else 'registry' # set up the codeartifact repository as the npm registry. - commands.append( - [cls.NPM_CMD, 'config', 'set', registry, endpoint] - ) + commands.append([cls.NPM_CMD, 'config', 'set', registry, endpoint]) repo_uri = urlsplit(endpoint) @@ -510,13 +516,10 @@ def get_commands(cls, endpoint, auth_token, **kwargs): class PipLogin(BaseLogin): - PIP_INDEX_URL_FMT = '{scheme}://aws:{auth_token}@{netloc}{path}simple/' def login(self, dry_run=False): - commands = self.get_commands( - self.repository_endpoint, self.auth_token - ) + commands = self.get_commands(self.repository_endpoint, self.auth_token) self._run_commands('pip', commands, dry_run) @classmethod @@ -526,14 +529,13 @@ def get_commands(cls, endpoint, auth_token, **kwargs): scheme=repo_uri.scheme, auth_token=auth_token, netloc=repo_uri.netloc, - path=repo_uri.path + path=repo_uri.path, ) return [['pip', 'config', 'set', 'global.index-url', pip_index_url]] class TwineLogin(BaseLogin): - DEFAULT_PYPI_RC_FMT = '''\ [distutils] index-servers= @@ -553,14 +555,19 @@ def __init__( domain, repository, subprocess_utils, - pypi_rc_path=None + pypi_rc_path=None, ): if pypi_rc_path is None: pypi_rc_path = self.get_pypi_rc_path() self.pypi_rc_path = pypi_rc_path super().__init__( - auth_token, expiration, repository_endpoint, - domain, repository, subprocess_utils) + auth_token, + expiration, + repository_endpoint, + domain, + repository, + subprocess_utils, + ) @classmethod def get_commands(cls, endpoint, auth_token, **kwargs): @@ -577,8 +584,7 @@ def get_commands(cls, endpoint, auth_token, **kwargs): pypi_rc_path = kwargs['pypi_rc_path'] default_pypi_rc = cls.DEFAULT_PYPI_RC_FMT.format( - repository_endpoint=endpoint, - auth_token=auth_token + repository_endpoint=endpoint, auth_token=auth_token ) pypi_rc = RawConfigParser() @@ -624,7 +630,7 @@ def login(self, dry_run=False): pypi_rc_str = self.get_commands( self.repository_endpoint, self.auth_token, - pypi_rc_path=self.pypi_rc_path + pypi_rc_path=self.pypi_rc_path, ) if dry_run: @@ -681,7 +687,7 @@ class CodeArtifactLogin(BasicCommand): 'package_format': 'pypi', 'login_cls': TwineLogin, 'namespace_support': False, - } + }, } NAME = 'login' @@ -707,7 +713,7 @@ class CodeArtifactLogin(BasicCommand): { 'name': 'domain-owner', 'help_text': 'The AWS account ID that owns your CodeArtifact ' - 'domain', + 'domain', 'required': False, }, { @@ -719,7 +725,7 @@ class CodeArtifactLogin(BasicCommand): 'name': 'duration-seconds', 'cli_type_name': 'integer', 'help_text': 'The time, in seconds, that the login information ' - 'is valid', + 'is valid', 'required': False, }, { @@ -730,17 +736,17 @@ class CodeArtifactLogin(BasicCommand): { 'name': 'endpoint-type', 'help_text': 'The type of endpoint you want the tool to interact with', - 'required': False + 'required': False, }, { 'name': 'dry-run', 'action': 'store_true', 'help_text': 'Only print the commands that would be executed ' - 'to connect your tool with your repository without ' - 'making any changes to your configuration. 
Note that ' - 'this prints the unredacted auth token as part of the output', + 'to connect your tool with your repository without ' + 'making any changes to your configuration. Note that ' + 'this prints the unredacted auth token as part of the output', 'required': False, - 'default': False + 'default': False, }, ] @@ -760,30 +766,30 @@ def _get_repository_endpoint( kwargs = { 'domain': parsed_args.domain, 'repository': parsed_args.repository, - 'format': package_format + 'format': package_format, } if parsed_args.endpoint_type: kwargs['endpointType'] = parsed_args.endpoint_type if parsed_args.domain_owner: kwargs['domainOwner'] = parsed_args.domain_owner - get_repository_endpoint_response = \ + get_repository_endpoint_response = ( codeartifact_client.get_repository_endpoint(**kwargs) + ) return get_repository_endpoint_response['repositoryEndpoint'] def _get_authorization_token(self, codeartifact_client, parsed_args): - kwargs = { - 'domain': parsed_args.domain - } + kwargs = {'domain': parsed_args.domain} if parsed_args.domain_owner: kwargs['domainOwner'] = parsed_args.domain_owner if parsed_args.duration_seconds: kwargs['durationSeconds'] = parsed_args.duration_seconds - get_authorization_token_response = \ + get_authorization_token_response = ( codeartifact_client.get_authorization_token(**kwargs) + ) return get_authorization_token_response @@ -811,8 +817,13 @@ def _run_main(self, parsed_args, parsed_globals): auth_token = auth_token_res['authorizationToken'] expiration = parse_timestamp(auth_token_res['expiration']) login = self.TOOL_MAP[tool]['login_cls']( - auth_token, expiration, repository_endpoint, - domain, repository, subprocess, namespace + auth_token, + expiration, + repository_endpoint, + domain, + repository, + subprocess, + namespace, ) login.login(parsed_args.dry_run) diff --git a/awscli/customizations/codecommit.py b/awscli/customizations/codecommit.py index 6b30e834f67f..8901c6856ea8 100644 --- a/awscli/customizations/codecommit.py +++ b/awscli/customizations/codecommit.py @@ -11,18 +11,18 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
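A note on the codeartifact hunks above: for pip, the login flow reduces to a single `pip config set global.index-url` call whose URL carries the authorization token as the password component of PIP_INDEX_URL_FMT. A minimal sketch of that URL construction, outside the patch (the helper name build_pip_index_url and the example endpoint are ours, not the CLI's):

    from urllib.parse import urlsplit

    PIP_INDEX_URL_FMT = '{scheme}://aws:{auth_token}@{netloc}{path}simple/'

    def build_pip_index_url(endpoint, auth_token):
        # Mirrors PipLogin.get_commands: split the repository endpoint and
        # embed the token as the password in the configured index URL.
        repo_uri = urlsplit(endpoint)
        return PIP_INDEX_URL_FMT.format(
            scheme=repo_uri.scheme,
            auth_token=auth_token,
            netloc=repo_uri.netloc,
            path=repo_uri.path,
        )

Given a hypothetical endpoint such as https://domain-123.d.codeartifact.us-west-2.amazonaws.com/pypi/repo/, the result is that same URL with aws:<token>@ inserted and simple/ appended, which is exactly what `pip config set global.index-url` receives.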
+import datetime +import fileinput +import logging import os import re import sys -import logging -import fileinput -import datetime +from awscli.compat import NonTranslatedStdout +from awscli.customizations.commands import BasicCommand from botocore.auth import SigV4Auth from botocore.awsrequest import AWSRequest from botocore.compat import urlsplit -from awscli.customizations.commands import BasicCommand -from awscli.compat import NonTranslatedStdout logger = logging.getLogger('botocore.credentials') @@ -43,9 +43,10 @@ def inject_commands(command_table, session, **kwargs): class CodeCommitNoOpStoreCommand(BasicCommand): NAME = 'store' - DESCRIPTION = ('This operation does nothing, credentials' - ' are calculated each time') - SYNOPSIS = ('aws codecommit credential-helper store') + DESCRIPTION = ( + 'This operation does nothing, credentials' ' are calculated each time' + ) + SYNOPSIS = 'aws codecommit credential-helper store' EXAMPLES = '' _UNDOCUMENTED = True @@ -55,9 +56,10 @@ def _run_main(self, args, parsed_globals): class CodeCommitNoOpEraseCommand(BasicCommand): NAME = 'erase' - DESCRIPTION = ('This operation does nothing, no credentials' - ' are ever stored') - SYNOPSIS = ('aws codecommit credential-helper erase') + DESCRIPTION = ( + 'This operation does nothing, no credentials' ' are ever stored' + ) + SYNOPSIS = 'aws codecommit credential-helper erase' EXAMPLES = '' _UNDOCUMENTED = True @@ -67,16 +69,20 @@ def _run_main(self, args, parsed_globals): class CodeCommitGetCommand(BasicCommand): NAME = 'get' - DESCRIPTION = ('get a username SigV4 credential pair' - ' based on protocol, host and path provided' - ' from standard in. This is primarily' - ' called by git to generate credentials to' - ' authenticate against AWS CodeCommit') - SYNOPSIS = ('aws codecommit credential-helper get') - EXAMPLES = (r'echo -e "protocol=https\\n' - r'path=/v1/repos/myrepo\\n' - 'host=git-codecommit.us-east-1.amazonaws.com"' - ' | aws codecommit credential-helper get') + DESCRIPTION = ( + 'get a username SigV4 credential pair' + ' based on protocol, host and path provided' + ' from standard in. This is primarily' + ' called by git to generate credentials to' + ' authenticate against AWS CodeCommit' + ) + SYNOPSIS = 'aws codecommit credential-helper get' + EXAMPLES = ( + r'echo -e "protocol=https\\n' + r'path=/v1/repos/myrepo\\n' + 'host=git-codecommit.us-east-1.amazonaws.com"' + ' | aws codecommit credential-helper get' + ) ARG_TABLE = [ { 'name': 'ignore-host-check', @@ -86,18 +92,20 @@ class CodeCommitGetCommand(BasicCommand): 'help_text': ( 'Optional. Generate credentials regardless of whether' ' the domain is an Amazon domain.' 
- ) - } - ] + ), + } + ] def __init__(self, session): super(CodeCommitGetCommand, self).__init__(session) def _run_main(self, args, parsed_globals): git_parameters = self.read_git_parameters() - if ('amazon.com' in git_parameters['host'] or - 'amazonaws.com' in git_parameters['host'] or - args.ignore_host_check): + if ( + 'amazon.com' in git_parameters['host'] + or 'amazonaws.com' in git_parameters['host'] + or args.ignore_host_check + ): theUrl = self.extract_url(git_parameters) region = self.extract_region(git_parameters, parsed_globals) signature = self.sign_request(region, theUrl) @@ -129,14 +137,16 @@ def read_git_parameters(self): return parsed def extract_url(self, parameters): - url = '{0}://{1}/{2}'.format(parameters['protocol'], - parameters['host'], - parameters['path']) + url = '{0}://{1}/{2}'.format( + parameters['protocol'], parameters['host'], parameters['path'] + ) return url def extract_region(self, parameters, parsed_globals): - match = re.match(r'(vpce-.+\.)?git-codecommit(-fips)?\.([^.]+)\.(vpce\.)?amazonaws\.com', - parameters['host']) + match = re.match( + r'(vpce-.+\.)?git-codecommit(-fips)?\.([^.]+)\.(vpce\.)?amazonaws\.com', + parameters['host'], + ) if match is not None: return match.group(3) elif parsed_globals.region is not None: @@ -156,9 +166,8 @@ def sign_request(self, region, url_to_sign): # we don't want to include the port number in the signature hostname = split.netloc.split(':')[0] canonical_request = '{0}\n{1}\n\nhost:{2}\n\nhost\n'.format( - request.method, - split.path, - hostname) + request.method, split.path, hostname + ) logger.debug("Calculating signature using v4 auth.") logger.debug('CanonicalRequest:\n%s', canonical_request) string_to_sign = signer.string_to_sign(request, canonical_request) @@ -170,7 +179,7 @@ def sign_request(self, region, url_to_sign): class CodeCommitCommand(BasicCommand): NAME = 'credential-helper' - SYNOPSIS = ('aws codecommit credential-helper') + SYNOPSIS = 'aws codecommit credential-helper' EXAMPLES = '' SUBCOMMANDS = [ @@ -178,14 +187,16 @@ class CodeCommitCommand(BasicCommand): {'name': 'store', 'command_class': CodeCommitNoOpStoreCommand}, {'name': 'erase', 'command_class': CodeCommitNoOpEraseCommand}, ] - DESCRIPTION = ('Provide a SigV4 compatible user name and' - ' password for git smart HTTP ' - ' These commands are consumed by git and' - ' should not used directly. Erase and Store' - ' are no-ops. Get is operation to generate' - ' credentials to authenticate AWS CodeCommit.' - ' Run \"aws codecommit credential-helper help\"' - ' for details') + DESCRIPTION = ( + 'Provide a SigV4 compatible user name and' + ' password for git smart HTTP.' + ' These commands are consumed by git and' + ' should not be used directly. Erase and Store' + ' are no-ops. Get is the operation to generate' + ' credentials to authenticate against AWS CodeCommit.' + ' Run "aws codecommit credential-helper help"' + ' for details' + ) def _run_main(self, args, parsed_globals): self._raise_usage_error() diff --git a/awscli/customizations/codedeploy/codedeploy.py b/awscli/customizations/codedeploy/codedeploy.py index df6d2f5be205..b1c78d648f0e 100644 --- a/awscli/customizations/codedeploy/codedeploy.py +++ b/awscli/customizations/codedeploy/codedeploy.py @@ -12,12 +12,13 @@ # language governing permissions and limitations under the License.
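A quick aside on CodeCommitGetCommand.sign_request as reformatted above: the canonical request it signs contains only the method, the path, and a single signed host header, with any port number stripped from the hostname. A standalone sketch of that string construction, assuming (from the surrounding code) that the method is the literal 'GIT' the credential helper signs with; the function name is ours:

    from urllib.parse import urlsplit

    def build_canonical_request(method, url_to_sign):
        # Mirrors sign_request: method, path, one signed 'host' header,
        # and no payload hash.
        split = urlsplit(url_to_sign)
        hostname = split.netloc.split(':')[0]  # drop any port number
        return '{0}\n{1}\n\nhost:{2}\n\nhost\n'.format(
            method, split.path, hostname
        )

    print(build_canonical_request(
        'GIT',
        'https://git-codecommit.us-east-1.amazonaws.com/v1/repos/myrepo',
    ))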
from awscli.customizations import utils -from awscli.customizations.codedeploy.locationargs import \ - modify_revision_arguments -from awscli.customizations.codedeploy.push import Push -from awscli.customizations.codedeploy.register import Register from awscli.customizations.codedeploy.deregister import Deregister from awscli.customizations.codedeploy.install import Install +from awscli.customizations.codedeploy.locationargs import ( + modify_revision_arguments, +) +from awscli.customizations.codedeploy.push import Push +from awscli.customizations.codedeploy.register import Register from awscli.customizations.codedeploy.uninstall import Uninstall @@ -25,25 +26,19 @@ def initialize(cli): """ The entry point for CodeDeploy high level commands. """ - cli.register( - 'building-command-table.main', - change_name - ) - cli.register( - 'building-command-table.deploy', - inject_commands - ) + cli.register('building-command-table.main', change_name) + cli.register('building-command-table.deploy', inject_commands) cli.register( 'building-argument-table.deploy.get-application-revision', - modify_revision_arguments + modify_revision_arguments, ) cli.register( 'building-argument-table.deploy.register-application-revision', - modify_revision_arguments + modify_revision_arguments, ) cli.register( 'building-argument-table.deploy.create-deployment', - modify_revision_arguments + modify_revision_arguments, ) diff --git a/awscli/customizations/codedeploy/deregister.py b/awscli/customizations/codedeploy/deregister.py index 63ed4b9f13de..624c33c41281 100644 --- a/awscli/customizations/codedeploy/deregister.py +++ b/awscli/customizations/codedeploy/deregister.py @@ -13,11 +13,13 @@ import sys -from botocore.exceptions import ClientError - +from awscli.customizations.codedeploy.utils import ( + INSTANCE_NAME_ARG, + validate_instance_name, + validate_region, +) from awscli.customizations.commands import BasicCommand -from awscli.customizations.codedeploy.utils import \ - validate_region, validate_instance_name, INSTANCE_NAME_ARG +from botocore.exceptions import ClientError class Deregister(BasicCommand): @@ -38,8 +40,8 @@ class Deregister(BasicCommand): 'help_text': ( 'Optional. Do not delete the IAM user for the registered ' 'on-premises instance.' - ) - } + ), + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -52,11 +54,10 @@ def _run_main(self, parsed_args, parsed_globals): 'codedeploy', region_name=params.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) self.iam = self._session.create_client( - 'iam', - region_name=params.region + 'iam', region_name=params.region ) try: @@ -96,10 +97,7 @@ def _get_instance_info(self, params): params.user_name = params.iam_user_arn[start:] params.tags = response['instanceInfo']['tags'] sys.stdout.write( - 'DONE\n' - 'IamUserArn: {0}\n'.format( - params.iam_user_arn - ) + 'DONE\n' 'IamUserArn: {0}\n'.format(params.iam_user_arn) ) if params.tags: sys.stdout.write('Tags:') @@ -112,8 +110,7 @@ def _get_instance_info(self, params): def _remove_tags(self, params): sys.stdout.write('Removing tags from the on-premises instance... 
') self.codedeploy.remove_tags_from_on_premises_instances( - tags=params.tags, - instanceNames=[params.instance_name] + tags=params.tags, instanceNames=[params.instance_name] ) sys.stdout.write('DONE\n') @@ -129,11 +126,11 @@ def _delete_user_policy(self, params): list_user_policies = self.iam.get_paginator('list_user_policies') try: for response in list_user_policies.paginate( - UserName=params.user_name): + UserName=params.user_name + ): for policy_name in response['PolicyNames']: self.iam.delete_user_policy( - UserName=params.user_name, - PolicyName=policy_name + UserName=params.user_name, PolicyName=policy_name ) except ClientError as e: if e.response.get('Error', {}).get('Code') != 'NoSuchEntity': @@ -145,11 +142,12 @@ def _delete_access_key(self, params): list_access_keys = self.iam.get_paginator('list_access_keys') try: for response in list_access_keys.paginate( - UserName=params.user_name): + UserName=params.user_name + ): for access_key in response['AccessKeyMetadata']: self.iam.delete_access_key( UserName=params.user_name, - AccessKeyId=access_key['AccessKeyId'] + AccessKeyId=access_key['AccessKeyId'], ) except ClientError as e: if e.response.get('Error', {}).get('Code') != 'NoSuchEntity': @@ -157,9 +155,9 @@ def _delete_access_key(self, params): sys.stdout.write('DONE\n') def _delete_iam_user(self, params): - sys.stdout.write('Deleting the IAM user ({0})... '.format( - params.user_name - )) + sys.stdout.write( + 'Deleting the IAM user ({0})... '.format(params.user_name) + ) try: self.iam.delete_user(UserName=params.user_name) except ClientError as e: diff --git a/awscli/customizations/codedeploy/install.py b/awscli/customizations/codedeploy/install.py index 0055b775c555..e6e1e014033c 100644 --- a/awscli/customizations/codedeploy/install.py +++ b/awscli/customizations/codedeploy/install.py @@ -16,9 +16,12 @@ import shutil import sys +from awscli.customizations.codedeploy.utils import ( + validate_instance, + validate_region, + validate_s3_location, +) from awscli.customizations.commands import BasicCommand -from awscli.customizations.codedeploy.utils import \ - validate_region, validate_s3_location, validate_instance class Install(BasicCommand): @@ -37,7 +40,7 @@ class Install(BasicCommand): 'help_text': ( 'Required. The path to the on-premises instance configuration ' 'file.' - ) + ), }, { 'name': 'override-config', @@ -46,7 +49,7 @@ class Install(BasicCommand): 'help_text': ( 'Optional. Overrides the on-premises instance configuration ' 'file.' - ) + ), }, { 'name': 'agent-installer', @@ -54,8 +57,8 @@ class Install(BasicCommand): 'required': False, 'help_text': ( 'Optional. The AWS CodeDeploy Agent installer file.' - ) - } + ), + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -84,8 +87,10 @@ def _run_main(self, parsed_args, parsed_globals): return 0 def _validate_override_config(self, params): - if os.path.isfile(params.system.CONFIG_PATH) and \ - not params.override_config: + if ( + os.path.isfile(params.system.CONFIG_PATH) + and not params.override_config + ): raise RuntimeError( 'The on-premises instance configuration file already exists. ' 'Specify --override-config to update the existing on-premises ' diff --git a/awscli/customizations/codedeploy/locationargs.py b/awscli/customizations/codedeploy/locationargs.py index 8f07593aab18..ff65d89d1b47 100644 --- a/awscli/customizations/codedeploy/locationargs.py +++ b/awscli/customizations/codedeploy/locationargs.py @@ -12,8 +12,7 @@ # language governing permissions and limitations under the License. 
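One more note, on the Deregister hunks above: the IAM cleanup is paginator-driven, so users with more policies or access keys than a single list call returns are still fully cleaned up (and the real command tolerates NoSuchEntity errors). A self-contained sketch of the access-key loop; the function name is ours, and `iam` is assumed to be a botocore IAM client:

    def delete_all_access_keys(iam, user_name):
        # Same pattern as Deregister._delete_access_key: page through
        # ListAccessKeys and delete each key individually.
        paginator = iam.get_paginator('list_access_keys')
        for page in paginator.paginate(UserName=user_name):
            for access_key in page['AccessKeyMetadata']:
                iam.delete_access_key(
                    UserName=user_name,
                    AccessKeyId=access_key['AccessKeyId'],
                )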
from awscli.argprocess import unpack_cli_arg -from awscli.arguments import CustomArgument -from awscli.arguments import create_argument_model_from_schema +from awscli.arguments import CustomArgument, create_argument_model_from_schema from awscli.customizations.exceptions import ParamValidationError S3_LOCATION_ARG_DESCRIPTION = { @@ -23,7 +22,7 @@ 'Information about the location of the application revision in Amazon ' 'S3. You must specify the bucket, the key, and bundleType. ' 'Optionally, you can also specify an eTag and version.' - ) + ), } S3_LOCATION_SCHEMA = { @@ -32,30 +31,30 @@ "bucket": { "type": "string", "description": "The Amazon S3 bucket name.", - "required": True + "required": True, }, "key": { "type": "string", "description": "The Amazon S3 object key name.", - "required": True + "required": True, }, "bundleType": { "type": "string", "description": "The format of the bundle stored in Amazon S3.", "enum": ["tar", "tgz", "zip"], - "required": True + "required": True, }, "eTag": { "type": "string", "description": "The Amazon S3 object eTag.", - "required": False + "required": False, }, "version": { "type": "string", "description": "The Amazon S3 object version.", - "required": False - } - } + "required": False, + }, + }, } GITHUB_LOCATION_ARG_DESCRIPTION = { @@ -67,7 +66,7 @@ 'references the application revision. For the repository, use the ' 'format GitHub-account/repository-name or GitHub-org/repository-name. ' 'For the commit ID, use the SHA1 Git commit reference.' - ) + ), } GITHUB_LOCATION_SCHEMA = { @@ -79,32 +78,28 @@ "The GitHub account or organization and repository. Specify " "as GitHub-account/repository or GitHub-org/repository." ), - "required": True + "required": True, }, "commitId": { "type": "string", "description": "The SHA1 Git commit reference.", - "required": True - } - } + "required": True, + }, + }, } def modify_revision_arguments(argument_table, session, **kwargs): s3_model = create_argument_model_from_schema(S3_LOCATION_SCHEMA) - argument_table[S3_LOCATION_ARG_DESCRIPTION['name']] = ( - S3LocationArgument( - argument_model=s3_model, - session=session, - **S3_LOCATION_ARG_DESCRIPTION - ) + argument_table[S3_LOCATION_ARG_DESCRIPTION['name']] = S3LocationArgument( + argument_model=s3_model, session=session, **S3_LOCATION_ARG_DESCRIPTION ) github_model = create_argument_model_from_schema(GITHUB_LOCATION_SCHEMA) argument_table[GITHUB_LOCATION_ARG_DESCRIPTION['name']] = ( GitHubLocationArgument( argument_model=github_model, session=session, - **GITHUB_LOCATION_ARG_DESCRIPTION + **GITHUB_LOCATION_ARG_DESCRIPTION, ) ) argument_table['revision'].required = False @@ -123,7 +118,7 @@ def add_to_params(self, parameters, value): param=self.argument_model, cli_argument=self, value=value, - operation=None + operation=None, ) if parsed is None: parsed = unpack_cli_arg(self, value) @@ -149,8 +144,8 @@ def build_revision_location(self, value_dict): "s3Location": { "bucket": value_dict['bucket'], "key": value_dict['key'], - "bundleType": value_dict['bundleType'] - } + "bundleType": value_dict['bundleType'], + }, } if 'eTag' in value_dict: revision['s3Location']['eTag'] = value_dict['eTag'] @@ -171,6 +166,6 @@ def build_revision_location(self, value_dict): "revisionType": "GitHub", "gitHubLocation": { "repository": value_dict['repository'], - "commitId": value_dict['commitId'] - } + "commitId": value_dict['commitId'], + }, } diff --git a/awscli/customizations/codedeploy/push.py b/awscli/customizations/codedeploy/push.py index 4e0fbcffbed8..91cc3ffb84c9 100644 --- 
a/awscli/customizations/codedeploy/push.py +++ b/awscli/customizations/codedeploy/push.py @@ -11,20 +11,18 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import contextlib import os import sys -import zipfile import tempfile -import contextlib +import zipfile from datetime import datetime -from botocore.exceptions import ClientError - +from awscli.compat import ZIP_COMPRESSION_MODE, BytesIO from awscli.customizations.codedeploy.utils import validate_s3_location from awscli.customizations.commands import BasicCommand from awscli.customizations.exceptions import ParamValidationError -from awscli.compat import BytesIO, ZIP_COMPRESSION_MODE - +from botocore.exceptions import ClientError ONE_MB = 1 << 20 MULTIPART_LIMIT = 6 * ONE_MB @@ -51,7 +49,7 @@ class Push(BasicCommand): 'help_text': ( 'Required. The name of the AWS CodeDeploy application to be ' 'associated with the application revision.' - ) + ), }, { 'name': 's3-location', @@ -63,7 +61,7 @@ class Push(BasicCommand): r'a bucket and a key that represent the Amazon S3 bucket name ' r'and the object key name. Content will be zipped before ' r'uploading. Use the format s3://\<bucket\>/\<key\>' - ) + ), }, { 'name': 'ignore-hidden-files', @@ -75,13 +73,13 @@ class Push(BasicCommand): 'and upload hidden files to Amazon S3; otherwise, set the ' '--no-ignore-hidden-files flag (the default) to bundle and ' 'upload hidden files to Amazon S3.' - ) + ), }, { 'name': 'no-ignore-hidden-files', 'action': 'store_true', 'default': False, - 'group_name': 'ignore-hidden-files' + 'group_name': 'ignore-hidden-files', }, { 'name': 'source', @@ -92,7 +90,7 @@ class Push(BasicCommand): 'accompanying AppSpec file on the development machine to be ' 'zipped and uploaded to Amazon S3. If not specified, the ' 'current directory is used.' - ) + ), }, { 'name': 'description', @@ -102,8 +100,8 @@ class Push(BasicCommand): 'revision. If not specified, the default string "Uploaded by ' 'AWS CLI \'time\' UTC" is used, where \'time\' is the current ' 'system time in Coordinated Universal Time (UTC).' - ) - } + ), + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -112,34 +110,32 @@ def _run_main(self, parsed_args, parsed_globals): 'codedeploy', region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) self.s3 = self._session.create_client( - 's3', - region_name=parsed_globals.region + 's3', region_name=parsed_globals.region ) self._push(parsed_args) return 0 def _validate_args(self, parsed_args): validate_s3_location(parsed_args, 's3_location') - if parsed_args.ignore_hidden_files \ - and parsed_args.no_ignore_hidden_files: + if ( + parsed_args.ignore_hidden_files + and parsed_args.no_ignore_hidden_files + ): raise ParamValidationError( 'You cannot specify both --ignore-hidden-files and ' '--no-ignore-hidden-files.'
) if not parsed_args.description: - parsed_args.description = ( - 'Uploaded by AWS CLI {0} UTC'.format( - datetime.utcnow().isoformat() - ) + parsed_args.description = 'Uploaded by AWS CLI {0} UTC'.format( + datetime.utcnow().isoformat() ) def _push(self, params): with self._compress( - params.source, - params.ignore_hidden_files + params.source, params.ignore_hidden_files ) as bundle: try: upload_response = self._upload_to_s3(params, bundle) @@ -148,10 +144,8 @@ def _push(self, params): params.version = upload_response['VersionId'] except Exception as e: raise RuntimeError( - 'Failed to upload \'%s\' to \'%s\': %s' % - (params.source, - params.s3_location, - str(e)) + 'Failed to upload \'%s\' to \'%s\': %s' + % (params.source, params.s3_location, str(e)) ) self._register_revision(params) @@ -162,10 +156,7 @@ def _push(self, params): s3location_string = ( '--s3-location bucket={0},key={1},' 'bundleType=zip,eTag={2}{3}'.format( - params.bucket, - params.key, - params.eTag, - version_string + params.bucket, params.key, params.eTag, version_string ) ) sys.stdout.write( @@ -175,8 +166,7 @@ def _push(self, params): '--deployment-group-name <deployment-group-name> ' '--deployment-config-name <deployment-config-name> ' '--description <description>\n'.format( - params.application_name, - s3location_string + params.application_name, s3location_string ) ) @@ -197,7 +187,7 @@ def _compress(self, source, ignore_hidden_files=False): for fn in files: filename = os.path.join(root, fn) filename = os.path.abspath(filename) - arcname = filename[len(source_path) + 1:] + arcname = filename[len(source_path) + 1 :] if filename == appspec_path: contains_appspec = True zf.write(filename, arcname, ZIP_COMPRESSION_MODE) @@ -213,16 +203,10 @@ def _upload_to_s3(self, params, bundle): size_remaining = self._bundle_size(bundle) if size_remaining < MULTIPART_LIMIT: return self.s3.put_object( - Bucket=params.bucket, - Key=params.key, - Body=bundle + Bucket=params.bucket, Key=params.key, Body=bundle ) else: - return self._multipart_upload_to_s3( - params, - bundle, - size_remaining - ) + return self._multipart_upload_to_s3(params, bundle, size_remaining) def _bundle_size(self, bundle): bundle.seek(0, 2) @@ -232,8 +216,7 @@ def _bundle_size(self, bundle): def _multipart_upload_to_s3(self, params, bundle, size_remaining): create_response = self.s3.create_multipart_upload( - Bucket=params.bucket, - Key=params.key + Bucket=params.bucket, Key=params.key ) upload_id = create_response['UploadId'] try: @@ -247,25 +230,22 @@ def _multipart_upload_to_s3(self, params, bundle, size_remaining): Key=params.key, UploadId=upload_id, PartNumber=part_num, - Body=BytesIO(data) + Body=BytesIO(data), + ) + multipart_list.append( + {'PartNumber': part_num, 'ETag': upload_response['ETag']} ) - multipart_list.append({ - 'PartNumber': part_num, - 'ETag': upload_response['ETag'] - }) part_num += 1 size_remaining -= len(data) return self.s3.complete_multipart_upload( Bucket=params.bucket, Key=params.key, UploadId=upload_id, - MultipartUpload={'Parts': multipart_list} + MultipartUpload={'Parts': multipart_list}, ) except ClientError as e: self.s3.abort_multipart_upload( - Bucket=params.bucket, - Key=params.key, - UploadId=upload_id + Bucket=params.bucket, Key=params.key, UploadId=upload_id ) raise e @@ -276,13 +256,13 @@ def _register_revision(self, params): 'bucket': params.bucket, 'key': params.key, 'bundleType': 'zip', - 'eTag': params.eTag - } + 'eTag': params.eTag, + }, } if 'version' in params: revision['s3Location']['version'] = params.version self.codedeploy.register_application_revision(
applicationName=params.application_name, revision=revision, - description=params.description + description=params.description, ) diff --git a/awscli/customizations/codedeploy/register.py b/awscli/customizations/codedeploy/register.py index 03881b039ab6..fb67e1720fdc 100644 --- a/awscli/customizations/codedeploy/register.py +++ b/awscli/customizations/codedeploy/register.py @@ -13,11 +13,16 @@ import sys -from awscli.customizations.commands import BasicCommand from awscli.customizations.codedeploy.systems import DEFAULT_CONFIG_FILE -from awscli.customizations.codedeploy.utils import \ - validate_region, validate_instance_name, validate_tags, \ - validate_iam_user_arn, INSTANCE_NAME_ARG, IAM_USER_ARN_ARG +from awscli.customizations.codedeploy.utils import ( + IAM_USER_ARN_ARG, + INSTANCE_NAME_ARG, + validate_iam_user_arn, + validate_instance_name, + validate_region, + validate_tags, +) +from awscli.customizations.commands import BasicCommand class Register(BasicCommand): @@ -38,15 +43,15 @@ class Register(BasicCommand): "Key": { "description": "The tag key.", "type": "string", - "required": True + "required": True, }, "Value": { "description": "The tag value.", "type": "string", - "required": True - } - } - } + "required": True, + }, + }, + }, } ARG_TABLE = [ @@ -60,9 +65,9 @@ class Register(BasicCommand): 'help_text': ( 'Optional. The list of key/value pairs to tag the on-premises ' 'instance.' - ) + ), }, - IAM_USER_ARN_ARG + IAM_USER_ARN_ARG, ] def _run_main(self, parsed_args, parsed_globals): @@ -77,11 +82,10 @@ def _run_main(self, parsed_args, parsed_globals): 'codedeploy', region_name=params.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) self.iam = self._session.create_client( - 'iam', - region_name=params.region + 'iam', region_name=params.region ) try: @@ -119,30 +123,21 @@ def _create_iam_user(self, params): sys.stdout.write('Creating the IAM user... ') params.user_name = params.instance_name response = self.iam.create_user( - Path='/AWS/CodeDeploy/', - UserName=params.user_name + Path='/AWS/CodeDeploy/', UserName=params.user_name ) params.iam_user_arn = response['User']['Arn'] sys.stdout.write( - 'DONE\n' - 'IamUserArn: {0}\n'.format( - params.iam_user_arn - ) + 'DONE\n' 'IamUserArn: {0}\n'.format(params.iam_user_arn) ) def _create_access_key(self, params): sys.stdout.write('Creating the IAM user access key... 
') - response = self.iam.create_access_key( - UserName=params.user_name - ) + response = self.iam.create_access_key(UserName=params.user_name) params.access_key_id = response['AccessKey']['AccessKeyId'] params.secret_access_key = response['AccessKey']['SecretAccessKey'] sys.stdout.write( - 'DONE\n' - 'AccessKeyId: {0}\n' - 'SecretAccessKey: {1}\n'.format( - params.access_key_id, - params.secret_access_key + 'DONE\n' 'AccessKeyId: {0}\n' 'SecretAccessKey: {1}\n'.format( + params.access_key_id, params.secret_access_key ) ) @@ -162,14 +157,11 @@ def _create_user_policy(self, params): self.iam.put_user_policy( UserName=params.user_name, PolicyName=params.policy_name, - PolicyDocument=params.policy_document + PolicyDocument=params.policy_document, ) sys.stdout.write( - 'DONE\n' - 'PolicyName: {0}\n' - 'PolicyDocument: {1}\n'.format( - params.policy_name, - params.policy_document + 'DONE\n' 'PolicyName: {0}\n' 'PolicyDocument: {1}\n'.format( + params.policy_name, params.policy_document ) ) @@ -188,7 +180,7 @@ def _create_config(self, params): params.region, params.iam_user_arn, params.access_key_id, - params.secret_access_key + params.secret_access_key, ) ) sys.stdout.write('DONE\n') @@ -196,15 +188,13 @@ def _create_config(self, params): def _register_instance(self, params): sys.stdout.write('Registering the on-premises instance... ') self.codedeploy.register_on_premises_instance( - instanceName=params.instance_name, - iamUserArn=params.iam_user_arn + instanceName=params.instance_name, iamUserArn=params.iam_user_arn ) sys.stdout.write('DONE\n') def _add_tags(self, params): sys.stdout.write('Adding tags to the on-premises instance... ') self.codedeploy.add_tags_to_on_premises_instances( - tags=params.tags, - instanceNames=[params.instance_name] + tags=params.tags, instanceNames=[params.instance_name] ) sys.stdout.write('DONE\n') diff --git a/awscli/customizations/codedeploy/systems.py b/awscli/customizations/codedeploy/systems.py index 3d3f671f6d9c..65c2d93996b5 100644 --- a/awscli/customizations/codedeploy/systems.py +++ b/awscli/customizations/codedeploy/systems.py @@ -26,10 +26,7 @@ class System: def __init__(self, params): self.session = params.session - self.s3 = self.session.create_client( - 's3', - region_name=params.region - ) + self.s3 = self.session.create_client('s3', region_name=params.region) def validate_administrator(self): raise NotImplementedError('validate_administrator') @@ -60,11 +57,13 @@ def install(self, params): process = subprocess.Popen( [ 'powershell.exe', - '-Command', 'Stop-Service', - '-Name', 'codedeployagent' + '-Command', + 'Stop-Service', + '-Name', + 'codedeployagent', ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) (output, error) = process.communicate() not_found = ( @@ -83,24 +82,31 @@ def install(self, params): [ r'.\{0}'.format(self.INSTALLER), '/quiet', - '/l', r'.\codedeploy-agent-install-log.txt' + '/l', + r'.\codedeploy-agent-install-log.txt', ], - shell=True + shell=True, + ) + subprocess.check_call( + [ + 'powershell.exe', + '-Command', + 'Restart-Service', + '-Name', + 'codedeployagent', + ] ) - subprocess.check_call([ - 'powershell.exe', - '-Command', 'Restart-Service', - '-Name', 'codedeployagent' - ]) process = subprocess.Popen( [ 'powershell.exe', - '-Command', 'Get-Service', - '-Name', 'codedeployagent' + '-Command', + 'Get-Service', + '-Name', + 'codedeployagent', ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) (output, error) = process.communicate() if "Running" not in output: @@ 
-112,11 +118,13 @@ def uninstall(self, params): process = subprocess.Popen( [ 'powershell.exe', - '-Command', 'Stop-Service', - '-Name', 'codedeployagent' + '-Command', + 'Stop-Service', + '-Name', + 'codedeployagent', ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) (output, error) = process.communicate() not_found = ( @@ -133,11 +141,15 @@ def _remove_agent(self): process = subprocess.Popen( [ 'wmic', - 'product', 'where', 'name="CodeDeploy Host Agent"', - 'call', 'uninstall', '/nointeractive' + 'product', + 'where', + 'name="CodeDeploy Host Agent"', + 'call', + 'uninstall', + '/nointeractive', ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) (output, error) = process.communicate() if process.returncode != 0: @@ -169,9 +181,7 @@ def install(self, params): with open(self.INSTALLER, 'wb') as f: f.write(response['Body'].read()) - subprocess.check_call( - ['chmod', '+x', './{0}'.format(self.INSTALLER)] - ) + subprocess.check_call(['chmod', '+x', './{0}'.format(self.INSTALLER)]) credentials = self.session.get_credentials() environment = os.environ.copy() @@ -181,8 +191,7 @@ def install(self, params): if credentials.token is not None: environment['AWS_SESSION_TOKEN'] = credentials.token subprocess.check_call( - ['./{0}'.format(self.INSTALLER), 'auto'], - env=environment + ['./{0}'.format(self.INSTALLER), 'auto'], env=environment ) def uninstall(self, params): @@ -200,7 +209,7 @@ def _stop_agent(self, params): process = subprocess.Popen( ['service', 'codedeploy-agent', 'stop'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) (output, error) = process.communicate() if process.returncode != 0 and params.not_found_msg not in error: @@ -231,5 +240,7 @@ def _remove_agent(self, params): subprocess.check_call(['yum', '-y', 'erase', 'codedeploy-agent']) def _stop_agent(self, params): - params.not_found_msg = 'Redirecting to /bin/systemctl stop codedeploy-agent.service' + params.not_found_msg = ( + 'Redirecting to /bin/systemctl stop codedeploy-agent.service' + ) return Linux._stop_agent(self, params) diff --git a/awscli/customizations/codedeploy/uninstall.py b/awscli/customizations/codedeploy/uninstall.py index 7cbe88ae6f8c..4cc646de427d 100644 --- a/awscli/customizations/codedeploy/uninstall.py +++ b/awscli/customizations/codedeploy/uninstall.py @@ -11,12 +11,14 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
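A note on the Linux.install hunk above: the downloaded agent installer runs with the caller's AWS credentials passed through the environment rather than through any on-disk config. A minimal sketch, assuming `credentials` is a botocore Credentials object (access_key, secret_key, token); the helper name and the explicit access-key lines are ours, standing in for context the hunk elides:

    import os
    import subprocess

    def run_installer_with_credentials(installer, credentials):
        # As in Linux.install: copy the environment and inject the session
        # credentials so the CodeDeploy agent installer can call AWS APIs.
        environment = os.environ.copy()
        environment['AWS_ACCESS_KEY_ID'] = credentials.access_key
        environment['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key
        if credentials.token is not None:
            environment['AWS_SESSION_TOKEN'] = credentials.token
        subprocess.check_call(
            ['./{0}'.format(installer), 'auto'], env=environment
        )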
+import errno import os import sys -import errno -from awscli.customizations.codedeploy.utils import validate_instance, \ - validate_region +from awscli.customizations.codedeploy.utils import ( + validate_instance, + validate_region, +) from awscli.customizations.commands import BasicCommand diff --git a/awscli/customizations/codedeploy/utils.py b/awscli/customizations/codedeploy/utils.py index c3b7000ea54e..74365772cee4 100644 --- a/awscli/customizations/codedeploy/utils.py +++ b/awscli/customizations/codedeploy/utils.py @@ -13,14 +13,20 @@ import platform import re - -import awscli.compat -from awscli.compat import urlopen, URLError -from awscli.customizations.codedeploy.systems import System, Ubuntu, Windows, RHEL -from awscli.customizations.exceptions import ParamValidationError -from awscli.customizations.exceptions import ConfigurationError from socket import timeout +import awscli.compat +from awscli.compat import URLError, urlopen +from awscli.customizations.codedeploy.systems import ( + RHEL, + System, + Ubuntu, + Windows, +) +from awscli.customizations.exceptions import ( + ConfigurationError, + ParamValidationError, +) MAX_INSTANCE_NAME_LENGTH = 100 MAX_TAGS_PER_INSTANCE = 10 @@ -34,9 +40,7 @@ 'name': 'instance-name', 'synopsis': '--instance-name <instance-name>', 'required': True, - 'help_text': ( - 'Required. The name of the on-premises instance.' - ) + 'help_text': ('Required. The name of the on-premises instance.'), } IAM_USER_ARN_ARG = { @@ -45,7 +49,7 @@ 'required': False, 'help_text': ( 'Optional. The IAM user associated with the on-premises instance.' - ) + ), } @@ -100,8 +104,9 @@ def validate_tags(params): def validate_iam_user_arn(params): - if params.iam_user_arn and \ - not re.match(IAM_USER_ARN_PATTERN, params.iam_user_arn): + if params.iam_user_arn and not re.match( + IAM_USER_ARN_PATTERN, params.iam_user_arn + ): raise ParamValidationError('Invalid IAM user ARN.') @@ -115,9 +120,7 @@ def validate_instance(params): elif platform.system() == 'Windows': params.system = Windows(params) if 'system' not in params: - raise RuntimeError( - System.UNSUPPORTED_SYSTEM_MSG - ) + raise RuntimeError(System.UNSUPPORTED_SYSTEM_MSG) try: urlopen('http://169.254.169.254/latest/meta-data/', timeout=1) raise RuntimeError('Amazon EC2 instances are not supported.') @@ -137,7 +140,5 @@ def validate_s3_location(params, arg_name): else: raise ParamValidationError( '--{0} must specify the Amazon S3 URL format as ' - 's3://<bucket>/<key>.'.format( - arg_name.replace('_', '-') - ) + 's3://<bucket>/<key>.'.format(arg_name.replace('_', '-')) ) diff --git a/awscli/customizations/commands.py b/awscli/customizations/commands.py index 55722888704e..4d634065f7fa 100644 --- a/awscli/customizations/commands.py +++ b/awscli/customizations/commands.py @@ -1,29 +1,27 @@ -import logging import copy +import logging import os -from botocore import model -from botocore.compat import OrderedDict -from botocore.validate import validate_parameters - import awscli from awscli.argparser import ArgTableArgParser, SubCommandArgParser from awscli.argprocess import unpack_argument, unpack_cli_arg from awscli.arguments import CustomArgument, create_argument_model_from_schema +from awscli.bcdoc import docevents from awscli.clidocs import OperationDocumentEventHandler from awscli.commands import CLICommand -from awscli.bcdoc import docevents +from awscli.customizations.exceptions import ParamValidationError from awscli.help import HelpCommand from awscli.schema import SchemaTransformer from awscli.utils import add_command_lineage_to_user_agent_extra -from
awscli.customizations.exceptions import ParamValidationError +from botocore import model +from botocore.compat import OrderedDict +from botocore.validate import validate_parameters LOG = logging.getLogger(__name__) _open = open class _FromFile(object): - def __init__(self, *paths, **kwargs): """ ``**kwargs`` can contain a ``root_module`` argument @@ -43,7 +41,6 @@ def __init__(self, *paths, **kwargs): class BasicCommand(CLICommand): - """Basic top level command with no subcommands. If you want to create a new command, subclass this and @@ -140,17 +137,23 @@ def __call__(self, args, parsed_globals): # an arg parser and parse them. self._subcommand_table = self._build_subcommand_table() self._arg_table = self._build_arg_table() - event = 'before-building-argument-table-parser.%s' % \ - ".".join(self.lineage_names) - self._session.emit(event, argument_table=self._arg_table, args=args, - session=self._session) + event = 'before-building-argument-table-parser.%s' % ".".join( + self.lineage_names + ) + self._session.emit( + event, + argument_table=self._arg_table, + args=args, + session=self._session, + ) maybe_parsed_subcommand = self._parse_potential_subcommand( args, self._subcommand_table ) if maybe_parsed_subcommand is not None: new_args, subcommand_name = maybe_parsed_subcommand return self._subcommand_table[subcommand_name]( - new_args, parsed_globals) + new_args, parsed_globals + ) parser = ArgTableArgParser(self.arg_table, self.subcommand_table) parsed_args, remaining = parser.parse_known_args(args) @@ -166,20 +169,18 @@ def __call__(self, args, parsed_globals): cli_argument = self.arg_table[xformed] value = unpack_argument( - self._session, - 'custom', - self.name, - cli_argument, - value + self._session, 'custom', self.name, cli_argument, value ) # If this parameter has a schema defined, then allow plugins # a chance to process and override its value. 
if self._should_allow_plugins_override(cli_argument, value): - override = self._session\ - .emit_first_non_none_response( - 'process-cli-arg.%s.%s' % ('custom', self.name), - cli_argument=cli_argument, value=value, operation=None) + override = self._session.emit_first_non_none_response( + 'process-cli-arg.%s.%s' % ('custom', self.name), + cli_argument=cli_argument, + value=value, + operation=None, + ) if override is not None: # A plugin supplied a conversion @@ -189,7 +190,8 @@ def __call__(self, args, parsed_globals): # correct Python type (dict, list, etc) value = unpack_cli_arg(cli_argument, value) self._validate_value_against_schema( - cli_argument.argument_model, value) + cli_argument.argument_model, value + ) setattr(parsed_args, key, value) if hasattr(self._session, 'user_agent_extra'): @@ -213,8 +215,7 @@ def _validate_value_against_schema(self, model, value): validate_parameters(value, model) def _should_allow_plugins_override(self, param, value): - if (param and param.argument_model is not None and - value is not None): + if param and param.argument_model is not None and value is not None: return True return False @@ -236,10 +237,12 @@ def _build_subcommand_table(self): subcommand_class = subcommand['command_class'] subcommand_table[subcommand_name] = subcommand_class(self._session) name = '_'.join([c.name for c in self.lineage]) - self._session.emit('building-command-table.%s' % name, - command_table=subcommand_table, - session=self._session, - command_object=self) + self._session.emit( + 'building-command-table.%s' % name, + command_table=subcommand_table, + session=self._session, + command_object=self, + ) self._add_lineage(subcommand_table) return subcommand_table @@ -251,8 +254,12 @@ def create_help_command(self): command_help_table = {} if self.SUBCOMMANDS: command_help_table = self.create_help_command_table() - return BasicHelp(self._session, self, command_table=command_help_table, - arg_table=self.arg_table) + return BasicHelp( + self._session, + self, + command_table=command_help_table, + arg_table=self.arg_table, + ) def create_help_command_table(self): """ @@ -268,15 +275,16 @@ def create_help_command_table(self): def _build_arg_table(self): arg_table = OrderedDict() name = '_'.join([c.name for c in self.lineage]) - self._session.emit('building-arg-table.%s' % name, - arg_table=self.ARG_TABLE) + self._session.emit( + 'building-arg-table.%s' % name, arg_table=self.ARG_TABLE + ) for arg_data in self.ARG_TABLE: - # If a custom schema was passed in, create the argument_model # so that it can be validated and docs can be generated. 
if 'schema' in arg_data: argument_model = create_argument_model_from_schema( - arg_data.pop('schema')) + arg_data.pop('schema') + ) arg_data['argument_model'] = argument_model custom_argument = CustomArgument(**arg_data) @@ -325,15 +333,23 @@ def _raise_usage_error(self): raise ParamValidationError(error_msg) def _add_customization_to_user_agent(self): - add_command_lineage_to_user_agent_extra(self._session, self.lineage_names) + add_command_lineage_to_user_agent_extra( + self._session, self.lineage_names + ) class BasicHelp(HelpCommand): - - def __init__(self, session, command_object, command_table, arg_table, - event_handler_class=None): - super(BasicHelp, self).__init__(session, command_object, - command_table, arg_table) + def __init__( + self, + session, + command_object, + command_table, + arg_table, + event_handler_class=None, + ): + super(BasicHelp, self).__init__( + session, command_object, command_table, arg_table + ) # This is defined in HelpCommand so we're matching the # casing here. if event_handler_class is None: @@ -376,7 +392,9 @@ def _get_doc_contents(self, attr_name): root_module = value.root_module doc_path = os.path.join( os.path.abspath(os.path.dirname(root_module.__file__)), - 'examples', trailing_path) + 'examples', + trailing_path, + ) with _open(doc_path) as f: return f.read() else: @@ -394,7 +412,6 @@ def __call__(self, args, parsed_globals): class BasicDocHandler(OperationDocumentEventHandler): - def __init__(self, help_command): super(BasicDocHandler, self).__init__(help_command) self.doc = help_command.doc @@ -407,7 +424,8 @@ def doc_description(self, help_command, **kwargs): def doc_synopsis_start(self, help_command, **kwargs): if not help_command.synopsis: super(BasicDocHandler, self).doc_synopsis_start( - help_command=help_command, **kwargs) + help_command=help_command, **kwargs + ) else: self.doc.style.h2('Synopsis') self.doc.style.start_codeblock() @@ -424,8 +442,8 @@ def doc_synopsis_option(self, arg_name, help_command, **kwargs): # This arg is already documented so we can move on. return option_str = ' | '.join( - [a.cli_name for a in - self._arg_groups[argument.group_name]]) + [a.cli_name for a in self._arg_groups[argument.group_name]] + ) self._documented_arg_groups.append(argument.group_name) elif argument.cli_type_name == 'boolean': option_str = '%s' % argument.cli_name @@ -445,7 +463,8 @@ def doc_synopsis_option(self, arg_name, help_command, **kwargs): def doc_synopsis_end(self, help_command, **kwargs): if not help_command.synopsis and not help_command.command_table: super(BasicDocHandler, self).doc_synopsis_end( - help_command=help_command, **kwargs) + help_command=help_command, **kwargs + ) else: self.doc.style.end_codeblock() diff --git a/awscli/customizations/configservice/getstatus.py b/awscli/customizations/configservice/getstatus.py index d3a2dd8ce2e9..b5ddbd2301dc 100644 --- a/awscli/customizations/configservice/getstatus.py +++ b/awscli/customizations/configservice/getstatus.py @@ -25,8 +25,10 @@ def add_get_status(command_table, session, **kwargs): class GetStatusCommand(BasicCommand): NAME = 'get-status' - DESCRIPTION = ('Reports the status of all of configuration ' - 'recorders and delivery channels.') + DESCRIPTION = ( + 'Reports the status of all of configuration ' + 'recorders and delivery channels.' 
+ ) def __init__(self, session): self._config_client = None @@ -42,10 +44,11 @@ def _setup_client(self, parsed_globals): client_args = { 'verify': parsed_globals.verify_ssl, 'region_name': parsed_globals.region, - 'endpoint_url': parsed_globals.endpoint_url + 'endpoint_url': parsed_globals.endpoint_url, } - self._config_client = self._session.create_client('config', - **client_args) + self._config_client = self._session.create_client( + 'config', **client_args + ) def _check_configuration_recorders(self): status = self._config_client.describe_configuration_recorder_status() diff --git a/awscli/customizations/configservice/putconfigurationrecorder.py b/awscli/customizations/configservice/putconfigurationrecorder.py index 7e8bc4a237d7..136f92b0cd1d 100644 --- a/awscli/customizations/configservice/putconfigurationrecorder.py +++ b/awscli/customizations/configservice/putconfigurationrecorder.py @@ -18,7 +18,8 @@ def register_modify_put_configuration_recorder(cli): cli.register( 'building-argument-table.configservice.put-configuration-recorder', - extract_recording_group) + extract_recording_group, + ) def extract_recording_group(session, argument_table, **kwargs): @@ -29,10 +30,13 @@ def extract_recording_group(session, argument_table, **kwargs): configuration_recorder_argument = argument_table['configuration-recorder'] configuration_recorder_model = copy.deepcopy( - configuration_recorder_argument.argument_model) + configuration_recorder_argument.argument_model + ) recording_group_model = copy.deepcopy( - configuration_recorder_argument.argument_model. - members['recordingGroup']) + configuration_recorder_argument.argument_model.members[ + 'recordingGroup' + ] + ) del configuration_recorder_model.members['recordingGroup'] argument_table['configuration-recorder'] = ConfigurationRecorderArgument( @@ -41,7 +45,7 @@ def extract_recording_group(session, argument_table, **kwargs): operation_model=configuration_recorder_argument._operation_model, is_required=True, event_emitter=session.get_component('event_emitter'), - serialized_name='ConfigurationRecorder' + serialized_name='ConfigurationRecorder', ) argument_table['recording-group'] = RecordingGroupArgument( @@ -50,7 +54,7 @@ def extract_recording_group(session, argument_table, **kwargs): operation_model=configuration_recorder_argument._operation_model, is_required=False, event_emitter=session.get_component('event_emitter'), - serialized_name='recordingGroup' + serialized_name='recordingGroup', ) diff --git a/awscli/customizations/configservice/subscribe.py b/awscli/customizations/configservice/subscribe.py index 38a391efa1fb..7f6e0fedd15e 100644 --- a/awscli/customizations/configservice/subscribe.py +++ b/awscli/customizations/configservice/subscribe.py @@ -14,29 +14,43 @@ import sys from awscli.customizations.commands import BasicCommand -from awscli.customizations.utils import s3_bucket_exists from awscli.customizations.s3.utils import find_bucket_key +from awscli.customizations.utils import s3_bucket_exists - -S3_BUCKET = {'name': 's3-bucket', 'required': True, - 'help_text': ('The S3 bucket that the AWS Config delivery channel' - ' will use. If the bucket does not exist, it will ' - 'be automatically created. The value for this ' - 'argument should follow the form ' - 'bucket/prefix. Note that the prefix is optional.')} - -SNS_TOPIC = {'name': 'sns-topic', 'required': True, - 'help_text': ('The SNS topic that the AWS Config delivery channel' - ' will use. If the SNS topic does not exist, it ' - 'will be automatically created. 
Value for this ' - 'should be a valid SNS topic name or the ARN of an ' - 'existing SNS topic.')} - -IAM_ROLE = {'name': 'iam-role', 'required': True, - 'help_text': ('The IAM role that the AWS Config configuration ' - 'recorder will use to record current resource ' - 'configurations. Value for this should be the ' - 'ARN of the desired IAM role.')} +S3_BUCKET = { + 'name': 's3-bucket', + 'required': True, + 'help_text': ( + 'The S3 bucket that the AWS Config delivery channel' + ' will use. If the bucket does not exist, it will ' + 'be automatically created. The value for this ' + 'argument should follow the form ' + 'bucket/prefix. Note that the prefix is optional.' + ), +} + +SNS_TOPIC = { + 'name': 'sns-topic', + 'required': True, + 'help_text': ( + 'The SNS topic that the AWS Config delivery channel' + ' will use. If the SNS topic does not exist, it ' + 'will be automatically created. Value for this ' + 'should be a valid SNS topic name or the ARN of an ' + 'existing SNS topic.' + ), +} + +IAM_ROLE = { + 'name': 'iam-role', + 'required': True, + 'help_text': ( + 'The IAM role that the AWS Config configuration ' + 'recorder will use to record current resource ' + 'configurations. Value for this should be the ' + 'ARN of the desired IAM role.' + ), +} def register_subscribe(cli): @@ -49,10 +63,12 @@ def add_subscribe(command_table, session, **kwargs): class SubscribeCommand(BasicCommand): NAME = 'subscribe' - DESCRIPTION = ('Subscribes user to AWS Config by creating an AWS Config ' - 'delivery channel and configuration recorder to track ' - 'AWS resource configurations. The names of the default ' - 'channel and configuration recorder will be default.') + DESCRIPTION = ( + 'Subscribes user to AWS Config by creating an AWS Config ' + 'delivery channel and configuration recorder to track ' + 'AWS resource configurations. The names of the default ' + 'channel and configuration recorder will be default.' + ) ARG_TABLE = [S3_BUCKET, SNS_TOPIC, IAM_ROLE] def __init__(self, session): @@ -79,7 +95,7 @@ def _run_main(self, parsed_args, parsed_globals): self._config_client.put_configuration_recorder( ConfigurationRecorder={ 'name': name, - 'roleARN': parsed_args.iam_role + 'roleARN': parsed_args.iam_role, } ) @@ -87,14 +103,15 @@ def _run_main(self, parsed_args, parsed_globals): delivery_channel = { 'name': name, 's3BucketName': bucket, - 'snsTopicARN': sns_topic_arn + 'snsTopicARN': sns_topic_arn, } if prefix: delivery_channel['s3KeyPrefix'] = prefix self._config_client.put_delivery_channel( - DeliveryChannel=delivery_channel) + DeliveryChannel=delivery_channel + ) # Start the configuration recorder. self._config_client.start_configuration_recorder( @@ -106,7 +123,8 @@ def _run_main(self, parsed_args, parsed_globals): sys.stdout.write('Configuration Recorders: ') response = self._config_client.describe_configuration_recorders() sys.stdout.write( - json.dumps(response['ConfigurationRecorders'], indent=4)) + json.dumps(response['ConfigurationRecorders'], indent=4) + ) sys.stdout.write('\n\n') # Describe the delivery channels @@ -120,14 +138,15 @@ def _run_main(self, parsed_args, parsed_globals): def _setup_clients(self, parsed_globals): client_args = { 'verify': parsed_globals.verify_ssl, - 'region_name': parsed_globals.region + 'region_name': parsed_globals.region, } self._s3_client = self._session.create_client('s3', **client_args) self._sns_client = self._session.create_client('sns', **client_args) # Use the specified endpoint only for config related commands. 
client_args['endpoint_url'] = parsed_globals.endpoint_url - self._config_client = self._session.create_client('config', - **client_args) + self._config_client = self._session.create_client( + 'config', **client_args + ) class S3BucketHelper(object): @@ -149,9 +168,7 @@ def _check_bucket_exists(self, bucket): def _create_bucket(self, bucket): region_name = self._s3_client.meta.region_name - params = { - 'Bucket': bucket - } + params = {'Bucket': bucket} bucket_config = {'LocationConstraint': region_name} if region_name != 'us-east-1': params['CreateBucketConfiguration'] = bucket_config diff --git a/awscli/customizations/configure/__init__.py b/awscli/customizations/configure/__init__.py index 8728ee6046a8..7c95d103a0dc 100644 --- a/awscli/customizations/configure/__init__.py +++ b/awscli/customizations/configure/__init__.py @@ -11,15 +11,15 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import string + from awscli.compat import shlex NOT_SET = '' -PREDEFINED_SECTION_NAMES = ('plugins') +PREDEFINED_SECTION_NAMES = 'plugins' _WHITESPACE = ' \t' class ConfigValue(object): - def __init__(self, value, config_type, config_variable): self.value = value self.config_type = config_type diff --git a/awscli/customizations/configure/addmodel.py b/awscli/customizations/configure/addmodel.py index 566ae9dfcc2d..604af5da4974 100644 --- a/awscli/customizations/configure/addmodel.py +++ b/awscli/customizations/configure/addmodel.py @@ -13,9 +13,8 @@ import json import os -from botocore.model import ServiceModel - from awscli.customizations.commands import BasicCommand +from botocore.model import ServiceModel def _get_endpoint_prefix_to_name_mappings(session): @@ -76,11 +75,12 @@ def get_model_location(session, service_definition, service_name=None): # not the one set by AWS_DATA_PATH) data_path = session.get_component('data_loader').CUSTOMER_DATA_PATH # Use the version of the model to determine the file's naming convention. - service_model_name = ( - 'service-%d.json' % int( - float(service_definition.get('version', '2.0')))) - return os.path.join(data_path, service_name, api_version, - service_model_name) + service_model_name = 'service-%d.json' % int( + float(service_definition.get('version', '2.0')) + ) + return os.path.join( + data_path, service_name, api_version, service_model_name + ) class AddModelCommand(BasicCommand): @@ -92,11 +92,18 @@ class AddModelCommand(BasicCommand): 'provided.' ) ARG_TABLE = [ - {'name': 'service-model', 'required': True, 'help_text': ( - 'The contents of the service JSON model.')}, - {'name': 'service-name', 'help_text': ( - 'Overrides the default name used by the service JSON ' - 'model to generate CLI service commands and Boto3 clients.')} + { + 'name': 'service-model', + 'required': True, + 'help_text': ('The contents of the service JSON model.'), + }, + { + 'name': 'service-name', + 'help_text': ( + 'Overrides the default name used by the service JSON ' + 'model to generate CLI service commands and Boto3 clients.' + ), + }, ] def _run_main(self, parsed_args, parsed_globals): diff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py index e1365d1b9c75..41c5868f60ec 100644 --- a/awscli/customizations/configure/configure.py +++ b/awscli/customizations/configure/configure.py @@ -10,38 +10,37 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. -import os import logging - -from botocore.exceptions import ProfileNotFound +import os from awscli.compat import compat_input from awscli.customizations.commands import BasicCommand from awscli.customizations.configure.addmodel import AddModelCommand -from awscli.customizations.configure.set import ConfigureSetCommand +from awscli.customizations.configure.exportcreds import ( + ConfigureExportCredentialsCommand, +) from awscli.customizations.configure.get import ConfigureGetCommand -from awscli.customizations.configure.list import ConfigureListCommand -from awscli.customizations.configure.writer import ConfigFileWriter from awscli.customizations.configure.importer import ConfigureImportCommand +from awscli.customizations.configure.list import ConfigureListCommand from awscli.customizations.configure.listprofiles import ListProfilesCommand -from awscli.customizations.configure.sso import ConfigureSSOCommand -from awscli.customizations.configure.sso import ConfigureSSOSessionCommand -from awscli.customizations.configure.exportcreds import \ - ConfigureExportCredentialsCommand +from awscli.customizations.configure.set import ConfigureSetCommand +from awscli.customizations.configure.sso import ( + ConfigureSSOCommand, + ConfigureSSOSessionCommand, +) +from awscli.customizations.configure.writer import ConfigFileWriter +from botocore.exceptions import ProfileNotFound from . import mask_value, profile_to_section - logger = logging.getLogger(__name__) def register_configure_cmd(cli): - cli.register('building-command-table.main', - ConfigureCommand.add_command) + cli.register('building-command-table.main', ConfigureCommand.add_command) class InteractivePrompter(object): - def get_value(self, current_value, config_name, prompt_text=''): if config_name in ('aws_access_key_id', 'aws_secret_access_key'): current_value = mask_value(current_value) @@ -57,7 +56,7 @@ def get_value(self, current_value, config_name, prompt_text=''): class ConfigureCommand(BasicCommand): NAME = 'configure' DESCRIPTION = BasicCommand.FROM_FILE() - SYNOPSIS = ('aws configure [--profile profile-name]') + SYNOPSIS = 'aws configure [--profile profile-name]' EXAMPLES = ( 'To create a new configuration::\n' '\n' @@ -84,8 +83,10 @@ class ConfigureCommand(BasicCommand): {'name': 'list-profiles', 'command_class': ListProfilesCommand}, {'name': 'sso', 'command_class': ConfigureSSOCommand}, {'name': 'sso-session', 'command_class': ConfigureSSOSessionCommand}, - {'name': 'export-credentials', - 'command_class': ConfigureExportCredentialsCommand}, + { + 'name': 'export-credentials', + 'command_class': ConfigureExportCredentialsCommand, + }, ] # If you want to add new values to prompt, update this list here. 
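The ``InteractivePrompter`` refactored above is what makes re-running ``aws configure`` safe: a stored secret is masked before being echoed back as the prompt default, and an empty response keeps the existing value. A minimal sketch of that flow, assuming a ``mask_value`` helper that keeps only the last four characters (the helper body and prompt format here are illustrative, not the CLI's exact implementation):

.. code-block:: python

    def mask_value(current_value):
        # Assumed behavior: hide all but the last 4 characters.
        if current_value is None:
            return None
        return '*' * 16 + current_value[-4:]

    def get_value(current_value, config_name, prompt_text=''):
        if config_name in ('aws_access_key_id', 'aws_secret_access_key'):
            current_value = mask_value(current_value)
        response = input(f'{prompt_text} [{current_value}]: ')
        if not response:
            response = None  # plain Enter keeps the existing value
        return response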
@@ -117,12 +118,14 @@ def _run_main(self, parsed_args, parsed_globals): config = {} for config_name, prompt_text in self.VALUES_TO_PROMPT: current_value = config.get(config_name) - new_value = self._prompter.get_value(current_value, config_name, - prompt_text) + new_value = self._prompter.get_value( + current_value, config_name, prompt_text + ) if new_value is not None and new_value != current_value: new_values[config_name] = new_value config_filename = os.path.expanduser( - self._session.get_config_variable('config_file')) + self._session.get_config_variable('config_file') + ) if new_values: profile = self._session.profile self._write_out_creds_file_values(new_values, profile) @@ -140,15 +143,18 @@ def _write_out_creds_file_values(self, new_values, profile_name): credential_file_values = {} if 'aws_access_key_id' in new_values: credential_file_values['aws_access_key_id'] = new_values.pop( - 'aws_access_key_id') + 'aws_access_key_id' + ) if 'aws_secret_access_key' in new_values: credential_file_values['aws_secret_access_key'] = new_values.pop( - 'aws_secret_access_key') + 'aws_secret_access_key' + ) if credential_file_values: if profile_name is not None: credential_file_values['__section__'] = profile_name shared_credentials_filename = os.path.expanduser( - self._session.get_config_variable('credentials_file')) + self._session.get_config_variable('credentials_file') + ) self._config_writer.update_config( - credential_file_values, - shared_credentials_filename) + credential_file_values, shared_credentials_filename + ) diff --git a/awscli/customizations/configure/exportcreds.py b/awscli/customizations/configure/exportcreds.py index 2358efca5405..07c61baab7c1 100644 --- a/awscli/customizations/configure/exportcreds.py +++ b/awscli/customizations/configure/exportcreds.py @@ -10,21 +10,21 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import os -import io -import sys import csv +import io import json -from datetime import datetime +import os +import sys from collections import namedtuple +from datetime import datetime from awscli.customizations.commands import BasicCommand from awscli.customizations.exceptions import ConfigurationError - # Takes botocore's ReadOnlyCredentials and exposes an expiry_time. 
Credentials = namedtuple( - 'Credentials', ['access_key', 'secret_key', 'token', 'expiry_time']) + 'Credentials', ['access_key', 'secret_key', 'token', 'expiry_time'] +) def convert_botocore_credentials(credentials): @@ -47,7 +47,6 @@ def convert_botocore_credentials(credentials): class BaseCredentialFormatter(object): - FORMAT = None DOCUMENTATION = "" @@ -61,27 +60,28 @@ def display_credentials(self, credentials): class BasePerLineFormatter(BaseCredentialFormatter): - _VAR_FORMAT = 'export {var_name}={var_value}' def display_credentials(self, credentials): - output = ( - self._format_line('AWS_ACCESS_KEY_ID', credentials.access_key) + - self._format_line('AWS_SECRET_ACCESS_KEY', credentials.secret_key)) + output = self._format_line( + 'AWS_ACCESS_KEY_ID', credentials.access_key + ) + self._format_line('AWS_SECRET_ACCESS_KEY', credentials.secret_key) if credentials.token is not None: output += self._format_line('AWS_SESSION_TOKEN', credentials.token) if credentials.expiry_time is not None: output += self._format_line( - 'AWS_CREDENTIAL_EXPIRATION', credentials.expiry_time) + 'AWS_CREDENTIAL_EXPIRATION', credentials.expiry_time + ) self._stream.write(output) def _format_line(self, var_name, var_value): - return self._VAR_FORMAT.format( - var_name=var_name, var_value=var_value) + '\n' + return ( + self._VAR_FORMAT.format(var_name=var_name, var_value=var_value) + + '\n' + ) class BashEnvVarFormatter(BasePerLineFormatter): - FORMAT = 'env' DOCUMENTATION = ( "Display credentials as exported shell variables: " @@ -91,7 +91,6 @@ class BashEnvVarFormatter(BasePerLineFormatter): class BashNoExportEnvFormatter(BasePerLineFormatter): - FORMAT = 'env-no-export' DOCUMENTATION = ( "Display credentials as non-exported shell variables: " @@ -101,7 +100,6 @@ class BashNoExportEnvFormatter(BasePerLineFormatter): class PowershellFormatter(BasePerLineFormatter): - FORMAT = 'powershell' DOCUMENTATION = ( 'Display credentials as PowerShell environment variables: ' @@ -111,7 +109,6 @@ class PowershellFormatter(BasePerLineFormatter): class WindowsCmdFormatter(BasePerLineFormatter): - FORMAT = 'windows-cmd' DOCUMENTATION = ( 'Display credentials as Windows cmd environment variables: ' @@ -121,7 +118,6 @@ class WindowsCmdFormatter(BasePerLineFormatter): class CredentialProcessFormatter(BaseCredentialFormatter): - FORMAT = 'process' DOCUMENTATION = ( "Display credentials as JSON output, in the schema " @@ -149,15 +145,23 @@ def display_credentials(self, credentials): SUPPORTED_FORMATS = { - format_cls.FORMAT: format_cls for format_cls in - [CredentialProcessFormatter, BashEnvVarFormatter, BashNoExportEnvFormatter, - PowershellFormatter, WindowsCmdFormatter] + format_cls.FORMAT: format_cls + for format_cls in [ + CredentialProcessFormatter, + BashEnvVarFormatter, + BashNoExportEnvFormatter, + PowershellFormatter, + WindowsCmdFormatter, + ] } def generate_docs(formats): - lines = ['The output format to display credentials. ' - 'Defaults to `process`. ', '
<ul>'] + lines = [ + 'The output format to display credentials. ' + 'Defaults to `process`. ', + '<ul>', + ] for name, cls in formats.items(): line = f'<li>``{name}`` - {cls.DOCUMENTATION}</li>' lines.append(line) @@ -166,7 +170,6 @@ class ConfigureExportCredentialsCommand(BasicCommand): - NAME = 'export-credentials' SYNOPSIS = 'aws configure export-credentials --profile profile-name' DESCRIPTION = ( @@ -179,11 +182,13 @@ "``--output`` options." ) ARG_TABLE = [ - {'name': 'format', - 'help_text': generate_docs(SUPPORTED_FORMATS), - 'action': 'store', - 'choices': list(SUPPORTED_FORMATS), - 'default': CredentialProcessFormatter.FORMAT}, + { + 'name': 'format', + 'help_text': generate_docs(SUPPORTED_FORMATS), + 'action': 'store', + 'choices': list(SUPPORTED_FORMATS), + 'default': CredentialProcessFormatter.FORMAT, + }, ] _RECURSION_VAR = '_AWS_CLI_PROFILE_CHAIN' # Two levels is reasonable because you might explicitly run @@ -208,7 +213,8 @@ def __init__(self, session, out_stream=None, error_stream=None, env=None): def _detect_recursion_barrier(self): profile = self._get_current_profile() seen_profiles = self._parse_profile_chain( - self._env.get(self._RECURSION_VAR, '')) + self._env.get(self._RECURSION_VAR, '') + ) if len(seen_profiles) >= self._MAX_RECURSION: raise ConfigurationError( f"Maximum recursive credential process resolution reached " @@ -224,7 +230,8 @@ def _update_recursion_barrier(self): profile = self._get_current_profile() seen_profiles = self._parse_profile_chain( - self._env.get(self._RECURSION_VAR, '')) + self._env.get(self._RECURSION_VAR, '') + ) seen_profiles.append(profile) serialized = self._serialize_to_csv_str(seen_profiles) self._env[self._RECURSION_VAR] = serialized @@ -254,10 +261,12 @@ def _run_main(self, parsed_args, parsed_globals): except Exception as e: original_msg = str(e).strip() raise ConfigurationError( - f"Unable to retrieve credentials: {original_msg}\n") + f"Unable to retrieve credentials: {original_msg}\n" + ) if creds is None: raise ConfigurationError( - "Unable to retrieve credentials: no credentials found") + "Unable to retrieve credentials: no credentials found" + ) creds_with_expiry = convert_botocore_credentials(creds) formatter = SUPPORTED_FORMATS[parsed_args.format](self._out_stream) formatter.display_credentials(creds_with_expiry) diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py index 5edad7e2d933..74c9efb66255 100644 --- a/awscli/customizations/configure/get.py +++ b/awscli/customizations/configure/get.py @@ -10,8 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License.
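The recursion barrier reindented above exists because ``export-credentials`` can itself be wired up as a ``credential_process``, so profile A can resolve through profile B and loop back again. Each invocation appends its profile to the ``_AWS_CLI_PROFILE_CHAIN`` environment variable and refuses to recurse past the allowed depth (the diff's comment calls two levels reasonable). A hedged sketch of the same technique; the CSV helpers and the hard-coded limit are illustrative stand-ins for the command's private methods:

.. code-block:: python

    import csv
    import io

    RECURSION_VAR = '_AWS_CLI_PROFILE_CHAIN'  # name taken from the diff
    MAX_RECURSION = 2  # assumed limit; the real value is a class attribute

    def parse_profile_chain(value):
        if not value:
            return []
        # The chain is stored as a single CSV row.
        return next(csv.reader(io.StringIO(value)))

    def check_and_update_barrier(env, profile):
        seen = parse_profile_chain(env.get(RECURSION_VAR, ''))
        if len(seen) >= MAX_RECURSION:
            raise RuntimeError(
                'Maximum recursive credential process resolution '
                f'reached while resolving profile {profile!r}.'
            )
        seen.append(profile)
        out = io.StringIO()
        csv.writer(out).writerow(seen)
        env[RECURSION_VAR] = out.getvalue().strip()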
-import sys import logging +import sys from awscli.customizations.commands import BasicCommand @@ -22,15 +22,19 @@ class ConfigureGetCommand(BasicCommand): NAME = 'get' - DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get', - '_description.rst') + DESCRIPTION = BasicCommand.FROM_FILE( + 'configure', 'get', '_description.rst' + ) SYNOPSIS = 'aws configure get varname [--profile profile-name]' EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst') ARG_TABLE = [ - {'name': 'varname', - 'help_text': 'The name of the config value to retrieve.', - 'action': 'store', - 'cli_type_name': 'string', 'positional_arg': True}, + { + 'name': 'varname', + 'help_text': 'The name of the config value to retrieve.', + 'action': 'store', + 'cli_type_name': 'string', + 'positional_arg': True, + }, ] def __init__(self, session, stream=None, error_stream=None): @@ -53,7 +57,7 @@ def _run_main(self, args, parsed_globals): else: value = self._get_dotted_config_value(varname) - LOG.debug(u'Config value retrieved: %s' % value) + LOG.debug('Config value retrieved: %s' % value) if isinstance(value, str): self._stream.write(value) @@ -81,8 +85,9 @@ def _get_dotted_config_value(self, varname): value = full_config.get(section, {}).get(config_name) if value is None: # Try to retrieve it from the profile config. - value = full_config['profiles'].get( - section, {}).get(config_name) + value = ( + full_config['profiles'].get(section, {}).get(config_name) + ) return value if parts[0] == 'profile': @@ -93,7 +98,8 @@ def _get_dotted_config_value(self, varname): # default.emr-dev.emr.instance_profile) If not, go further to check # if varname starts with a known profile name elif parts[0] == 'default' or ( - parts[0] in self._session.full_config['profiles']): + parts[0] in self._session.full_config['profiles'] + ): profile_name = parts[0] config_name = parts[1] remaining = parts[2:] @@ -104,8 +110,11 @@ def _get_dotted_config_value(self, varname): config_name = parts[0] remaining = parts[1:] - value = self._session.full_config['profiles'].get( - profile_name, {}).get(config_name) + value = ( + self._session.full_config['profiles'] + .get(profile_name, {}) + .get(config_name) + ) if len(remaining) == 1: try: value = value.get(remaining[-1]) diff --git a/awscli/customizations/configure/importer.py b/awscli/customizations/configure/importer.py index dc7e6e088ba9..fa2975f24fb1 100644 --- a/awscli/customizations/configure/importer.py +++ b/awscli/customizations/configure/importer.py @@ -10,13 +10,13 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import codecs import os import sys -import codecs -from awscli.customizations.utils import uni_print from awscli.customizations.commands import BasicCommand from awscli.customizations.configure.writer import ConfigFileWriter +from awscli.customizations.utils import uni_print class ConfigureImportCommand(BasicCommand): @@ -33,33 +33,40 @@ class ConfigureImportCommand(BasicCommand): '--profile-prefix test-\n\n' ) ARG_TABLE = [ - {'name': 'csv', - 'required': True, - 'help_text': ( - 'The credentials in CSV format generated by the AWS web console.' - 'The CSV file must contain the "User name", "Access key ID", and ' - '"Secret access key" headers.' 
- ), - 'cli_type_name': 'string'}, - {'name': 'skip-invalid', - 'dest': 'skip_invalid', - 'help_text': ( - 'Skip entries that are invalid or do not have programmatic ' - 'access instead of failing.' - ), - 'default': False, - 'action': 'store_true'}, - {'name': 'profile-prefix', - 'dest': 'profile_prefix', - 'help_text': ( - 'Adds the specified prefix to the beginning of all profile names.' - ), - 'default': '', - 'cli_type_name': 'string'}, + { + 'name': 'csv', + 'required': True, + 'help_text': ( + 'The credentials in CSV format generated by the AWS web console.' + 'The CSV file must contain the "User name", "Access key ID", and ' + '"Secret access key" headers.' + ), + 'cli_type_name': 'string', + }, + { + 'name': 'skip-invalid', + 'dest': 'skip_invalid', + 'help_text': ( + 'Skip entries that are invalid or do not have programmatic ' + 'access instead of failing.' + ), + 'default': False, + 'action': 'store_true', + }, + { + 'name': 'profile-prefix', + 'dest': 'profile_prefix', + 'help_text': ( + 'Adds the specified prefix to the beginning of all profile names.' + ), + 'default': '', + 'cli_type_name': 'string', + }, ] - def __init__(self, session, csv_parser=None, importer=None, - out_stream=None): + def __init__( + self, session, csv_parser=None, importer=None, out_stream=None + ): super(ConfigureImportCommand, self).__init__(session) if csv_parser is None: csv_parser = CSVCredentialParser() @@ -83,7 +90,8 @@ def _import_csv(self, contents): credentials = self._csv_parser.parse_credentials(contents) for credential in credentials: self._importer.import_credential( - credential, config_path, + credential, + config_path, profile_prefix=self._profile_prefix, ) import_msg = 'Successfully imported %s profile(s)\n' % len(credentials) @@ -189,7 +197,9 @@ class CredentialImporter(object): def __init__(self, writer): self._config_writer = writer - def import_credential(self, credential, credentials_file, profile_prefix=''): + def import_credential( + self, credential, credentials_file, profile_prefix='' + ): name, akid, sak = credential config_profile = { '__section__': profile_prefix + name, diff --git a/awscli/customizations/configure/list.py b/awscli/customizations/configure/list.py index c1f0f342e058..5d44125a113f 100644 --- a/awscli/customizations/configure/list.py +++ b/awscli/customizations/configure/list.py @@ -14,7 +14,7 @@ from awscli.customizations.commands import BasicCommand -from . import ConfigValue, NOT_SET +from . 
import NOT_SET, ConfigValue class ConfigureListCommand(BasicCommand): @@ -57,10 +57,12 @@ def __init__(self, session, stream=None): self._stream = stream def _run_main(self, args, parsed_globals): - self._display_config_value(ConfigValue('Value', 'Type', 'Location'), - 'Name') - self._display_config_value(ConfigValue('-----', '----', '--------'), - '----') + self._display_config_value( + ConfigValue('Value', 'Type', 'Location'), 'Name' + ) + self._display_config_value( + ConfigValue('-----', '----', '--------'), '----' + ) if parsed_globals and parsed_globals.profile is not None: profile = ConfigValue(self._session.profile, 'manual', '--profile') @@ -77,9 +79,15 @@ def _run_main(self, args, parsed_globals): return 0 def _display_config_value(self, config_value, config_name): - self._stream.write('%10s %24s %16s %s\n' % ( - config_name, config_value.value, config_value.config_type, - config_value.config_variable)) + self._stream.write( + '%10s %24s %16s %s\n' + % ( + config_name, + config_value.value, + config_value.config_type, + config_value.config_variable, + ) + ) def _lookup_credentials(self): # First try it with _lookup_config. It's possible @@ -105,10 +113,12 @@ def _lookup_credentials(self): # visible from botocore.credentials. I think # the credentials.method is sufficient to show # where the credentials are coming from. - access_key = ConfigValue(credentials.access_key, - credentials.method, '') - secret_key = ConfigValue(credentials.secret_key, - credentials.method, '') + access_key = ConfigValue( + credentials.access_key, credentials.method, '' + ) + secret_key = ConfigValue( + credentials.secret_key, credentials.method, '' + ) access_key.mask_value() secret_key.mask_value() return access_key, secret_key @@ -135,14 +145,17 @@ def _lookup_in_env_and_config(self, name): # First try to look up the variable in the env. value = self._session.get_config_variable(name, methods=('env',)) if value is not None: - return ConfigValue(value, 'env', - self._session.session_var_map[name][1]) + return ConfigValue( + value, 'env', self._session.session_var_map[name][1] + ) # Then try to look up the variable in the config file. value = self._session.get_config_variable(name, methods=('config',)) if value is not None: return ConfigValue( - value, 'config-file', - self._session.get_config_variable('config_file')) + value, + 'config-file', + self._session.get_config_variable('config_file'), + ) def _lookup_config(self, name): val = self._lookup_in_env_and_config(name) diff --git a/awscli/customizations/configure/listprofiles.py b/awscli/customizations/configure/listprofiles.py index 411fb858ad8c..1605350365a9 100644 --- a/awscli/customizations/configure/listprofiles.py +++ b/awscli/customizations/configure/listprofiles.py @@ -18,12 +18,8 @@ class ListProfilesCommand(BasicCommand): NAME = 'list-profiles' - DESCRIPTION = ( - 'List the profiles available to the AWS CLI.' - ) - EXAMPLES = ( - 'aws configure list-profiles\n\n' - ) + DESCRIPTION = 'List the profiles available to the AWS CLI.' 
+ EXAMPLES = 'aws configure list-profiles\n\n' def __init__(self, session, out_stream=None): super(ListProfilesCommand, self).__init__(session) diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py index c17f38d955a3..7f45ee8fd318 100644 --- a/awscli/customizations/configure/set.py +++ b/awscli/customizations/configure/set.py @@ -20,25 +20,35 @@ class ConfigureSetCommand(BasicCommand): NAME = 'set' - DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set', - '_description.rst') + DESCRIPTION = BasicCommand.FROM_FILE( + 'configure', 'set', '_description.rst' + ) SYNOPSIS = 'aws configure set varname value [--profile profile-name]' EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst') ARG_TABLE = [ - {'name': 'varname', - 'help_text': 'The name of the config value to set.', - 'action': 'store', - 'cli_type_name': 'string', 'positional_arg': True}, - {'name': 'value', - 'help_text': 'The value to set.', - 'action': 'store', - 'no_paramfile': True, # To disable the default paramfile behavior - 'cli_type_name': 'string', 'positional_arg': True}, + { + 'name': 'varname', + 'help_text': 'The name of the config value to set.', + 'action': 'store', + 'cli_type_name': 'string', + 'positional_arg': True, + }, + { + 'name': 'value', + 'help_text': 'The value to set.', + 'action': 'store', + 'no_paramfile': True, # To disable the default paramfile behavior + 'cli_type_name': 'string', + 'positional_arg': True, + }, ] # Any variables specified in this list will be written to # the ~/.aws/credentials file instead of ~/.aws/config. - _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key', - 'aws_session_token'] + _WRITE_TO_CREDS_FILE = [ + 'aws_access_key_id', + 'aws_secret_access_key', + 'aws_session_token', + ] def __init__(self, session, config_writer=None): super(ConfigureSetCommand, self).__init__(session) diff --git a/awscli/customizations/configure/sso.py b/awscli/customizations/configure/sso.py index fc5feb83d729..1ed9dd1d1af9 100644 --- a/awscli/customizations/configure/sso.py +++ b/awscli/customizations/configure/sso.py @@ -13,37 +13,38 @@ import collections import itertools import json -import os import logging +import os import re import colorama -from botocore import UNSIGNED -from botocore.config import Config -from botocore.configprovider import ConstantProvider -from botocore.exceptions import ProfileNotFound -from botocore.utils import is_valid_endpoint_url - from prompt_toolkit import prompt as ptk_prompt from prompt_toolkit.application import get_app from prompt_toolkit.completion import WordCompleter from prompt_toolkit.formatted_text import FormattedText from prompt_toolkit.styles import Style -from prompt_toolkit.validation import Validator -from prompt_toolkit.validation import ValidationError +from prompt_toolkit.validation import ValidationError, Validator -from awscli.customizations.utils import uni_print from awscli.customizations.configure import ( - profile_to_section, get_section_header, + get_section_header, + profile_to_section, ) from awscli.customizations.configure.writer import ConfigFileWriter -from awscli.customizations.wizard.ui.selectmenu import select_menu from awscli.customizations.sso.utils import ( - do_sso_login, parse_sso_registration_scopes, PrintOnlyHandler, LOGIN_ARGS, + LOGIN_ARGS, BaseSSOCommand, + PrintOnlyHandler, + do_sso_login, + parse_sso_registration_scopes, ) +from awscli.customizations.utils import uni_print +from awscli.customizations.wizard.ui.selectmenu import select_menu from 
awscli.formatter import CLI_OUTPUT_FORMATS - +from botocore import UNSIGNED +from botocore.config import Config +from botocore.configprovider import ConstantProvider +from botocore.exceptions import ProfileNotFound +from botocore.utils import is_valid_endpoint_url logger = logging.getLogger(__name__) @@ -95,7 +96,8 @@ def validate(self, document): return if not self._is_comma_separated_list(document.text): self._raise_validation_error( - document, 'Scope values must be separated by commas') + document, 'Scope values must be separated by commas' + ) def _is_comma_separated_list(self, value): scopes = value.split(',') @@ -118,21 +120,26 @@ def _create_completer(self, completions): completions = [] completer_kwargs = { 'words': completions, - 'pattern': re.compile(r'\S+') + 'pattern': re.compile(r'\S+'), } if isinstance(completions, dict): completer_kwargs['meta_dict'] = completions completer_kwargs['words'] = list(completions.keys()) return WordCompleter(**completer_kwargs) - def get_value(self, current_value, prompt_text='', - completions=None, validator=None, toolbar=None, - prompt_fmt=None): + def get_value( + self, + current_value, + prompt_text='', + completions=None, + validator=None, + toolbar=None, + prompt_fmt=None, + ): if prompt_fmt is None: prompt_fmt = self._DEFAULT_PROMPT_FORMAT prompt_string = prompt_fmt.format( - prompt_text=prompt_text, - current_value=current_value + prompt_text=prompt_text, current_value=current_value ) prompter_kwargs = { 'validator': validator, @@ -192,7 +199,8 @@ def __init__(self, botocore_session, prompter): self._botocore_session = botocore_session self._prompter = prompter self._sso_sessions = self._botocore_session.full_config.get( - 'sso_sessions', {}) + 'sso_sessions', {} + ) self._sso_session = None self.sso_session_config = {} @@ -204,7 +212,8 @@ def sso_session(self): def sso_session(self, value): self._sso_session = value self.sso_session_config = self._sso_sessions.get( - self._sso_session, {}).copy() + self._sso_session, {} + ).copy() def prompt_for_sso_session(self, required=True): prompt_text = 'SSO session name' @@ -217,7 +226,8 @@ def prompt_for_sso_session(self, required=True): if not required: prompt_fmt = f'{prompt_text} (Recommended): ' sso_session = self._prompt_for( - 'sso_session', prompt_text, + 'sso_session', + prompt_text, completions=sorted(self._sso_sessions), toolbar=self._get_sso_session_toolbar, validator_cls=validator_cls, @@ -229,43 +239,55 @@ def prompt_for_sso_session(self, required=True): def prompt_for_sso_start_url(self): return self._prompt_for( - 'sso_start_url', 'SSO start URL', + 'sso_start_url', + 'SSO start URL', completions=self._get_potential_start_urls(), validator_cls=StartUrlValidator, ) def prompt_for_sso_region(self): return self._prompt_for( - 'sso_region', 'SSO region', + 'sso_region', + 'SSO region', completions=self._get_potential_sso_regions(), validator_cls=RequiredInputValidator, ) def prompt_for_sso_registration_scopes(self): if 'sso_registration_scopes' not in self.sso_session_config: - self.sso_session_config['sso_registration_scopes'] = \ + self.sso_session_config['sso_registration_scopes'] = ( self._DEFAULT_SSO_SCOPE + ) raw_scopes = self._prompt_for( - 'sso_registration_scopes', 'SSO registration scopes', + 'sso_registration_scopes', + 'SSO registration scopes', completions=self._get_potential_sso_registrations_scopes(), validator_cls=ScopesValidator, ) return parse_sso_registration_scopes(raw_scopes) - def _prompt_for(self, config_name, text, - completions=None, validator_cls=None, - 
toolbar=None, prompt_fmt=None, current_value=None): + def _prompt_for( + self, + config_name, + text, + completions=None, + validator_cls=None, + toolbar=None, + prompt_fmt=None, + current_value=None, + ): if current_value is None: current_value = self.sso_session_config.get(config_name) validator = None if validator_cls: validator = validator_cls(current_value) value = self._prompter.get_value( - current_value, text, + current_value, + text, completions=completions, validator=validator, toolbar=toolbar, - prompt_fmt=prompt_fmt + prompt_fmt=prompt_fmt, ) if value: self.sso_session_config[config_name] = value @@ -275,12 +297,17 @@ def _get_sso_session_toolbar(self): current_input = get_app().current_buffer.document.text if current_input in self._sso_sessions: selected_sso_config = self._sso_sessions[current_input] - return FormattedText([ - ('', self._get_toolbar_border()), - ('', '\n'), - ('bold', f'Configuration for SSO session: {current_input}\n\n'), - ('', json.dumps(selected_sso_config, indent=2)), - ]) + return FormattedText( + [ + ('', self._get_toolbar_border()), + ('', '\n'), + ( + 'bold', + f'Configuration for SSO session: {current_input}\n\n', + ), + ('', json.dumps(selected_sso_config, indent=2)), + ] + ) def _get_toolbar_border(self): horizontal_line_char = '\u2500' @@ -289,8 +316,7 @@ def _get_toolbar_border(self): def _get_potential_start_urls(self): profiles = self._botocore_session.full_config.get('profiles', {}) configs_to_search = itertools.chain( - profiles.values(), - self._sso_sessions.values() + profiles.values(), self._sso_sessions.values() ) potential_start_urls = set() for config_to_search in configs_to_search: @@ -335,14 +361,16 @@ def __init__(self, session, prompter=None, config_writer=None): self._config_writer = config_writer self._sso_sessions = self._session.full_config.get('sso_sessions', {}) self._sso_session_prompter = SSOSessionConfigurationPrompter( - botocore_session=session, prompter=self._prompter, + botocore_session=session, + prompter=self._prompter, ) def _write_sso_configuration(self): self._update_section( section_header=get_section_header( - 'sso-session', self._sso_session_prompter.sso_session), - new_values=self._sso_session_prompter.sso_session_config + 'sso-session', self._sso_session_prompter.sso_session + ), + new_values=self._sso_session_prompter.sso_session_config, ) def _update_section(self, section_header, new_values): @@ -354,7 +382,7 @@ def _update_section(self, section_header, new_values): class ConfigureSSOCommand(BaseSSOConfigurationCommand): NAME = 'sso' - SYNOPSIS = ('aws configure sso [--profile profile-name]') + SYNOPSIS = 'aws configure sso [--profile profile-name]' DESCRIPTION = ( 'The ``aws configure sso`` command interactively prompts for the ' 'configuration values required to create a profile that sources ' @@ -368,10 +396,18 @@ class ConfigureSSOCommand(BaseSSOConfigurationCommand): # TODO: Add CLI parameters to skip prompted values, --start-url, etc. 
ARG_TABLE = LOGIN_ARGS - def __init__(self, session, prompter=None, selector=None, - config_writer=None, sso_token_cache=None, sso_login=None): + def __init__( + self, + session, + prompter=None, + selector=None, + config_writer=None, + sso_token_cache=None, + sso_login=None, + ): super(ConfigureSSOCommand, self).__init__( - session, prompter=prompter, config_writer=config_writer) + session, prompter=prompter, config_writer=config_writer + ) if selector is None: selector = select_menu self._selector = selector @@ -390,14 +426,13 @@ def __init__(self, session, prompter=None, selector=None, def _set_sso_session_if_configured_in_profile(self): if 'sso_session' in self._profile_config: - self._sso_session_prompter.sso_session = \ - self._profile_config['sso_session'] + self._sso_session_prompter.sso_session = self._profile_config[ + 'sso_session' + ] def _handle_single_account(self, accounts): sso_account_id = accounts[0]['accountId'] - single_account_msg = ( - 'The only AWS account available to you is: {}\n' - ) + single_account_msg = 'The only AWS account available to you is: {}\n' uni_print(single_account_msg.format(sso_account_id)) return sso_account_id @@ -407,7 +442,8 @@ def _handle_multiple_accounts(self, accounts): ) uni_print(available_accounts_msg.format(len(accounts))) selected_account = self._selector( - accounts, display_format=display_account) + accounts, display_format=display_account + ) sso_account_id = selected_account['accountId'] return sso_account_id @@ -444,8 +480,7 @@ def _handle_multiple_roles(self, roles): def _get_all_roles(self, sso, sso_token, sso_account_id): paginator = sso.get_paginator('list_account_roles') results = paginator.paginate( - accountId=sso_account_id, - accessToken=sso_token['accessToken'] + accountId=sso_account_id, accessToken=sso_token['accessToken'] ) return results.build_full_result() @@ -472,24 +507,28 @@ def _prompt_for_profile(self, sso_account_id=None, sso_role_name=None): default_profile = f'{sso_role_name}-{sso_account_id}' validator = RequiredInputValidator(default_profile) profile_name = self._prompter.get_value( - default_profile, text, validator=validator) + default_profile, text, validator=validator + ) return profile_name def _prompt_for_cli_default_region(self): # TODO: figure out a way to get a list of reasonable client regions return self._prompt_for_profile_config( - 'region', 'CLI default client Region') + 'region', 'CLI default client Region' + ) def _prompt_for_cli_output_format(self): return self._prompt_for_profile_config( - 'output', 'CLI default output format', + 'output', + 'CLI default output format', completions=list(CLI_OUTPUT_FORMATS.keys()), ) def _prompt_for_profile_config(self, config_name, text, completions=None): current_value = self._profile_config.get(config_name) new_value = self._prompter.get_value( - current_value, text, + current_value, + text, completions=completions, ) if new_value: @@ -519,7 +558,7 @@ def _run_main(self, parsed_args, parsed_globals): token_cache=self._sso_token_cache, on_pending_authorization=on_pending_authorization, use_device_code=parsed_args.use_device_code, - **sso_registration_args + **sso_registration_args, ) # Construct an SSO client to explore the accounts / roles @@ -546,7 +585,8 @@ def _run_main(self, parsed_args, parsed_globals): def _prompt_for_sso_registration_args(self): sso_session = self._sso_session_prompter.prompt_for_sso_session( - required=False) + required=False + ) if sso_session is None: self._warn_configuring_using_legacy_format() return 
self._prompt_for_registration_args_with_legacy_format() @@ -554,7 +594,8 @@ def _prompt_for_sso_registration_args(self): self._set_sso_session_in_profile_config(sso_session) if sso_session in self._sso_sessions: return self._get_sso_registration_args_from_sso_config( - sso_session) + sso_session + ) else: return self._prompt_for_registration_args_for_new_sso_session( sso_session=sso_session @@ -564,10 +605,7 @@ def _prompt_for_registration_args_with_legacy_format(self): self._store_sso_session_prompter_answers_to_profile_config() self._set_sso_session_defaults_from_profile_config() start_url, sso_region = self._prompt_for_sso_start_url_and_sso_region() - return { - 'start_url': start_url, - 'sso_region': sso_region - } + return {'start_url': start_url, 'sso_region': sso_region} def _get_sso_registration_args_from_sso_config(self, sso_session): sso_config = self._get_sso_session_config(sso_session) @@ -575,13 +613,15 @@ def _get_sso_registration_args_from_sso_config(self, sso_session): 'session_name': sso_session, 'start_url': sso_config['sso_start_url'], 'sso_region': sso_config['sso_region'], - 'registration_scopes': sso_config.get('registration_scopes') + 'registration_scopes': sso_config.get('registration_scopes'), } def _prompt_for_registration_args_for_new_sso_session(self, sso_session): self._set_sso_session_defaults_from_profile_config() start_url, sso_region = self._prompt_for_sso_start_url_and_sso_region() - scopes = self._sso_session_prompter.prompt_for_sso_registration_scopes() + scopes = ( + self._sso_session_prompter.prompt_for_sso_registration_scopes() + ) return { 'session_name': sso_session, 'start_url': start_url, @@ -591,14 +631,15 @@ def _prompt_for_registration_args_for_new_sso_session(self, sso_session): # using any cached tokens from any previous of attempts to # create/authenticate a new SSO session as part of the configure # sso flow. - 'force_refresh': True + 'force_refresh': True, } def _store_sso_session_prompter_answers_to_profile_config(self): # Wire the SSO session prompter to set config values to the # dictionary used for writing to the profile section - self._sso_session_prompter.sso_session_config = \ + self._sso_session_prompter.sso_session_config = ( self._new_profile_config_values + ) def _set_sso_session_in_profile_config(self, sso_session): self._new_profile_config_values['sso_session'] = sso_session @@ -608,11 +649,13 @@ def _set_sso_session_defaults_from_profile_config(self): # SSO configuration as part of the prompt if a profile was explicitly # provided that already had SSO configuration if 'sso_start_url' in self._profile_config: - self._sso_session_prompter.sso_session_config['sso_start_url'] = \ + self._sso_session_prompter.sso_session_config['sso_start_url'] = ( self._profile_config['sso_start_url'] + ) if 'sso_region' in self._profile_config: - self._sso_session_prompter.sso_session_config['sso_region'] = \ + self._sso_session_prompter.sso_session_config['sso_region'] = ( self._profile_config['sso_region'] + ) def _prompt_for_sso_start_url_and_sso_region(self): start_url = self._sso_session_prompter.prompt_for_sso_start_url() @@ -633,7 +676,8 @@ def _prompt_for_sso_account_and_role(self, sso, sso_token): try: sso_account_id = self._prompt_for_account(sso, sso_token) sso_role_name = self._prompt_for_role( - sso, sso_token, sso_account_id) + sso, sso_token, sso_account_id + ) except sso.exceptions.UnauthorizedException as e: uni_print( 'Unable to list AWS accounts and/or roles. 
' @@ -645,7 +689,8 @@ def _write_new_config(self, profile): if self._new_profile_config_values: profile_section = profile_to_section(profile) self._update_section( - profile_section, self._new_profile_config_values) + profile_section, self._new_profile_config_values + ) if self._sso_session_prompter.sso_session: self._write_sso_configuration() @@ -663,7 +708,7 @@ def _print_conclusion(self, configured_for_aws_credentials, profile_name): class ConfigureSSOSessionCommand(BaseSSOConfigurationCommand): NAME = 'sso-session' - SYNOPSIS = ('aws configure sso-session') + SYNOPSIS = 'aws configure sso-session' DESCRIPTION = ( 'The ``aws configure sso-session`` command interactively prompts for ' 'the configuration values required to create a SSO session. ' diff --git a/awscli/customizations/configure/writer.py b/awscli/customizations/configure/writer.py index 4aedabc43792..439d4b5e9f1e 100644 --- a/awscli/customizations/configure/writer.py +++ b/awscli/customizations/configure/writer.py @@ -19,9 +19,7 @@ class ConfigFileWriter(object): SECTION_REGEX = re.compile(r'^\s*\[(?P
<header>[^]]+)\]') OPTION_REGEX = re.compile( - r'(?P<option>[^:=][^:=]*)' - r'\s*(?P<vi>[:=])\s*' - r'(?P<value>.*)$' - ) + r'(?P<option>[^:=][^:=]*)' r'\s*(?P<vi>[:=])\s*' r'(?P<value>.*)$' + ) diff --git a/awscli/customizations/dynamodb/params.py b/awscli/customizations/dynamodb/params.py --- a/awscli/customizations/dynamodb/params.py +++ b/awscli/customizations/dynamodb/params.py } PROJECTION_EXPRESSION = { - 'name': 'projection', 'nargs': '+', + 'name': 'projection', + 'nargs': '+', 'help_text': ( '
<p>A string that identifies one or more attributes to retrieve from ' 'the specified table or index. These attributes can include scalars, ' @@ -71,11 +73,12 @@ '</p><p>For CLI specific syntax see ' 'aws help ddb-expressions</p>' - ) + ), } FILTER_EXPRESSION = { - 'name': 'filter', 'nargs': '+', + 'name': 'filter', + 'nargs': '+', 'help_text': ( '
<p>A string that contains conditions that DynamoDB applies after the ' 'operation, but before the data is returned to you. Items that do ' @@ -90,11 +93,12 @@ '</p><p>For CLI specific syntax see ' 'aws help ddb-expressions</p>' - ) + ), } CONDITION_EXPRESSION = { - 'name': 'condition', 'nargs': '+', + 'name': 'condition', + 'nargs': '+', 'help_text': ( '
<p>A condition that must be satisfied in order for a conditional ' 'put operation to succeed.</p>' @@ -106,11 +110,12 @@ '</p><p>For CLI specific syntax see ' 'aws help ddb-expressions</p>' - ) + ), } KEY_CONDITION_EXPRESSION = { - 'name': 'key-condition', 'nargs': '+', + 'name': 'key-condition', + 'nargs': '+', 'help_text': ( '
<p>The condition that specifies the key value(s) for items to be ' 'retrieved. Must perform an equality test on a single partition key ' @@ -152,7 +157,7 @@ '</p><p>For CLI specific syntax see ' 'aws help ddb-expressions</p>' - ) + ), } ITEMS = { @@ -161,12 +166,15 @@ 'synopsis': '', 'help_text': ( '
<p>One or more items to put into the table, in YAML format.</p>' - ) + ), } CONSISTENT_READ = { - 'name': 'consistent-read', 'action': 'store_true', 'default': True, - 'group_name': 'consistent_read', 'dest': 'consistent_read', + 'name': 'consistent-read', + 'action': 'store_true', + 'default': True, + 'group_name': 'consistent_read', + 'dest': 'consistent_read', 'help_text': ( '
<p>Determines the read consistency model: If set to ' '<code>--consistent-read</code>, then the operation uses strongly ' @@ -175,29 +183,36 @@ 'global secondary indexes. If you query a global secondary index ' 'with <code>--consistent-read</code>, you will receive a ' '<code>ValidationException</code>.</p>' - ) + ), } NO_CONSISTENT_READ = { - 'name': 'no-consistent-read', 'action': 'store_false', 'default': True, - 'group_name': 'consistent_read', 'dest': 'consistent_read', + 'name': 'no-consistent-read', + 'action': 'store_false', + 'default': True, + 'group_name': 'consistent_read', + 'dest': 'consistent_read', } RETURN_CONSUMED_CAPACITY = { - 'name': 'return-consumed-capacity', 'action': 'store_true', - 'default': False, 'group_name': 'return_consumed_capacity', + 'name': 'return-consumed-capacity', + 'action': 'store_true', + 'default': False, + 'group_name': 'return_consumed_capacity', 'dest': 'return_consumed_capacity', 'help_text': ( '
<p>Will include the aggregate <code>ConsumedCapacity</code> for the ' 'operation. If <code>--index-name</code> is also specified, ' 'then the <code>ConsumedCapacity</code> for each table and secondary ' 'index that was accessed will be returned.</p>
    ' - ) + ), } NO_RETURN_CONSUMED_CAPACITY = { - 'name': 'no-return-consumed-capacity', 'action': 'store_false', - 'default': False, 'group_name': 'return_consumed_capacity', + 'name': 'no-return-consumed-capacity', + 'action': 'store_false', + 'default': False, + 'group_name': 'return_consumed_capacity', 'dest': 'return_consumed_capacity', } diff --git a/awscli/customizations/dynamodb/parser.py b/awscli/customizations/dynamodb/parser.py index f4ce822ee872..adef98aa1791 100644 --- a/awscli/customizations/dynamodb/parser.py +++ b/awscli/customizations/dynamodb/parser.py @@ -13,11 +13,13 @@ from decimal import Decimal import awscli.customizations.dynamodb.ast as ast + from .exceptions import ( - EmptyExpressionError, UnexpectedTokenError, UnknownExpressionError, + EmptyExpressionError, InvalidLiteralValueError, + UnexpectedTokenError, + UnknownExpressionError, ) - from .lexer import Lexer from .types import Binary @@ -57,7 +59,9 @@ def _parse_expression(self): expression = self._parse_simple_expression() identifier_types = [ - 'identifier', 'path_identifier', 'index_identifier' + 'identifier', + 'path_identifier', + 'index_identifier', ] if expression['type'] in identifier_types and self._match('comma'): self._advance() @@ -104,8 +108,11 @@ def _parse_condition_expression(self): return self._parse_function() operand_types = [ - 'literal', 'identifier', 'unquoted_identifier', - 'lbracket', 'lbrace', + 'literal', + 'identifier', + 'unquoted_identifier', + 'lbracket', + 'lbrace', ] if not self._match(operand_types): raise UnknownExpressionError( @@ -174,7 +181,10 @@ def _parse_operand(self): token=self._current, expression=self._expression, expected_type=[ - 'literal', 'lbracket', 'lbrace', 'identifier', + 'literal', + 'lbracket', + 'lbrace', + 'identifier', 'unquoted_identiifer', ], ) @@ -291,7 +301,7 @@ def _parse_literal_map(self): expression=self._expression, message=( 'Keys must be of type `str`, found `%s`' % type(key) - ) + ), ) self._advance_if_match('literal') self._advance_if_match('colon') diff --git a/awscli/customizations/dynamodb/subcommands.py b/awscli/customizations/dynamodb/subcommands.py index d9908a0dc70c..91f4e815ce42 100644 --- a/awscli/customizations/dynamodb/subcommands.py +++ b/awscli/customizations/dynamodb/subcommands.py @@ -10,26 +10,28 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
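The parser.py changes above are formatting-only, but they show the shape of the ``ddb`` expression parser: a recursive-descent walk over lexer tokens with type names like ``identifier``, ``literal``, and ``lbracket``, raising ``UnexpectedTokenError`` when the stream diverges from the grammar. A toy sketch of that token-driven style; the three-token grammar and the ``comparator`` token name are illustrative, and the real grammar in parser.py is much richer:

.. code-block:: python

    def parse_comparison(tokens, pos=0):
        # comparison := identifier comparator literal
        if len(tokens) < pos + 3:
            raise ValueError('unexpected end of expression')
        expected = ['identifier', 'comparator', 'literal']
        ident, op, value = tokens[pos:pos + 3]
        for token, expected_type in zip((ident, op, value), expected):
            if token['type'] != expected_type:
                raise ValueError(
                    f"expected {expected_type}, got {token['type']}"
                )
        node = {
            'type': 'comparison',
            'operator': op['value'],
            'children': [ident, value],
        }
        return node, pos + 3

    # Token stream a lexer might emit for "foo = 1":
    tokens = [
        {'type': 'identifier', 'value': 'foo'},
        {'type': 'comparator', 'value': '='},
        {'type': 'literal', 'value': 1},
    ]
    node, next_pos = parse_comparison(tokens)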
-from base64 import b64decode -from decimal import Decimal import logging import sys +from base64 import b64decode +from decimal import Decimal from ruamel.yaml import YAML -from awscli.formatter import YAMLFormatter -from awscli.utils import OutputStreamFactory import awscli.customizations.dynamodb.params as parameters from awscli.customizations.commands import BasicCommand, CustomArgument from awscli.customizations.dynamodb.extractor import AttributeExtractor +from awscli.customizations.dynamodb.formatter import DynamoYAMLDumper from awscli.customizations.dynamodb.transform import ( - ParameterTransformer, TypeSerializer, TypeDeserializer + ParameterTransformer, + TypeDeserializer, + TypeSerializer, ) -from awscli.customizations.dynamodb.formatter import DynamoYAMLDumper -from awscli.customizations.paginate import ensure_paging_params_not_set from awscli.customizations.exceptions import ParamValidationError -from .types import Binary +from awscli.customizations.paginate import ensure_paging_params_not_set +from awscli.formatter import YAMLFormatter +from awscli.utils import OutputStreamFactory +from .types import Binary LOGGER = logging.getLogger(__name__) @@ -39,9 +41,10 @@ def _run_main(self, parsed_args, parsed_globals): factory = self._session.get_component('response_parser_factory') factory.set_parser_defaults(blob_parser=None) self._client = self._session.create_client( - 'dynamodb', region_name=parsed_globals.region, + 'dynamodb', + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) self._transformer = ParameterTransformer() self._serializer = TypeSerializer() @@ -55,8 +58,10 @@ def _serialize(self, operation_name, data): self._client.meta.method_to_api_mapping.get(operation_name) ) self._transformer.transform( - data, operation_model.input_shape, self._serializer.serialize, - 'AttributeValue' + data, + operation_model.input_shape, + self._serializer.serialize, + 'AttributeValue', ) def _deserialize(self, operation_name, data): @@ -65,12 +70,15 @@ def _deserialize(self, operation_name, data): self._client.meta.method_to_api_mapping.get(operation_name) ) self._transformer.transform( - data, operation_model.output_shape, self._deserializer.deserialize, - 'AttributeValue' + data, + operation_model.output_shape, + self._deserializer.deserialize, + 'AttributeValue', ) - def _make_api_call(self, operation_name, client_args, - should_paginate=True): + def _make_api_call( + self, operation_name, client_args, should_paginate=True + ): self._serialize(operation_name, client_args) if self._client.can_paginate(operation_name) and should_paginate: @@ -78,8 +86,10 @@ def _make_api_call(self, operation_name, client_args, response = paginator.paginate(**client_args).build_full_result() else: response = getattr(self._client, operation_name)(**client_args) - if 'ConsumedCapacity' in response and \ - response['ConsumedCapacity'] is None: + if ( + 'ConsumedCapacity' in response + and response['ConsumedCapacity'] is None + ): del response['ConsumedCapacity'] self._deserialize(operation_name, response) return response @@ -98,11 +108,11 @@ def _dump_yaml(self, operation_name, data, parsed_globals): with self._output_stream_factory.get_output_stream() as stream: formatter(operation_name, data, stream) - def _add_expression_args(self, expression_name, expression, args, - substitution_count=0): + def _add_expression_args( + self, expression_name, expression, args, substitution_count=0 + ): result = 
self._extractor.extract( - ' '.join(expression), - substitution_count + ' '.join(expression), substitution_count ) args[expression_name] = result['expression'] @@ -121,7 +131,9 @@ def _add_expression_args(self, expression_name, expression, args, class PaginatedDDBCommand(DDBCommand): PAGING_ARGS = [ - parameters.STARTING_TOKEN, parameters.MAX_ITEMS, parameters.PAGE_SIZE + parameters.STARTING_TOKEN, + parameters.MAX_ITEMS, + parameters.PAGE_SIZE, ] def _build_arg_table(self): @@ -175,7 +187,10 @@ def _run_main(self, parsed_args, parsed_globals): def _select(self, parsed_args, parsed_globals): output_type = parsed_globals.output - if output_type is not None and output_type not in self._SUPPORTED_OUTPUT_TYPES: + if ( + output_type is not None + and output_type not in self._SUPPORTED_OUTPUT_TYPES + ): raise ParamValidationError( f'{output_type} output format is not supported for ddb commands' ) @@ -201,27 +216,35 @@ def _select(self, parsed_args, parsed_globals): def _get_client_args(self, parsed_args): client_args = super(SelectCommand, self)._get_client_args(parsed_args) - client_args.update({ - 'TableName': parsed_args.table_name, - 'ConsistentRead': parsed_args.consistent_read, - }) + client_args.update( + { + 'TableName': parsed_args.table_name, + 'ConsistentRead': parsed_args.consistent_read, + } + ) substitution_count = 0 if parsed_args.index_name is not None: client_args['IndexName'] = parsed_args.index_name if parsed_args.projection is not None: substitution_count = self._add_expression_args( - 'ProjectionExpression', parsed_args.projection, client_args, + 'ProjectionExpression', + parsed_args.projection, + client_args, substitution_count, ) if parsed_args.filter is not None: substitution_count += self._add_expression_args( - 'FilterExpression', parsed_args.filter, client_args, + 'FilterExpression', + parsed_args.filter, + client_args, substitution_count, ) if parsed_args.key_condition is not None: self._add_expression_args( - 'KeyConditionExpression', parsed_args.key_condition, - client_args, substitution_count, + 'KeyConditionExpression', + parsed_args.key_condition, + client_args, + substitution_count, ) if parsed_args.attributes is not None: select_map = { @@ -245,9 +268,7 @@ def _get_client_args(self, parsed_args): class PutCommand(DDBCommand): NAME = 'put' - DESCRIPTION = ( - '``put`` puts one or more items into a table.' - ) + DESCRIPTION = '``put`` puts one or more items into a table.' 
ARG_TABLE = [ parameters.TABLE_NAME, parameters.ITEMS, @@ -301,9 +322,7 @@ def _batch_write(self, items, parsed_args): put_requests = [{'PutRequest': {'Item': i}} for i in items] while len(put_requests) > 0: batch_items = put_requests[:batch_size] - client_args['RequestItems'] = { - parsed_args.table_name: batch_items - } + client_args['RequestItems'] = {parsed_args.table_name: batch_items} result = self._make_api_call('batch_write_item', client_args) put_requests = put_requests[batch_size:] @@ -328,6 +347,8 @@ def _get_base_args(self, parsed_args): client_args = {'ReturnConsumedCapacity': 'NONE'} if parsed_args.condition is not None: self._add_expression_args( - 'ConditionExpression', parsed_args.condition, client_args, + 'ConditionExpression', + parsed_args.condition, + client_args, ) return client_args diff --git a/awscli/customizations/dynamodb/transform.py b/awscli/customizations/dynamodb/transform.py index 22987f0605e0..fa6e9c62cf1b 100644 --- a/awscli/customizations/dynamodb/transform.py +++ b/awscli/customizations/dynamodb/transform.py @@ -13,7 +13,8 @@ from collections.abc import Mapping, MutableSequence from awscli.customizations.dynamodb.types import ( - TypeSerializer, TypeDeserializer + TypeDeserializer, + TypeSerializer, ) @@ -30,18 +31,20 @@ def transform(self, params, model, transformation, target_shape): :param target_shape: The name of the shape to apply the transformation to """ - self._transform_parameters( - model, params, transformation, target_shape) + self._transform_parameters(model, params, transformation, target_shape) - def _transform_parameters(self, model, params, transformation, - target_shape): + def _transform_parameters( + self, model, params, transformation, target_shape + ): type_name = model.type_name if type_name in ['structure', 'map', 'list']: getattr(self, '_transform_%s' % type_name)( - model, params, transformation, target_shape) + model, params, transformation, target_shape + ) - def _transform_structure(self, model, params, transformation, - target_shape): + def _transform_structure( + self, model, params, transformation, target_shape + ): if not isinstance(params, Mapping): return for param in params: @@ -52,8 +55,11 @@ def _transform_structure(self, model, params, transformation, params[param] = transformation(params[param]) else: self._transform_parameters( - member_model, params[param], transformation, - target_shape) + member_model, + params[param], + transformation, + target_shape, + ) def _transform_map(self, model, params, transformation, target_shape): if not isinstance(params, Mapping): @@ -65,7 +71,8 @@ def _transform_map(self, model, params, transformation, target_shape): params[key] = transformation(value) else: self._transform_parameters( - value_model, params[key], transformation, target_shape) + value_model, params[key], transformation, target_shape + ) def _transform_list(self, model, params, transformation, target_shape): if not isinstance(params, MutableSequence): @@ -77,4 +84,5 @@ def _transform_list(self, model, params, transformation, target_shape): params[i] = transformation(item) else: self._transform_parameters( - member_model, params[i], transformation, target_shape) + member_model, params[i], transformation, target_shape + ) diff --git a/awscli/customizations/dynamodb/types.py b/awscli/customizations/dynamodb/types.py index 9bb429673ccd..25548f21526f 100644 --- a/awscli/customizations/dynamodb/types.py +++ b/awscli/customizations/dynamodb/types.py @@ -11,9 +11,15 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. from collections.abc import Mapping, Set -from decimal import Decimal, Context, Clamped -from decimal import Overflow, Inexact, Underflow, Rounded - +from decimal import ( + Clamped, + Context, + Decimal, + Inexact, + Overflow, + Rounded, + Underflow, +) STRING = 'S' NUMBER = 'N' @@ -28,8 +34,10 @@ DYNAMODB_CONTEXT = Context( - Emin=-128, Emax=126, prec=38, - traps=[Clamped, Overflow, Inexact, Rounded, Underflow] + Emin=-128, + Emax=126, + prec=38, + traps=[Clamped, Overflow, Inexact, Rounded, Underflow], ) @@ -42,10 +50,13 @@ class Binary(object): binary data for item in DynamoDB. It is essentially a wrapper around binary. Unicode and Python 3 string types are not allowed. """ + def __init__(self, value): if not isinstance(value, BINARY_TYPES): - raise TypeError('Value must be of the following types: %s.' % - ', '.join([str(t) for t in BINARY_TYPES])) + raise TypeError( + 'Value must be of the following types: %s.' + % ', '.join([str(t) for t in BINARY_TYPES]) + ) self.value = value def __eq__(self, other): @@ -68,6 +79,7 @@ def __hash__(self): class TypeSerializer(object): """This class serializes Python data types to DynamoDB types.""" + def serialize(self, value): """The method to serialize the Python data types. :param value: A python value to be serialized to DynamoDB. Here are @@ -150,7 +162,8 @@ def _is_number(self, value): return True elif isinstance(value, float): raise TypeError( - 'Float types are not supported. Use Decimal types instead.') + 'Float types are not supported. Use Decimal types instead.' + ) return False def _is_string(self, value): @@ -226,6 +239,7 @@ def _serialize_m(self, value): class TypeDeserializer(object): """This class deserializes DynamoDB types to Python types.""" + def deserialize(self, value): """The method to deserialize the DynamoDB data types. :param value: A DynamoDB value to be deserialized to a pythonic value. @@ -246,15 +260,19 @@ def deserialize(self, value): """ if not value: - raise TypeError('Value must be a nonempty dictionary whose key ' - 'is a valid dynamodb type.') + raise TypeError( + 'Value must be a nonempty dictionary whose key ' + 'is a valid dynamodb type.' + ) dynamodb_type = list(value.keys())[0] try: deserializer = getattr( - self, '_deserialize_%s' % dynamodb_type.lower()) + self, '_deserialize_%s' % dynamodb_type.lower() + ) except AttributeError: raise TypeError( - 'Dynamodb type %s is not supported' % dynamodb_type) + 'Dynamodb type %s is not supported' % dynamodb_type + ) return deserializer(value[dynamodb_type]) def _deserialize_null(self, value): diff --git a/awscli/customizations/ec2/addcount.py b/awscli/customizations/ec2/addcount.py index 16a2a16ae3ed..f2e0333b49fe 100644 --- a/awscli/customizations/ec2/addcount.py +++ b/awscli/customizations/ec2/addcount.py @@ -12,28 +12,31 @@ # language governing permissions and limitations under the License. import logging -from botocore import model - from awscli.arguments import BaseCLIArgument - +from botocore import model logger = logging.getLogger(__name__) DEFAULT = 1 -HELP = """ +HELP = ( + """

 <p>Number of instances to launch. If a single number is provided, it
 is assumed to be the minimum to launch (defaults to %d). If a range is
 provided in the form <code>min:max</code> then the first number is
 interpreted as the minimum number of instances to launch and the second
-is interpreted as the maximum number of instances to launch.</p>
-""" % DEFAULT
+is interpreted as the maximum number of instances to launch.</p>
    """ + % DEFAULT +) def register_count_events(event_handler): event_handler.register( - 'building-argument-table.ec2.run-instances', ec2_add_count) + 'building-argument-table.ec2.run-instances', ec2_add_count + ) event_handler.register( - 'before-parameter-build.ec2.RunInstances', set_default_count) + 'before-parameter-build.ec2.RunInstances', set_default_count + ) def ec2_add_count(argument_table, **kwargs): @@ -48,7 +51,6 @@ def set_default_count(params, **kwargs): class CountArgument(BaseCLIArgument): - def __init__(self, name): self.argument_model = model.Shape('CountArgument', {'type': 'string'}) self._name = name @@ -76,8 +78,11 @@ def documentation(self): def add_to_parser(self, parser): # We do NOT set default value here. It will be set later by event hook. - parser.add_argument(self.cli_name, metavar=self.py_name, - help='Number of instances to launch') + parser.add_argument( + self.cli_name, + metavar=self.py_name, + help='Number of instances to launch', + ) def add_to_params(self, parameters, value): if value is None: @@ -91,6 +96,8 @@ def add_to_params(self, parameters, value): parameters['MinCount'] = int(minstr) parameters['MaxCount'] = int(maxstr) except: - msg = ('count parameter should be of ' - 'form min[:max] (e.g. 1 or 1:10)') + msg = ( + 'count parameter should be of ' + 'form min[:max] (e.g. 1 or 1:10)' + ) raise ParamValidationError(msg) diff --git a/awscli/customizations/ec2/bundleinstance.py b/awscli/customizations/ec2/bundleinstance.py index cc6802d6f47c..3d1dc33c971b 100644 --- a/awscli/customizations/ec2/bundleinstance.py +++ b/awscli/customizations/ec2/bundleinstance.py @@ -11,11 +11,11 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import logging -from hashlib import sha1 -import hmac import base64 import datetime +import hmac +import logging +from hashlib import sha1 from awscli.arguments import CustomArgument from awscli.customizations.exceptions import ParamValidationError @@ -26,15 +26,18 @@ # bundle-instance operation: # --bucket: -BUCKET_DOCS = ('The bucket in which to store the AMI. ' - 'You can specify a bucket that you already own or ' - 'a new bucket that Amazon EC2 creates on your behalf. ' - 'If you specify a bucket that belongs to someone else, ' - 'Amazon EC2 returns an error.') +BUCKET_DOCS = ( + 'The bucket in which to store the AMI. ' + 'You can specify a bucket that you already own or ' + 'a new bucket that Amazon EC2 creates on your behalf. ' + 'If you specify a bucket that belongs to someone else, ' + 'Amazon EC2 returns an error.' +) # --prefix: -PREFIX_DOCS = ('The prefix for the image component names being stored ' - 'in Amazon S3.') +PREFIX_DOCS = ( + 'The prefix for the image component names being stored ' 'in Amazon S3.' +) # --owner-akid OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.' @@ -53,13 +56,16 @@ "sections about policy construction and signatures in the " '' - 'Amazon Simple Storage Service Developer Guide.') + 'Amazon Simple Storage Service Developer Guide.' +) # --owner-sak -OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the ' - 'Amazon S3 bucket specified in the --bucket ' - 'parameter. This parameter is required so that a ' - 'signature can be computed for the policy.') +OWNER_SAK_DOCS = ( + 'The AWS secret access key for the owner of the ' + 'Amazon S3 bucket specified in the --bucket ' + 'parameter. 
This parameter is required so that a ' + 'signature can be computed for the policy.' +) def _add_params(argument_table, **kwargs): @@ -68,25 +74,27 @@ def _add_params(argument_table, **kwargs): # argparse if they only supply scalar params. storage_arg = argument_table['storage'] storage_arg.required = False - arg = BundleArgument(storage_param='Bucket', - name='bucket', - help_text=BUCKET_DOCS) + arg = BundleArgument( + storage_param='Bucket', name='bucket', help_text=BUCKET_DOCS + ) argument_table['bucket'] = arg - arg = BundleArgument(storage_param='Prefix', - name='prefix', - help_text=PREFIX_DOCS) + arg = BundleArgument( + storage_param='Prefix', name='prefix', help_text=PREFIX_DOCS + ) argument_table['prefix'] = arg - arg = BundleArgument(storage_param='AWSAccessKeyId', - name='owner-akid', - help_text=OWNER_AKID_DOCS) + arg = BundleArgument( + storage_param='AWSAccessKeyId', + name='owner-akid', + help_text=OWNER_AKID_DOCS, + ) argument_table['owner-akid'] = arg - arg = BundleArgument(storage_param='_SAK', - name='owner-sak', - help_text=OWNER_SAK_DOCS) + arg = BundleArgument( + storage_param='_SAK', name='owner-sak', help_text=OWNER_SAK_DOCS + ) argument_table['owner-sak'] = arg - arg = BundleArgument(storage_param='UploadPolicy', - name='policy', - help_text=POLICY_DOCS) + arg = BundleArgument( + storage_param='UploadPolicy', name='policy', help_text=POLICY_DOCS + ) argument_table['policy'] = arg @@ -97,21 +105,24 @@ def _check_args(parsed_args, **kwargs): logger.debug(parsed_args) arg_dict = vars(parsed_args) if arg_dict['storage']: - for key in ('bucket', 'prefix', 'owner_akid', - 'owner_sak', 'policy'): + for key in ('bucket', 'prefix', 'owner_akid', 'owner_sak', 'policy'): if arg_dict[key]: - msg = ('Mixing the --storage option ' - 'with the simple, scalar options is ' - 'not recommended.') + msg = ( + 'Mixing the --storage option ' + 'with the simple, scalar options is ' + 'not recommended.' 
+ ) raise ParamValidationError(msg) -POLICY = ('{{"expiration": "{expires}",' - '"conditions": [' - '{{"bucket": "{bucket}"}},' - '{{"acl": "ec2-bundle-read"}},' - '["starts-with", "$key", "{prefix}"]' - ']}}' - ) + +POLICY = ( + '{{"expiration": "{expires}",' + '"conditions": [' + '{{"bucket": "{bucket}"}},' + '{{"acl": "ec2-bundle-read"}},' + '["starts-with", "$key", "{prefix}"]' + ']}}' +) def _generate_policy(params): @@ -120,9 +131,9 @@ def _generate_policy(params): delta = datetime.timedelta(hours=24) expires = datetime.datetime.utcnow() + delta expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - policy = POLICY.format(expires=expires_iso, - bucket=params['Bucket'], - prefix=params['Prefix']) + policy = POLICY.format( + expires=expires_iso, bucket=params['Bucket'], prefix=params['Prefix'] + ) params['UploadPolicy'] = policy.encode('utf-8') @@ -163,7 +174,6 @@ def register_bundleinstance(event_handler): class BundleArgument(CustomArgument): - def __init__(self, storage_param, *args, **kwargs): super(BundleArgument, self).__init__(*args, **kwargs) self._storage_param = storage_param diff --git a/awscli/customizations/ec2/decryptpassword.py b/awscli/customizations/ec2/decryptpassword.py index b948c7051f2d..b2d8737decd6 100644 --- a/awscli/customizations/ec2/decryptpassword.py +++ b/awscli/customizations/ec2/decryptpassword.py @@ -15,9 +15,9 @@ import os from awscrt.crypto import RSA, RSASignatureAlgorithm -from botocore import model from awscli.arguments import BaseCLIArgument +from botocore import model logger = logging.getLogger(__name__) @@ -27,22 +27,25 @@ password data sent from EC2 will be decrypted before display.

    """ -def ec2_add_priv_launch_key(argument_table, operation_model, session, - **kwargs): +def ec2_add_priv_launch_key( + argument_table, operation_model, session, **kwargs +): """ This handler gets called after the argument table for the operation has been created. It's job is to add the ``priv-launch-key`` parameter. """ argument_table['priv-launch-key'] = LaunchKeyArgument( - session, operation_model, 'priv-launch-key') + session, operation_model, 'priv-launch-key' + ) class LaunchKeyArgument(BaseCLIArgument): - def __init__(self, session, operation_model, name): self._session = session - self.argument_model = model.Shape('LaunchKeyArgument', {'type': 'string'}) + self.argument_model = model.Shape( + 'LaunchKeyArgument', {'type': 'string'} + ) self._operation_model = operation_model self._name = name self._key_path = None @@ -65,8 +68,9 @@ def documentation(self): return HELP def add_to_parser(self, parser): - parser.add_argument(self.cli_name, dest=self.py_name, - help='SSH Private Key file') + parser.add_argument( + self.cli_name, dest=self.py_name, help='SSH Private Key file' + ) def add_to_params(self, parameters, value): """ @@ -81,13 +85,17 @@ def add_to_params(self, parameters, value): if os.path.isfile(path): self._key_path = path service_id = self._operation_model.service_model.service_id - event = 'after-call.%s.%s' % (service_id.hyphenize(), - self._operation_model.name) + event = 'after-call.%s.%s' % ( + service_id.hyphenize(), + self._operation_model.name, + ) self._session.register(event, self._decrypt_password_data) else: - msg = ('priv-launch-key should be a path to the ' - 'local SSH private key file used to launch ' - 'the instance.') + msg = ( + 'priv-launch-key should be a path to the ' + 'local SSH private key file used to launch ' + 'the instance.' + ) raise ValueError(msg) def _decrypt_password_data(self, parsed, **kwargs): @@ -108,12 +116,16 @@ def _decrypt_password_data(self, parsed, **kwargs): pk_bytes = pk_file.read() private_key = RSA.new_private_key_from_pem_data(pk_bytes) value = base64.b64decode(value) - value = private_key.decrypt(RSASignatureAlgorithm.PKCS1_5_SHA256, value) + value = private_key.decrypt( + RSASignatureAlgorithm.PKCS1_5_SHA256, value + ) logger.debug(parsed) parsed['PasswordData'] = value.decode('utf-8') logger.debug(parsed) except Exception: logger.debug('Unable to decrypt PasswordData', exc_info=True) - msg = ('Unable to decrypt password data using ' - 'provided private key file.') + msg = ( + 'Unable to decrypt password data using ' + 'provided private key file.' + ) raise ValueError(msg) diff --git a/awscli/customizations/ec2/paginate.py b/awscli/customizations/ec2/paginate.py index 9a555eed8190..bf016dbe30be 100644 --- a/awscli/customizations/ec2/paginate.py +++ b/awscli/customizations/ec2/paginate.py @@ -17,14 +17,13 @@ def register_ec2_page_size_injector(event_emitter): class EC2PageSizeInjector(object): - # Operations to auto-paginate and their specific whitelists. # Format: # Key: Operation # Value: List of parameters to add to whitelist for that operation. TARGET_OPERATIONS = { "describe-volumes": [], - "describe-snapshots": ['OwnerIds', 'RestorableByUserIds'] + "describe-snapshots": ['OwnerIds', 'RestorableByUserIds'], } # Parameters which should be whitelisted for every operation. 
diff --git a/awscli/customizations/ec2/protocolarg.py b/awscli/customizations/ec2/protocolarg.py index a4cb4329836b..5f20d23f92b4 100644 --- a/awscli/customizations/ec2/protocolarg.py +++ b/awscli/customizations/ec2/protocolarg.py @@ -31,7 +31,7 @@ def _fix_args(params, **kwargs): def register_protocol_args(cli): - cli.register('before-parameter-build.ec2.CreateNetworkAclEntry', - _fix_args) - cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry', - _fix_args) + cli.register('before-parameter-build.ec2.CreateNetworkAclEntry', _fix_args) + cli.register( + 'before-parameter-build.ec2.ReplaceNetworkAclEntry', _fix_args + ) diff --git a/awscli/customizations/ec2/runinstances.py b/awscli/customizations/ec2/runinstances.py index 154b238af182..acdf4c401526 100644 --- a/awscli/customizations/ec2/runinstances.py +++ b/awscli/customizations/ec2/runinstances.py @@ -22,6 +22,7 @@ ``--network-interfaces`` complex argument. This just makes two of the most commonly used features available more easily. """ + from awscli.arguments import CustomArgument from awscli.customizations.exceptions import ParamValidationError @@ -31,37 +32,46 @@ 'or instance. You can specify this multiple times to assign multiple ' 'secondary IP addresses. If you want additional private IP addresses ' 'but do not need a specific address, use the ' - '--secondary-private-ip-address-count option.') + '--secondary-private-ip-address-count option.' +) # --secondary-private-ip-address-count SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = ( '[EC2-VPC] The number of secondary IP addresses to assign to ' - 'the network interface or instance.') + 'the network interface or instance.' +) # --associate-public-ip-address ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = ( '[EC2-VPC] If specified a public IP address will be assigned ' - 'to the new instance in a VPC.') + 'to the new instance in a VPC.' +) def _add_params(argument_table, **kwargs): arg = SecondaryPrivateIpAddressesArgument( name='secondary-private-ip-addresses', - help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS) + help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS, + ) argument_table['secondary-private-ip-addresses'] = arg arg = SecondaryPrivateIpAddressCountArgument( name='secondary-private-ip-address-count', - help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS) + help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS, + ) argument_table['secondary-private-ip-address-count'] = arg arg = AssociatePublicIpAddressArgument( name='associate-public-ip-address', help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS, - action='store_true', group_name='associate_public_ip') + action='store_true', + group_name='associate_public_ip', + ) argument_table['associate-public-ip-address'] = arg arg = NoAssociatePublicIpAddressArgument( name='no-associate-public-ip-address', help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS, - action='store_false', group_name='associate_public_ip') + action='store_false', + group_name='associate_public_ip', + ) argument_table['no-associate-public-ip-address'] = arg @@ -71,13 +81,17 @@ def _check_args(parsed_args, **kwargs): # raise an error. 
arg_dict = vars(parsed_args) if arg_dict['network_interfaces']: - for key in ('secondary_private_ip_addresses', - 'secondary_private_ip_address_count', - 'associate_public_ip_address'): + for key in ( + 'secondary_private_ip_addresses', + 'secondary_private_ip_address_count', + 'associate_public_ip_address', + ): if arg_dict[key]: - msg = ('Mixing the --network-interfaces option ' - 'with the simple, scalar options is ' - 'not supported.') + msg = ( + 'Mixing the --network-interfaces option ' + 'with the simple, scalar options is ' + 'not supported.' + ) raise ParamValidationError(msg) @@ -96,7 +110,7 @@ def _fix_args(params, **kwargs): network_interface_params = [ 'PrivateIpAddresses', 'SecondaryPrivateIpAddressCount', - 'AssociatePublicIpAddress' + 'AssociatePublicIpAddress', ] if 'NetworkInterfaces' in params: interface = params['NetworkInterfaces'][0] @@ -108,8 +122,10 @@ def _fix_args(params, **kwargs): interface['Groups'] = params['SecurityGroupIds'] del params['SecurityGroupIds'] if 'PrivateIpAddress' in params: - ip_addr = {'PrivateIpAddress': params['PrivateIpAddress'], - 'Primary': True} + ip_addr = { + 'PrivateIpAddress': params['PrivateIpAddress'], + 'Primary': True, + } interface['PrivateIpAddresses'] = [ip_addr] del params['PrivateIpAddress'] if 'Ipv6AddressCount' in params: @@ -149,41 +165,41 @@ def _build_network_interfaces(params, key, value): class SecondaryPrivateIpAddressesArgument(CustomArgument): - def add_to_parser(self, parser, cli_name=None): - parser.add_argument(self.cli_name, dest=self.py_name, - default=self._default, nargs='*') + parser.add_argument( + self.cli_name, dest=self.py_name, default=self._default, nargs='*' + ) def add_to_params(self, parameters, value): if value: value = [{'PrivateIpAddress': v, 'Primary': False} for v in value] - _build_network_interfaces( - parameters, 'PrivateIpAddresses', value) + _build_network_interfaces(parameters, 'PrivateIpAddresses', value) class SecondaryPrivateIpAddressCountArgument(CustomArgument): - def add_to_parser(self, parser, cli_name=None): - parser.add_argument(self.cli_name, dest=self.py_name, - default=self._default, type=int) + parser.add_argument( + self.cli_name, dest=self.py_name, default=self._default, type=int + ) def add_to_params(self, parameters, value): if value: _build_network_interfaces( - parameters, 'SecondaryPrivateIpAddressCount', value) + parameters, 'SecondaryPrivateIpAddressCount', value + ) class AssociatePublicIpAddressArgument(CustomArgument): - def add_to_params(self, parameters, value): if value is True: _build_network_interfaces( - parameters, 'AssociatePublicIpAddress', value) + parameters, 'AssociatePublicIpAddress', value + ) class NoAssociatePublicIpAddressArgument(CustomArgument): - def add_to_params(self, parameters, value): if value is False: _build_network_interfaces( - parameters, 'AssociatePublicIpAddress', value) + parameters, 'AssociatePublicIpAddress', value + ) diff --git a/awscli/customizations/ec2/secgroupsimplify.py b/awscli/customizations/ec2/secgroupsimplify.py index 1debca5c76ec..af57e1e79dfe 100644 --- a/awscli/customizations/ec2/secgroupsimplify.py +++ b/awscli/customizations/ec2/secgroupsimplify.py @@ -27,8 +27,7 @@ def _add_params(argument_table, **kwargs): - arg = ProtocolArgument('protocol', - help_text=PROTOCOL_DOCS) + arg = ProtocolArgument('protocol', help_text=PROTOCOL_DOCS) argument_table['protocol'] = arg argument_table['ip-protocol']._UNDOCUMENTED = True @@ -43,13 +42,11 @@ def _add_params(argument_table, **kwargs): argument_table['cidr'] = arg 
argument_table['cidr-ip']._UNDOCUMENTED = True - arg = SourceGroupArgument('source-group', - help_text=SOURCEGROUP_DOCS) + arg = SourceGroupArgument('source-group', help_text=SOURCEGROUP_DOCS) argument_table['source-group'] = arg argument_table['source-security-group-name']._UNDOCUMENTED = True - arg = GroupOwnerArgument('group-owner', - help_text=GROUPOWNER_DOCS) + arg = GroupOwnerArgument('group-owner', help_text=GROUPOWNER_DOCS) argument_table['group-owner'] = arg argument_table['source-security-group-owner-id']._UNDOCUMENTED = True @@ -60,11 +57,12 @@ def _check_args(parsed_args, **kwargs): # raise an error. arg_dict = vars(parsed_args) if arg_dict['ip_permissions']: - for key in ('protocol', 'port', 'cidr', - 'source_group', 'group_owner'): + for key in ('protocol', 'port', 'cidr', 'source_group', 'group_owner'): if arg_dict[key]: - msg = ('The --%s option is not compatible ' - 'with the --ip-permissions option ') % key + msg = ( + 'The --%s option is not compatible ' + 'with the --ip-permissions option ' + ) % key raise ParamValidationError(msg) @@ -72,21 +70,29 @@ def _add_docs(help_command, **kwargs): doc = help_command.doc doc.style.new_paragraph() doc.style.start_note() - msg = ('To specify multiple rules in a single command ' - 'use the --ip-permissions option') + msg = ( + 'To specify multiple rules in a single command ' + 'use the --ip-permissions option' + ) doc.include_doc_string(msg) doc.style.end_note() EVENTS = [ - ('building-argument-table.ec2.authorize-security-group-ingress', - _add_params), - ('building-argument-table.ec2.authorize-security-group-egress', - _add_params), + ( + 'building-argument-table.ec2.authorize-security-group-ingress', + _add_params, + ), + ( + 'building-argument-table.ec2.authorize-security-group-egress', + _add_params, + ), ('building-argument-table.ec2.revoke-security-group-ingress', _add_params), ('building-argument-table.ec2.revoke-security-group-egress', _add_params), - ('operation-args-parsed.ec2.authorize-security-group-ingress', - _check_args), + ( + 'operation-args-parsed.ec2.authorize-security-group-ingress', + _check_args, + ), ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args), ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args), ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args), @@ -95,25 +101,31 @@ def _add_docs(help_command, **kwargs): ('doc-description.ec2.revoke-security-group-ingress', _add_docs), ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs), ] -PROTOCOL_DOCS = ('
<p>The IP protocol: <code>tcp</code> | '
-                 '<code>udp</code> | <code>icmp</code></p> '
-                 '<p>(VPC only) Use <code>all</code> to specify all protocols.</p> '
-                 '<p>If this argument is provided without also providing the '
-                 '<code>port</code> argument, then it will be applied to all '
-                 'ports for the specified protocol.</p> ')
-PORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'
-             ' A single integer or a range (min-max).</p> '
-             '<p>For ICMP: A single integer or a range (type-code)'
-             ' representing the ICMP type'
-             ' number and the ICMP code number respectively.'
-             ' A value of -1 indicates all ICMP codes for'
-             ' all ICMP types. A value of -1 just for type'
-             ' indicates all ICMP codes for the specified ICMP type.</p> ')
+PROTOCOL_DOCS = (
+    '<p>The IP protocol: <code>tcp</code> | '
+    '<code>udp</code> | <code>icmp</code></p> '
+    '<p>(VPC only) Use <code>all</code> to specify all protocols.</p> '
+    '<p>If this argument is provided without also providing the '
+    '<code>port</code> argument, then it will be applied to all '
+    'ports for the specified protocol.</p> '
+)
+PORT_DOCS = (
+    '<p>For TCP or UDP: The range of ports to allow.'
+    ' A single integer or a range (min-max).</p> '
+    '<p>For ICMP: A single integer or a range (type-code)'
+    ' representing the ICMP type'
+    ' number and the ICMP code number respectively.'
+    ' A value of -1 indicates all ICMP codes for'
+    ' all ICMP types. A value of -1 just for type'
+    ' indicates all ICMP codes for the specified ICMP type.</p> '
+)
 CIDR_DOCS = '<p>The IPv4 address range, in CIDR format.</p> '
-SOURCEGROUP_DOCS = ('<p>The name or ID of the source security group.</p> ')
-GROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '
-                   'group. Cannot be used when specifying a CIDR IP '
-                   'address.</p> ')
+SOURCEGROUP_DOCS = '<p>The name or ID of the source security group.</p> '
+GROUPOWNER_DOCS = (
+    '<p>The AWS account ID that owns the source security '
+    'group. Cannot be used when specifying a CIDR IP '
+    'address.</p>
    ' +) def register_secgroup(event_handler): @@ -137,19 +149,22 @@ def _build_ip_permissions(params, key, value): class ProtocolArgument(CustomArgument): - def add_to_params(self, parameters, value): if value: try: int_value = int(value) if (int_value < 0 or int_value > 255) and int_value != -1: - msg = ('protocol numbers must be in the range 0-255 ' - 'or -1 to specify all protocols') + msg = ( + 'protocol numbers must be in the range 0-255 ' + 'or -1 to specify all protocols' + ) raise ParamValidationError(msg) except ValueError: if value not in ('tcp', 'udp', 'icmp', 'all'): - msg = ('protocol parameter should be one of: ' - 'tcp|udp|icmp|all or any valid protocol number.') + msg = ( + 'protocol parameter should be one of: ' + 'tcp|udp|icmp|all or any valid protocol number.' + ) raise ParamValidationError(msg) if value == 'all': value = '-1' @@ -157,7 +172,6 @@ def add_to_params(self, parameters, value): class PortArgument(CustomArgument): - def add_to_params(self, parameters, value): if value: try: @@ -175,13 +189,14 @@ def add_to_params(self, parameters, value): _build_ip_permissions(parameters, 'FromPort', int(fromstr)) _build_ip_permissions(parameters, 'ToPort', int(tostr)) except ValueError: - msg = ('port parameter should be of the ' - 'form (e.g. 22 or 22-25)') + msg = ( + 'port parameter should be of the ' + 'form (e.g. 22 or 22-25)' + ) raise ParamValidationError(msg) class CidrArgument(CustomArgument): - def add_to_params(self, parameters, value): if value: value = [{'CidrIp': value}] @@ -189,7 +204,6 @@ def add_to_params(self, parameters, value): class SourceGroupArgument(CustomArgument): - def add_to_params(self, parameters, value): if value: if value.startswith('sg-'): @@ -199,7 +213,6 @@ def add_to_params(self, parameters, value): class GroupOwnerArgument(CustomArgument): - def add_to_params(self, parameters, value): if value: _build_ip_permissions(parameters, 'UserId', value) diff --git a/awscli/customizations/ec2instanceconnect/__init__.py b/awscli/customizations/ec2instanceconnect/__init__.py index 282f7d0a4872..86d5466982f3 100644 --- a/awscli/customizations/ec2instanceconnect/__init__.py +++ b/awscli/customizations/ec2instanceconnect/__init__.py @@ -13,9 +13,7 @@ from awscli.customizations.ec2instanceconnect.opentunnel import ( OpenTunnelCommand, ) -from awscli.customizations.ec2instanceconnect.ssh import ( - SshCommand, -) +from awscli.customizations.ec2instanceconnect.ssh import SshCommand def register_ec2_instance_connect_commands(event_handlers): diff --git a/awscli/customizations/ec2instanceconnect/eicefetcher.py b/awscli/customizations/ec2instanceconnect/eicefetcher.py index 1b65bf0cbc62..0282d7e0253f 100644 --- a/awscli/customizations/ec2instanceconnect/eicefetcher.py +++ b/awscli/customizations/ec2instanceconnect/eicefetcher.py @@ -12,7 +12,10 @@ # language governing permissions and limitations under the License. 
import logging -from awscli.customizations.exceptions import ParamValidationError, ConfigurationError +from awscli.customizations.exceptions import ( + ConfigurationError, + ParamValidationError, +) logger = logging.getLogger(__name__) @@ -22,7 +25,6 @@ class FipsEndpointUnsupported(ConfigurationError): class InstanceConnectEndpointRequestFetcher: - def get_eice_dns_name(self, eice_info, is_fips_enabled): fips_dns_name = eice_info.get('FipsDnsName') @@ -33,45 +35,72 @@ def get_eice_dns_name(self, eice_info, is_fips_enabled): elif is_fips_enabled and not fips_dns_name: raise FipsEndpointUnsupported("Unable to find FIPS Endpoint") - def get_available_instance_connect_endpoint(self, ec2_client, vpc_id, subnet_id, instance_connect_endpoint_id): + def get_available_instance_connect_endpoint( + self, ec2_client, vpc_id, subnet_id, instance_connect_endpoint_id + ): if instance_connect_endpoint_id: - return self._get_instance_connect_endpoint_by_id(ec2_client, instance_connect_endpoint_id) + return self._get_instance_connect_endpoint_by_id( + ec2_client, instance_connect_endpoint_id + ) else: - return self._get_instance_connect_endpoint_by_vpc(ec2_client, vpc_id, subnet_id) + return self._get_instance_connect_endpoint_by_vpc( + ec2_client, vpc_id, subnet_id + ) - def _get_instance_connect_endpoint_by_id(self, ec2_client, instance_connect_endpoint_id): + def _get_instance_connect_endpoint_by_id( + self, ec2_client, instance_connect_endpoint_id + ): args = { "Filters": [{"Name": "state", "Values": ["create-complete"]}], - "InstanceConnectEndpointIds": [instance_connect_endpoint_id] + "InstanceConnectEndpointIds": [instance_connect_endpoint_id], } - describe_eice_response = ec2_client.describe_instance_connect_endpoints(**args) - instance_connect_endpoints = describe_eice_response["InstanceConnectEndpoints"] + describe_eice_response = ( + ec2_client.describe_instance_connect_endpoints(**args) + ) + instance_connect_endpoints = describe_eice_response[ + "InstanceConnectEndpoints" + ] if instance_connect_endpoints: return instance_connect_endpoints[0] raise ParamValidationError( - f"There are no available instance connect endpoints with {instance_connect_endpoint_id}") + f"There are no available instance connect endpoints with {instance_connect_endpoint_id}" + ) - def _get_instance_connect_endpoint_by_vpc(self, ec2_client, vpc_id, subnet_id): + def _get_instance_connect_endpoint_by_vpc( + self, ec2_client, vpc_id, subnet_id + ): ## Describe until subnet match and if none match subnet then return the first one based on vpc-id filter - args = {"Filters": [ - {"Name": "state", "Values": ["create-complete"]}, - {"Name": "vpc-id", "Values": [vpc_id]} - ]} + args = { + "Filters": [ + {"Name": "state", "Values": ["create-complete"]}, + {"Name": "vpc-id", "Values": [vpc_id]}, + ] + } - paginator = ec2_client.get_paginator('describe_instance_connect_endpoints') + paginator = ec2_client.get_paginator( + 'describe_instance_connect_endpoints' + ) page_iterator = paginator.paginate(**args) instance_connect_endpoints = [] for page in page_iterator: page_result = page["InstanceConnectEndpoints"] - instance_connect_endpoints = instance_connect_endpoints + page_result + instance_connect_endpoints = ( + instance_connect_endpoints + page_result + ) if page_result: for eice in page_result: if eice['SubnetId'] == subnet_id: - logger.debug(f"Using EICE based on subnet: {instance_connect_endpoints[0]}") + logger.debug( + f"Using EICE based on subnet: {instance_connect_endpoints[0]}" + ) return eice if 
instance_connect_endpoints: - logger.debug(f"Using EICE based on vpc: {instance_connect_endpoints[0]}") + logger.debug( + f"Using EICE based on vpc: {instance_connect_endpoints[0]}" + ) return instance_connect_endpoints[0] - raise ParamValidationError("There are no available instance connect endpoints.") + raise ParamValidationError( + "There are no available instance connect endpoints." + ) diff --git a/awscli/customizations/ec2instanceconnect/eicesigner.py b/awscli/customizations/ec2instanceconnect/eicesigner.py index b9ccba861044..2568a15d3d1b 100644 --- a/awscli/customizations/ec2instanceconnect/eicesigner.py +++ b/awscli/customizations/ec2instanceconnect/eicesigner.py @@ -17,14 +17,14 @@ class InstanceConnectEndpointRequestSigner: def __init__( - self, - session, - instance_connect_endpoint_dns_name, - instance_connect_endpoint_id, - remote_port, - instance_private_ip, - max_tunnel_duration, - request_singer=None, + self, + session, + instance_connect_endpoint_dns_name, + instance_connect_endpoint_id, + remote_port, + instance_private_ip, + max_tunnel_duration, + request_singer=None, ): service_model = session.get_service_model("ec2-instance-connect") diff --git a/awscli/customizations/ec2instanceconnect/opentunnel.py b/awscli/customizations/ec2instanceconnect/opentunnel.py index 83807f012fb8..2d301b77b3a6 100644 --- a/awscli/customizations/ec2instanceconnect/opentunnel.py +++ b/awscli/customizations/ec2instanceconnect/opentunnel.py @@ -14,12 +14,14 @@ import sys from awscli.customizations.commands import BasicCommand +from awscli.customizations.ec2instanceconnect.eicefetcher import ( + InstanceConnectEndpointRequestFetcher, +) from awscli.customizations.ec2instanceconnect.eicesigner import ( InstanceConnectEndpointRequestSigner, ) from awscli.customizations.ec2instanceconnect.websocket import WebsocketManager from awscli.customizations.exceptions import ParamValidationError -from awscli.customizations.ec2instanceconnect.eicefetcher import InstanceConnectEndpointRequestFetcher logger = logging.getLogger(__name__) @@ -27,7 +29,9 @@ class OpenTunnelCommand(BasicCommand): NAME = "open-tunnel" - DESCRIPTION = "Opens a websocket tunnel to the specified EC2 Instance or private ip." + DESCRIPTION = ( + "Opens a websocket tunnel to the specified EC2 Instance or private ip." 
+ ) ARG_TABLE = [ { @@ -126,10 +130,16 @@ def _run_main(self, parsed_args, parsed_globals): ) instance_connect_endpoint_id = eice["InstanceConnectEndpointId"] - is_fips_enabled = self._session.get_config_variable('use_fips_endpoint') - instance_connect_endpoint_dns_name = eice_fetcher.get_eice_dns_name(eice, is_fips_enabled) + is_fips_enabled = self._session.get_config_variable( + 'use_fips_endpoint' + ) + instance_connect_endpoint_dns_name = ( + eice_fetcher.get_eice_dns_name(eice, is_fips_enabled) + ) - logger.debug(f"Using endpoint dns: {instance_connect_endpoint_dns_name}") + logger.debug( + f"Using endpoint dns: {instance_connect_endpoint_dns_name}" + ) eice_request_signer = InstanceConnectEndpointRequestSigner( self._session, instance_connect_endpoint_dns_name=instance_connect_endpoint_dns_name, @@ -140,7 +150,10 @@ def _run_main(self, parsed_args, parsed_globals): ) with WebsocketManager( - parsed_args.local_port, parsed_args.max_websocket_connections, eice_request_signer, self._session.user_agent(), + parsed_args.local_port, + parsed_args.max_websocket_connections, + eice_request_signer, + self._session.user_agent(), ) as websocket_manager: websocket_manager.run() return 0 @@ -148,13 +161,23 @@ def _run_main(self, parsed_args, parsed_globals): def _validate_parsed_args(self, parsed_args): if not parsed_args.instance_id and not parsed_args.private_ip_address: raise ParamValidationError("Specify an instance id or private ip.") - if parsed_args.instance_connect_endpoint_dns_name and not parsed_args.instance_connect_endpoint_id: - raise ParamValidationError("Specify an instance connect endpoint id when providing a DNS name.") - if parsed_args.private_ip_address and not parsed_args.instance_connect_endpoint_id: - raise ParamValidationError("Specify an instance connect endpoint id when providing a private ip.") + if ( + parsed_args.instance_connect_endpoint_dns_name + and not parsed_args.instance_connect_endpoint_id + ): + raise ParamValidationError( + "Specify an instance connect endpoint id when providing a DNS name." + ) + if ( + parsed_args.private_ip_address + and not parsed_args.instance_connect_endpoint_id + ): + raise ParamValidationError( + "Specify an instance connect endpoint id when providing a private ip." + ) if parsed_args.max_tunnel_duration is not None and ( - parsed_args.max_tunnel_duration < 1 - or parsed_args.max_tunnel_duration > 3_600 + parsed_args.max_tunnel_duration < 1 + or parsed_args.max_tunnel_duration > 3_600 ): raise ParamValidationError( "Invalid max connection timeout specified. Value must be greater than 1 and " @@ -164,4 +187,4 @@ def _validate_parsed_args(self, parsed_args): raise ParamValidationError( "This command does not support interactive mode. You must use this command as a proxy or in listener " "mode. " - ) \ No newline at end of file + ) diff --git a/awscli/customizations/ec2instanceconnect/ssh.py b/awscli/customizations/ec2instanceconnect/ssh.py index 3ba2f975255f..5b162d870ac8 100644 --- a/awscli/customizations/ec2instanceconnect/ssh.py +++ b/awscli/customizations/ec2instanceconnect/ssh.py @@ -10,20 +10,31 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-import os import logging +import os import re import shutil import subprocess import sys import tempfile -from awscli.customizations.commands import BasicCommand -from awscli.customizations.exceptions import ParamValidationError, ConfigurationError from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey -from cryptography.hazmat.primitives.serialization import PublicFormat, Encoding, PrivateFormat, NoEncryption +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, +) + from awscli.compat import compat_shell_quote -from awscli.customizations.ec2instanceconnect.eicefetcher import InstanceConnectEndpointRequestFetcher +from awscli.customizations.commands import BasicCommand +from awscli.customizations.ec2instanceconnect.eicefetcher import ( + InstanceConnectEndpointRequestFetcher, +) +from awscli.customizations.exceptions import ( + ConfigurationError, + ParamValidationError, +) logger = logging.getLogger(__name__) @@ -31,9 +42,7 @@ class SshCommand(BasicCommand): NAME = 'ssh' - CONNECTION_TYPES = ['auto', - 'direct', - 'eice'] + CONNECTION_TYPES = ['auto', 'direct', 'eice'] ARG_TABLE = [ { @@ -67,7 +76,7 @@ class SshCommand(BasicCommand): { 'name': 'local-forwarding', 'help_text': 'Specify the local forwarding specification as defined by your OpenSSH client. ' - '(Example: 3336:remote.host:3306)', + '(Example: 3336:remote.host:3306)', 'required': False, }, { @@ -77,21 +86,21 @@ class SshCommand(BasicCommand): '
<ul>'
            '<li><code>direct</code>: SSH directly to the instance. '
            'The CLI tries to connect using the IP addresses in the following order: '
-                         '<ul>'
-                         '<li>Public IPv4</li> '
-                         '<li>IPv6</li> '
-                         '<li>Private IPv4</li> '
-                         '</ul>'
+            '<ul>'
+            '<li>Public IPv4</li> '
+            '<li>IPv6</li> '
+            '<li>Private IPv4</li> '
+            '</ul>'
            '</li> '
            '<li><code>eice</code>: SSH using EC2 Instance Connect Endpoint. The CLI always uses the private IPv4 address.</li> '
            '<li><code>auto</code>: The CLI automatically determines the connection type (direct or eice) '
            'to use based on the instance info. Currently the CLI tries to connect using the IP addresses '
            'in the following order and with the corresponding connection type:'
-                         '<ul>'
-                         '<li>Public IPv4: direct</li> '
-                         '<li>Private IPv4: eice</li> '
-                         '<li>IPv6: direct</li> '
-                         '</ul>'
+            '<ul>'
+            '<li>Public IPv4: direct</li> '
+            '<li>Private IPv4: eice</li> '
+            '<li>IPv6: direct</li> '
+            '</ul>'
            '</li> '
            '</ul>
    ' 'In the future, we might change the behavior of the auto connection type. To ensure that your ' @@ -130,7 +139,7 @@ class SshCommand(BasicCommand): ), 'required': False, }, - } + }, }, }, ] @@ -139,7 +148,9 @@ class SshCommand(BasicCommand): def __init__(self, session, key_manager=None): super(SshCommand, self).__init__(session) - self._key_manager = KeyManager() if (key_manager is None) else key_manager + self._key_manager = ( + KeyManager() if (key_manager is None) else key_manager + ) def _run_main(self, parsed_args, parsed_globals): self._validate_parsed_args(parsed_args) @@ -169,7 +180,9 @@ def _run_main(self, parsed_args, parsed_globals): ip_address_to_connect = private_ip_address elif parsed_args.connection_type == 'direct': use_open_tunnel = False - ip_address_to_connect = public_ip_address or ipv6_address or private_ip_address + ip_address_to_connect = ( + public_ip_address or ipv6_address or private_ip_address + ) elif parsed_args.connection_type == 'auto': # In case of auto we use IPv4 address first and then IPv6 because right now most instance have these, but # in future we might want to switch this logic to where we select IPv6 first and then fallback to IPv4. @@ -184,22 +197,34 @@ def _run_main(self, parsed_args, parsed_globals): ip_address_to_connect = ipv6_address if ip_address_to_connect is None: - raise ParamValidationError('Unable to find any IP address on the instance to connect to.') + raise ParamValidationError( + 'Unable to find any IP address on the instance to connect to.' + ) - instance_connect_endpoint_id = self._get_eice_option(parsed_args.eice_options, 'endpointId') - instance_connect_endpoint_dns_name = self._get_eice_option(parsed_args.eice_options, 'dnsName') + instance_connect_endpoint_id = self._get_eice_option( + parsed_args.eice_options, 'endpointId' + ) + instance_connect_endpoint_dns_name = self._get_eice_option( + parsed_args.eice_options, 'dnsName' + ) if use_open_tunnel and not instance_connect_endpoint_dns_name: eice_fetcher = InstanceConnectEndpointRequestFetcher() eice_info = eice_fetcher.get_available_instance_connect_endpoint( ec2_client, instance_metadata["VpcId"], instance_metadata["SubnetId"], - instance_connect_endpoint_id + instance_connect_endpoint_id, ) - instance_connect_endpoint_id = eice_info["InstanceConnectEndpointId"] + instance_connect_endpoint_id = eice_info[ + "InstanceConnectEndpointId" + ] - is_fips_enabled = self._session.get_config_variable('use_fips_endpoint') - instance_connect_endpoint_dns_name = eice_fetcher.get_eice_dns_name(eice_info, is_fips_enabled) + is_fips_enabled = self._session.get_config_variable( + 'use_fips_endpoint' + ) + instance_connect_endpoint_dns_name = ( + eice_fetcher.get_eice_dns_name(eice_info, is_fips_enabled) + ) # Validate ssh key exist (either use user defined or create new one) key_file = parsed_args.private_key_file @@ -209,7 +234,10 @@ def _run_main(self, parsed_args, parsed_globals): key_file = os.path.join(tmp_dir, 'private-key') logger.debug('Generate new ssh key and upload') private_pem_bytes = self._generate_and_upload_ssh_key( - parsed_args.instance_id, parsed_args.os_user, parsed_globals) + parsed_args.instance_id, + parsed_args.os_user, + parsed_globals, + ) with open(key_file, 'wb') as fd: fd.write(private_pem_bytes) @@ -226,50 +254,70 @@ def _run_main(self, parsed_args, parsed_globals): ip_address_to_connect, instance_connect_endpoint_id, instance_connect_endpoint_dns_name, - self._get_eice_option(parsed_args.eice_options, 'maxTunnelDuration'), - parsed_globals + 
self._get_eice_option( + parsed_args.eice_options, 'maxTunnelDuration' + ), + parsed_globals, ) def _validate_parsed_args(self, parsed_args): if parsed_args.instance_id: if not re.search("^i-[0-9a-zA-Z]+$", parsed_args.instance_id): - raise ParamValidationError('The specified instance ID is invalid. ' - 'Provide the full instance ID in the form i-xxxxxxxxxxxxxxxxx.') + raise ParamValidationError( + 'The specified instance ID is invalid. ' + 'Provide the full instance ID in the form i-xxxxxxxxxxxxxxxxx.' + ) eice_options = parsed_args.eice_options if parsed_args.connection_type == "direct" and eice_options: - raise ParamValidationError('eice-options can\'t be specified when connection type is direct.') + raise ParamValidationError( + 'eice-options can\'t be specified when connection type is direct.' + ) - if self._get_eice_option(eice_options, 'dnsName') and not self._get_eice_option(eice_options, 'endpointId'): - raise ParamValidationError('When specifying dnsName, you must specify endpointId.') + if self._get_eice_option( + eice_options, 'dnsName' + ) and not self._get_eice_option(eice_options, 'endpointId'): + raise ParamValidationError( + 'When specifying dnsName, you must specify endpointId.' + ) if eice_options and 'maxTunnelDuration' in eice_options: max_tunnel_duration = eice_options['maxTunnelDuration'] - if max_tunnel_duration is not None and (max_tunnel_duration < 1 or max_tunnel_duration > 3_600): + if max_tunnel_duration is not None and ( + max_tunnel_duration < 1 or max_tunnel_duration > 3_600 + ): raise ParamValidationError( 'Invalid value specified for maxTunnelDuration. Value must be greater than 1 and ' 'less than 3600.' ) if self._get_eice_option(eice_options, 'endpointId'): - if not re.search("^eice-[0-9a-zA-Z_]+$", eice_options['endpointId']): - raise ParamValidationError('The specified endpointId is invalid. ' - 'Provide the full EC2 Instance Connect Endpoint ID in ' - 'the form eice-xxxxxxxxxxxxxxxxx.') + if not re.search( + "^eice-[0-9a-zA-Z_]+$", eice_options['endpointId'] + ): + raise ParamValidationError( + 'The specified endpointId is invalid. ' + 'Provide the full EC2 Instance Connect Endpoint ID in ' + 'the form eice-xxxxxxxxxxxxxxxxx.' + ) if self._get_eice_option(eice_options, 'dnsName'): if not re.search('^[0-9a-zA-Z.-]+$', eice_options['dnsName']): raise ParamValidationError('The specified dnsName is invalid.') if parsed_args.instance_ip and parsed_args.connection_type == 'auto': - raise ParamValidationError('When specifying instance-ip, you must specify connection-type.') + raise ParamValidationError( + 'When specifying instance-ip, you must specify connection-type.' 
+ ) def _get_eice_option(self, eice_options, option): if eice_options: return eice_options.get(option) return None - def _generate_and_upload_ssh_key(self, instance_id, os_user, parsed_globals): + def _generate_and_upload_ssh_key( + self, instance_id, os_user, parsed_globals + ): private_key = self._key_manager.generate_key() logger.debug('Upload public ssh key to instance') @@ -279,18 +327,33 @@ def _generate_and_upload_ssh_key(self, instance_id, os_user, parsed_globals): verify=parsed_globals.verify_ssl, ) public_key = self._key_manager.get_public_key(private_key) - self._key_manager.upload_public_key(ec2_instance_connect_client, instance_id, os_user, public_key) + self._key_manager.upload_public_key( + ec2_instance_connect_client, instance_id, os_user, public_key + ) return self._key_manager.get_private_pem(private_key) - def _generate_open_tunnel_command(self, instance_id, private_ip_address, ssh_port, eice_id, eice_dns_name, - max_tunnel_duration, parsed_globals): + def _generate_open_tunnel_command( + self, + instance_id, + private_ip_address, + ssh_port, + eice_id, + eice_dns_name, + max_tunnel_duration, + parsed_globals, + ): aws_cli_path = sys.argv[0] proxy_command = [ - aws_cli_path, 'ec2-instance-connect', 'open-tunnel', - '--instance-id', instance_id, - '--private-ip-address', private_ip_address, - '--remote-port', str(ssh_port), + aws_cli_path, + 'ec2-instance-connect', + 'open-tunnel', + '--instance-id', + instance_id, + '--private-ip-address', + private_ip_address, + '--remote-port', + str(ssh_port), ] logger.debug(f"Using aws: {aws_cli_path}") @@ -312,20 +375,42 @@ def _generate_open_tunnel_command(self, instance_id, private_ip_address, ssh_por return proxy_command - def _ssh(self, use_open_tunnel, instance_id, ssh_port, os_user, local_forwarding, key_file, - ip_address, eice_id, eice_dns_name, max_tunnel_duration, parsed_globals): - + def _ssh( + self, + use_open_tunnel, + instance_id, + ssh_port, + os_user, + local_forwarding, + key_file, + ip_address, + eice_id, + eice_dns_name, + max_tunnel_duration, + parsed_globals, + ): proxy_command = self._generate_open_tunnel_command( - instance_id, ip_address, ssh_port, eice_id, eice_dns_name, max_tunnel_duration, parsed_globals) + instance_id, + ip_address, + ssh_port, + eice_id, + eice_dns_name, + max_tunnel_duration, + parsed_globals, + ) command = [ 'ssh', # adding ServerAliveInterval as default because it offers better customer experience as it let customer # know about terminated connections. If we want to allow customer to override this we can add additional # parameter to this cli command - '-o', 'ServerAliveInterval=5', - '-p', str(ssh_port), - '-i', key_file, os_user + '@' + ip_address, + '-o', + 'ServerAliveInterval=5', + '-p', + str(ssh_port), + '-i', + key_file, + os_user + '@' + ip_address, ] ssh_path = shutil.which('ssh') @@ -333,8 +418,10 @@ def _ssh(self, use_open_tunnel, instance_id, ssh_port, os_user, local_forwarding command[0] = ssh_path logger.debug(f"Using ssh: {ssh_path}") else: - raise ConfigurationError('SSH not available. Please refer to the documentation ' - 'at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Connect-using-EC2-Instance-Connect-Endpoint.html.') + raise ConfigurationError( + 'SSH not available. Please refer to the documentation ' + 'at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Connect-using-EC2-Instance-Connect-Endpoint.html.' 
+ ) # Add local-forwarding option if users passed local-forwarding if local_forwarding: @@ -348,7 +435,10 @@ def _ssh(self, use_open_tunnel, instance_id, ssh_port, os_user, local_forwarding # If we are trying to connect to instance in private subnet lets use open-tunnel command to use eice if use_open_tunnel: command.insert(-1, '-o') - command.insert(-1, f"ProxyCommand={' '.join(compat_shell_quote(a) for a in proxy_command)}") + command.insert( + -1, + f"ProxyCommand={' '.join(compat_shell_quote(a) for a in proxy_command)}", + ) logger.debug('Invoking SSH command: %s', command) rc = subprocess.call(command) @@ -362,18 +452,19 @@ def generate_key(self): def get_public_key(self, private_key): return private_key.public_key().public_bytes( - encoding=Encoding.OpenSSH, - format=PublicFormat.OpenSSH + encoding=Encoding.OpenSSH, format=PublicFormat.OpenSSH ) def get_private_pem(self, private_key): return private_key.private_bytes( encoding=Encoding.PEM, format=PrivateFormat.OpenSSH, - encryption_algorithm=NoEncryption() + encryption_algorithm=NoEncryption(), ) - def upload_public_key(self, ec2_instance_connect_client, instance_id, os_user, public_key): + def upload_public_key( + self, ec2_instance_connect_client, instance_id, os_user, public_key + ): logger.debug('Upload ssh key to instance') ec2_instance_connect_client.send_ssh_public_key( InstanceId=instance_id, diff --git a/awscli/customizations/ec2instanceconnect/websocket.py b/awscli/customizations/ec2instanceconnect/websocket.py index 6f3d4618ad7f..013397a6c6da 100644 --- a/awscli/customizations/ec2instanceconnect/websocket.py +++ b/awscli/customizations/ec2instanceconnect/websocket.py @@ -28,10 +28,11 @@ from awscrt.io import ClientTlsContext, TlsContextOptions from awscrt.websocket import ( OnConnectionSetupData, + OnConnectionShutdownData, + OnIncomingFrameCompleteData, OnIncomingFramePayloadData, OnSendFrameCompleteData, Opcode, - OnConnectionShutdownData, OnIncomingFrameCompleteData, ) from awscli.compat import is_windows @@ -42,9 +43,11 @@ class WebsocketException(RuntimeError): pass + class InputClosedError(RuntimeError): pass + class BaseWebsocketIO: def has_data_to_read(self): raise NotImplementedError("has_data_to_read") @@ -63,10 +66,11 @@ def close(self): class StdinStdoutIO(BaseWebsocketIO): - def has_data_to_read(self): socket_list = [sys.stdin] - read_sockets, _, _ = select.select(socket_list, [], [], _SELECT_TIMEOUT) + read_sockets, _, _ = select.select( + socket_list, [], [], _SELECT_TIMEOUT + ) if read_sockets: return True return False @@ -83,13 +87,11 @@ def close(self): class WindowsStdinStdoutIO(StdinStdoutIO): - def has_data_to_read(self): return True class TCPSocketIO(BaseWebsocketIO): - def __init__(self, conn): self.conn = conn @@ -121,19 +123,36 @@ def close(self): class Websocket: - def __init__(self, websocketio, websocket_id, tls_connection_options=None, on_connection_event=None, - shutdown_event=None, send_frame_results_queue=None): + def __init__( + self, + websocketio, + websocket_id, + tls_connection_options=None, + on_connection_event=None, + shutdown_event=None, + send_frame_results_queue=None, + ): self.websocketio = websocketio self.websocket_id = websocket_id - self._on_connection_event = Event() if (on_connection_event is None) else on_connection_event - self._send_frame_results_queue = Queue() if (send_frame_results_queue is None) else send_frame_results_queue - self._shutdown_event = Event() if (shutdown_event is None) else shutdown_event + self._on_connection_event = ( + Event() if 
(on_connection_event is None) else on_connection_event + ) + self._send_frame_results_queue = ( + Queue() + if (send_frame_results_queue is None) + else send_frame_results_queue + ) + self._shutdown_event = ( + Event() if (shutdown_event is None) else shutdown_event + ) self._websocket = None self._exception = None self._close_frame_bytes = bytearray() if tls_connection_options is None: - self._tls_connection_options = ClientTlsContext(TlsContextOptions()).new_connection_options() + self._tls_connection_options = ClientTlsContext( + TlsContextOptions() + ).new_connection_options() else: self._tls_connection_options = tls_connection_options @@ -147,21 +166,29 @@ def exception(self): def connect(self, url, user_agent=None): parsed_url = urlparse(url) path = parsed_url.path + "?" + parsed_url.query - request = websocket.create_handshake_request(host=parsed_url.hostname, path=path) + request = websocket.create_handshake_request( + host=parsed_url.hostname, path=path + ) if user_agent: request.headers.set("User-Agent", user_agent) environment = os.environ.copy() proxy_options = None - proxy_url = environment.get("HTTP_PROXY") or environment.get("HTTPS_PROXY") + proxy_url = environment.get("HTTP_PROXY") or environment.get( + "HTTPS_PROXY" + ) no_proxy = environment.get("NO_PROXY", "") if proxy_url and parsed_url.hostname not in no_proxy: parsed_proxy_url = urlparse(proxy_url) - logger.debug(f"Using the following proxy: {parsed_proxy_url.hostname}") + logger.debug( + f"Using the following proxy: {parsed_proxy_url.hostname}" + ) proxy_options = HttpProxyOptions( host_name=parsed_proxy_url.hostname, port=parsed_proxy_url.port, - auth_type=HttpProxyAuthenticationType.Basic if proxy_url else HttpProxyAuthenticationType.Nothing, + auth_type=HttpProxyAuthenticationType.Basic + if proxy_url + else HttpProxyAuthenticationType.Nothing, auth_username=parsed_proxy_url.username or None, auth_password=parsed_proxy_url.password or None, ) @@ -176,7 +203,7 @@ def connect(self, url, user_agent=None): on_connection_setup=self._on_connection, on_connection_shutdown=self._on_connection_shutdown, on_incoming_frame_payload=self._on_incoming_frame_payload_data, - on_incoming_frame_complete=self._on_incoming_frame_complete + on_incoming_frame_complete=self._on_incoming_frame_complete, ) # Wait for the on_connection callback to be called. @@ -230,7 +257,9 @@ def _write_data_from_input(self): ] # Expected exception if server or user closes conn. Catch it and gracefully exit this method. if any(exc_code in str(e.args) for exc_code in crt_exceptions): - logger.debug(f"Received exception when sending websocket frame: {e.args}") + logger.debug( + f"Received exception when sending websocket frame: {e.args}" + ) self.close() else: raise e @@ -253,20 +282,29 @@ def _on_connection(self, data: OnConnectionSetupData) -> None: self._websocket = data.websocket self._on_connection_event.set() - def _on_incoming_frame_payload_data(self, incoming_frame_data: OnIncomingFramePayloadData) -> None: + def _on_incoming_frame_payload_data( + self, incoming_frame_data: OnIncomingFramePayloadData + ) -> None: opcode = incoming_frame_data.frame.opcode if not opcode.is_data_frame(): if opcode == Opcode.CLOSE and incoming_frame_data.data: self._close_frame_bytes += incoming_frame_data.data return if opcode == Opcode.TEXT: - self._exception = WebsocketException("Received invalid data from server, closing websocket connection.") + self._exception = WebsocketException( + "Received invalid data from server, closing websocket connection." 
+ ) self.close() return self.websocketio.write(incoming_frame_data.data) - def _on_incoming_frame_complete(self, incoming_frame_data: OnIncomingFrameCompleteData): - if incoming_frame_data.frame.opcode == Opcode.CLOSE and incoming_frame_data.exception is None: + def _on_incoming_frame_complete( + self, incoming_frame_data: OnIncomingFrameCompleteData + ): + if ( + incoming_frame_data.frame.opcode == Opcode.CLOSE + and incoming_frame_data.exception is None + ): if len(self._close_frame_bytes) > 0: shutdown_code_as_bytes = self._close_frame_bytes[0:2] # The shutdown code is a packed 2 byte unsigned int. @@ -274,11 +312,17 @@ def _on_incoming_frame_complete(self, incoming_frame_data: OnIncomingFrameComple shutdown_reason_in_bytes = self._close_frame_bytes[2:] if shutdown_code != 1000: logger.debug("Shutdown code: %s", str(shutdown_code)) - decoded_shutdown_reason = shutdown_reason_in_bytes.decode("utf-8") - self._exception = WebsocketException(f"Websocket Closure Reason: {decoded_shutdown_reason}") + decoded_shutdown_reason = shutdown_reason_in_bytes.decode( + "utf-8" + ) + self._exception = WebsocketException( + f"Websocket Closure Reason: {decoded_shutdown_reason}" + ) self.close() - def _on_send_frame_complete_data(self, send_frame_data: OnSendFrameCompleteData) -> None: + def _on_send_frame_complete_data( + self, send_frame_data: OnSendFrameCompleteData + ) -> None: self._send_frame_results_queue.put(send_frame_data) def _on_connection_shutdown(self, data: OnConnectionShutdownData) -> None: @@ -288,7 +332,13 @@ def _on_connection_shutdown(self, data: OnConnectionShutdownData) -> None: class WebsocketManager: - def __init__(self, port, max_websocket_connections, eice_request_signer, user_agent=None): + def __init__( + self, + port, + max_websocket_connections, + eice_request_signer, + user_agent=None, + ): self._port = port self._executor = ThreadPoolExecutor( max_workers=max_websocket_connections @@ -322,8 +372,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): def run(self): # If no port is specified, open a singular websocket connection. if not self._port: - websocketio = WindowsStdinStdoutIO() if is_windows else StdinStdoutIO() - future = self._open_websocket_connection(Websocket(websocketio, websocket_id=None)) + websocketio = ( + WindowsStdinStdoutIO() if is_windows else StdinStdoutIO() + ) + future = self._open_websocket_connection( + Websocket(websocketio, websocket_id=None) + ) # Block until the future completes. future.result() else: @@ -342,21 +396,32 @@ def _listen_on_port(self): conn, addr = self._socket.accept() # Check if we have reached max connections self._remove_done_futures() - if len(self._inflight_futures_and_websockets) >= self._max_websocket_connections: - print(f"Max websocket connections {self._max_websocket_connections} have been reached, closing " - f"incoming connection.") + if ( + len(self._inflight_futures_and_websockets) + >= self._max_websocket_connections + ): + print( + f"Max websocket connections {self._max_websocket_connections} have been reached, closing " + f"incoming connection." + ) conn.close() continue websocket_id = self._connection_id_counter self._connection_id_counter += 1 - print(f"[{websocket_id}] Accepted new tcp connection, opening websocket tunnel.") + print( + f"[{websocket_id}] Accepted new tcp connection, opening websocket tunnel." 
+ ) try: web_socket = Websocket(TCPSocketIO(conn), websocket_id) future = self._open_websocket_connection(web_socket) - future.add_done_callback(self._print_tcp_conn_closed(web_socket)) + future.add_done_callback( + self._print_tcp_conn_closed(web_socket) + ) except WebsocketException as e: - logger.error(f"[{websocket_id}] Encountered error opening websocket: {e.args}") + logger.error( + f"[{websocket_id}] Encountered error opening websocket: {e.args}" + ) def _open_websocket_connection(self, web_socket): presigned_url = self._eice_request_signer.get_presigned_url() diff --git a/awscli/customizations/ecr.py b/awscli/customizations/ecr.py index 71e7abe19b51..ae141b9ee7ac 100644 --- a/awscli/customizations/ecr.py +++ b/awscli/customizations/ecr.py @@ -10,12 +10,12 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import sys +from base64 import b64decode + from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import create_client_from_parsed_globals -from base64 import b64decode -import sys - def register_ecr_commands(cli): cli.register('building-command-table.ecr', _inject_commands) @@ -27,16 +27,17 @@ def _inject_commands(command_table, session, **kwargs): class ECRGetLoginPassword(BasicCommand): """Get a password to be used with container clients such as Docker""" + NAME = 'get-login-password' DESCRIPTION = BasicCommand.FROM_FILE( - 'ecr/get-login-password_description.rst') + 'ecr/get-login-password_description.rst' + ) def _run_main(self, parsed_args, parsed_globals): ecr_client = create_client_from_parsed_globals( - self._session, - 'ecr', - parsed_globals) + self._session, 'ecr', parsed_globals + ) result = ecr_client.get_authorization_token() auth = result['authorizationData'][0] auth_token = b64decode(auth['authorizationToken']).decode() diff --git a/awscli/customizations/ecr_public.py b/awscli/customizations/ecr_public.py index 01a5458907a2..c48f81a32b5f 100644 --- a/awscli/customizations/ecr_public.py +++ b/awscli/customizations/ecr_public.py @@ -10,12 +10,12 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
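For readers skimming the `get-login-password` hunk above: the ECR authorization token is a base64-encoded `user:password` pair, and the command emits only the password portion. A minimal standalone sketch of that decoding step, assuming a configured boto3 client (boto3 and the trailing `split` are illustrative here, not part of this diff)::

    from base64 import b64decode

    import boto3

    ecr = boto3.client('ecr')
    auth = ecr.get_authorization_token()['authorizationData'][0]
    # The decoded token is "<user>:<password>"; the user is typically "AWS".
    user, password = b64decode(auth['authorizationToken']).decode().split(':', 1)
    print(password)
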
+import sys +from base64 import b64decode + from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import create_client_from_parsed_globals -from base64 import b64decode -import sys - def register_ecr_public_commands(cli): cli.register('building-command-table.ecr-public', _inject_commands) @@ -27,16 +27,17 @@ def _inject_commands(command_table, session, **kwargs): class ECRPublicGetLoginPassword(BasicCommand): """Get a password to be used with container clients such as Docker""" + NAME = 'get-login-password' DESCRIPTION = BasicCommand.FROM_FILE( - 'ecr-public/get-login-password_description.rst') + 'ecr-public/get-login-password_description.rst' + ) def _run_main(self, parsed_args, parsed_globals): ecr_public_client = create_client_from_parsed_globals( - self._session, - 'ecr-public', - parsed_globals) + self._session, 'ecr-public', parsed_globals + ) result = ecr_public_client.get_authorization_token() auth = result['authorizationData'] auth_token = b64decode(auth['authorizationToken']).decode() diff --git a/awscli/customizations/ecs/__init__.py b/awscli/customizations/ecs/__init__.py index 6492cedf2e1c..6516360a42e4 100644 --- a/awscli/customizations/ecs/__init__.py +++ b/awscli/customizations/ecs/__init__.py @@ -12,8 +12,10 @@ # language governing permissions and limitations under the License. from awscli.customizations.ecs.deploy import ECSDeploy -from awscli.customizations.ecs.executecommand import ECSExecuteCommand -from awscli.customizations.ecs.executecommand import ExecuteCommandCaller +from awscli.customizations.ecs.executecommand import ( + ECSExecuteCommand, + ExecuteCommandCaller, +) def initialize(cli): @@ -33,7 +35,8 @@ def inject_commands(command_table, session, **kwargs): name='execute-command', parent_name='ecs', session=session, - operation_model=session.get_service_model('ecs') - .operation_model('ExecuteCommand'), + operation_model=session.get_service_model('ecs').operation_model( + 'ExecuteCommand' + ), operation_caller=ExecuteCommandCaller(session), ) diff --git a/awscli/customizations/ecs/deploy.py b/awscli/customizations/ecs/deploy.py index d8afe62054d3..043ca7ca5ed8 100644 --- a/awscli/customizations/ecs/deploy.py +++ b/awscli/customizations/ecs/deploy.py @@ -16,11 +16,11 @@ import os import sys -from botocore import compat, config -from botocore.exceptions import ClientError from awscli.compat import compat_open -from awscli.customizations.ecs import exceptions, filehelpers from awscli.customizations.commands import BasicCommand +from awscli.customizations.ecs import exceptions, filehelpers +from botocore import compat, config +from botocore.exceptions import ClientError TIMEOUT_BUFFER_MIN = 10 DEFAULT_DELAY_SEC = 15 @@ -45,96 +45,113 @@ class ECSDeploy(BasicCommand): ARG_TABLE = [ { 'name': 'service', - 'help_text': ("The short name or full Amazon Resource Name " - "(ARN) of the service to update"), - 'required': True + 'help_text': ( + "The short name or full Amazon Resource Name " + "(ARN) of the service to update" + ), + 'required': True, }, { 'name': 'task-definition', - 'help_text': ("The file path where your task definition file is " - "located. The format of the file must be the same " - "as the JSON output of: aws ecs " - "register-task-definition " - "--generate-cli-skeleton"), - 'required': True + 'help_text': ( + "The file path where your task definition file is " + "located. 
The format of the file must be the same " + "as the JSON output of: aws ecs " + "register-task-definition " + "--generate-cli-skeleton" + ), + 'required': True, }, { 'name': 'codedeploy-appspec', - 'help_text': ("The file path where your AWS CodeDeploy appspec " - "file is located. The appspec file may be in JSON " - "or YAML format. The TaskDefinition " - "property will be updated within the appspec with " - "the newly registered task definition ARN, " - "overwriting any placeholder values in the file."), - 'required': True + 'help_text': ( + "The file path where your AWS CodeDeploy appspec " + "file is located. The appspec file may be in JSON " + "or YAML format. The TaskDefinition " + "property will be updated within the appspec with " + "the newly registered task definition ARN, " + "overwriting any placeholder values in the file." + ), + 'required': True, }, { 'name': 'cluster', - 'help_text': ("The short name or full Amazon Resource Name " - "(ARN) of the cluster that your service is " - "running within. If you do not specify a " - "cluster, the \"default\" cluster is assumed."), - 'required': False + 'help_text': ( + "The short name or full Amazon Resource Name " + "(ARN) of the cluster that your service is " + "running within. If you do not specify a " + "cluster, the \"default\" cluster is assumed." + ), + 'required': False, }, { 'name': 'codedeploy-application', - 'help_text': ("The name of the AWS CodeDeploy application " - "to use for the deployment. The specified " - "application must use the 'ECS' compute " - "platform. If you do not specify an " - "application, the application name " - "AppECS-[CLUSTER_NAME]-[SERVICE_NAME] " - "is assumed."), - 'required': False + 'help_text': ( + "The name of the AWS CodeDeploy application " + "to use for the deployment. The specified " + "application must use the 'ECS' compute " + "platform. If you do not specify an " + "application, the application name " + "AppECS-[CLUSTER_NAME]-[SERVICE_NAME] " + "is assumed." + ), + 'required': False, }, { 'name': 'codedeploy-deployment-group', - 'help_text': ("The name of the AWS CodeDeploy deployment " - "group to use for the deployment. The " - "specified deployment group must be associated " - "with the specified ECS service and cluster. " - "If you do not specify a deployment group, " - "the deployment group name " - "DgpECS-[CLUSTER_NAME]-[SERVICE_NAME] " - "is assumed."), - 'required': False - } + 'help_text': ( + "The name of the AWS CodeDeploy deployment " + "group to use for the deployment. The " + "specified deployment group must be associated " + "with the specified ECS service and cluster. " + "If you do not specify a deployment group, " + "the deployment group name " + "DgpECS-[CLUSTER_NAME]-[SERVICE_NAME] " + "is assumed." 
+ ), + 'required': False, + }, ] - MSG_TASK_DEF_REGISTERED = \ + MSG_TASK_DEF_REGISTERED = ( "Successfully registered new ECS task definition {arn}\n" + ) MSG_CREATED_DEPLOYMENT = "Successfully created deployment {id}\n" - MSG_SUCCESS = ("Successfully deployed {task_def} to " - "service '{service}'\n") + MSG_SUCCESS = ( + "Successfully deployed {task_def} to " "service '{service}'\n" + ) USER_AGENT_EXTRA = 'md/customization#ecs-deploy' def _run_main(self, parsed_args, parsed_globals): - - register_task_def_kwargs, appspec_obj = \ - self._load_file_args(parsed_args.task_definition, - parsed_args.codedeploy_appspec) + register_task_def_kwargs, appspec_obj = self._load_file_args( + parsed_args.task_definition, parsed_args.codedeploy_appspec + ) ecs_client_wrapper = ECSClient( - self._session, parsed_args, parsed_globals, self.USER_AGENT_EXTRA) + self._session, parsed_args, parsed_globals, self.USER_AGENT_EXTRA + ) self.resources = self._get_resource_names( - parsed_args, ecs_client_wrapper) + parsed_args, ecs_client_wrapper + ) codedeploy_client = self._session.create_client( 'codedeploy', region_name=parsed_globals.region, verify=parsed_globals.verify_ssl, - config=config.Config(user_agent_extra=self.USER_AGENT_EXTRA)) + config=config.Config(user_agent_extra=self.USER_AGENT_EXTRA), + ) self._validate_code_deploy_resources(codedeploy_client) self.wait_time = self._cd_validator.get_deployment_wait_time() self.task_def_arn = self._register_task_def( - register_task_def_kwargs, ecs_client_wrapper) + register_task_def_kwargs, ecs_client_wrapper + ) self._create_and_wait_for_deployment(codedeploy_client, appspec_obj) return 0 @@ -143,18 +160,19 @@ def _create_and_wait_for_deployment(self, client, appspec): deployer = CodeDeployer(client, appspec) deployer.update_task_def_arn(self.task_def_arn) deployment_id = deployer.create_deployment( - self.resources['app_name'], - self.resources['deployment_group_name']) + self.resources['app_name'], self.resources['deployment_group_name'] + ) - sys.stdout.write(self.MSG_CREATED_DEPLOYMENT.format( - id=deployment_id)) + sys.stdout.write(self.MSG_CREATED_DEPLOYMENT.format(id=deployment_id)) deployer.wait_for_deploy_success(deployment_id, self.wait_time) service_name = self.resources['service'] sys.stdout.write( self.MSG_SUCCESS.format( - task_def=self.task_def_arn, service=service_name)) + task_def=self.task_def_arn, service=service_name + ) + ) sys.stdout.flush() def _get_file_contents(self, file_path): @@ -163,8 +181,7 @@ def _get_file_contents(self, file_path): with compat_open(full_path) as f: return f.read() except (OSError, IOError, UnicodeDecodeError) as e: - raise exceptions.FileLoadError( - file_path=file_path, error=e) + raise exceptions.FileLoadError(file_path=file_path, error=e) def _get_resource_names(self, args, ecs_client): service_details = ecs_client.get_service_details() @@ -172,9 +189,11 @@ def _get_resource_names(self, args, ecs_client): cluster_name = service_details['cluster_name'] application_name = filehelpers.get_app_name( - service_name, cluster_name, args.codedeploy_application) + service_name, cluster_name, args.codedeploy_application + ) deployment_group_name = filehelpers.get_deploy_group_name( - service_name, cluster_name, args.codedeploy_deployment_group) + service_name, cluster_name, args.codedeploy_deployment_group + ) return { 'service': service_name, @@ -182,7 +201,7 @@ def _get_resource_names(self, args, ecs_client): 'cluster': cluster_name, 'cluster_arn': service_details['cluster_arn'], 'app_name': application_name, - 
'deployment_group_name': deployment_group_name + 'deployment_group_name': deployment_group_name, } def _load_file_args(self, task_def_arg, appspec_arg): @@ -199,8 +218,7 @@ def _register_task_def(self, task_def_kwargs, ecs_client): task_def_arn = response['taskDefinition']['taskDefinitionArn'] - sys.stdout.write(self.MSG_TASK_DEF_REGISTERED.format( - arn=task_def_arn)) + sys.stdout.write(self.MSG_TASK_DEF_REGISTERED.format(arn=task_def_arn)) sys.stdout.flush() return task_def_arn @@ -212,10 +230,11 @@ def _validate_code_deploy_resources(self, client): self._cd_validator = validator -class CodeDeployer(): - - MSG_WAITING = ("Waiting for {deployment_id} to succeed " - "(will wait up to {wait} minutes)...\n") +class CodeDeployer: + MSG_WAITING = ( + "Waiting for {deployment_id} to succeed " + "(will wait up to {wait} minutes)...\n" + ) def __init__(self, cd_client, appspec_dict): self._client = cd_client @@ -223,13 +242,15 @@ def __init__(self, cd_client, appspec_dict): def create_deployment(self, app_name, deploy_grp_name): request_obj = self._get_create_deploy_request( - app_name, deploy_grp_name) + app_name, deploy_grp_name + ) try: response = self._client.create_deployment(**request_obj) except ClientError as e: raise exceptions.ServiceClientError( - action='create deployment', error=e) + action='create deployment', error=e + ) return response['deploymentId'] @@ -246,9 +267,9 @@ def _get_create_deploy_request(self, app_name, deploy_grp_name): "revisionType": "AppSpecContent", "appSpecContent": { "content": json.dumps(self._appspec_dict), - "sha256": self._get_appspec_hash() - } - } + "sha256": self._get_appspec_hash(), + }, + }, } def update_task_def_arn(self, new_arn): @@ -270,7 +291,8 @@ def update_task_def_arn(self, new_arn): appspec_obj = self._appspec_dict resources_key = filehelpers.find_required_key( - 'codedeploy-appspec', appspec_obj, 'resources') + 'codedeploy-appspec', appspec_obj, 'resources' + ) updated_resources = [] # 'resources' is a list of string:obj dictionaries @@ -280,11 +302,13 @@ def update_task_def_arn(self, new_arn): resource_content = resource[name] # get resource properties properties_key = filehelpers.find_required_key( - name, resource_content, 'properties') + name, resource_content, 'properties' + ) properties_content = resource_content[properties_key] # find task definition property task_def_key = filehelpers.find_required_key( - properties_key, properties_content, 'taskDefinition') + properties_key, properties_content, 'taskDefinition' + ) # insert new task def ARN into resource properties_content[task_def_key] = new_arn @@ -305,22 +329,19 @@ def wait_for_deploy_success(self, id, wait_min): delay_sec = DEFAULT_DELAY_SEC max_attempts = (wait_min * 60) / delay_sec - config = { - 'Delay': delay_sec, - 'MaxAttempts': max_attempts - } + config = {'Delay': delay_sec, 'MaxAttempts': max_attempts} self._show_deploy_wait_msg(id, wait_min) waiter.wait(deploymentId=id, WaiterConfig=config) def _show_deploy_wait_msg(self, id, wait_min): sys.stdout.write( - self.MSG_WAITING.format(deployment_id=id, - wait=wait_min)) + self.MSG_WAITING.format(deployment_id=id, wait=wait_min) + ) sys.stdout.flush() -class CodeDeployValidator(): +class CodeDeployValidator: def __init__(self, cd_client, resources): self._client = cd_client self._resource_names = resources @@ -328,35 +349,42 @@ def __init__(self, cd_client, resources): def describe_cd_resources(self): try: self.app_details = self._client.get_application( - applicationName=self._resource_names['app_name']) + 
applicationName=self._resource_names['app_name'] + ) except ClientError as e: raise exceptions.ServiceClientError( - action='describe Code Deploy application', error=e) + action='describe Code Deploy application', error=e + ) try: dgp = self._resource_names['deployment_group_name'] app = self._resource_names['app_name'] self.deployment_group_details = self._client.get_deployment_group( - applicationName=app, deploymentGroupName=dgp) + applicationName=app, deploymentGroupName=dgp + ) except ClientError as e: raise exceptions.ServiceClientError( - action='describe Code Deploy deployment group', error=e) + action='describe Code Deploy deployment group', error=e + ) def get_deployment_wait_time(self): - - if (not hasattr(self, 'deployment_group_details') or - self.deployment_group_details is None): + if ( + not hasattr(self, 'deployment_group_details') + or self.deployment_group_details is None + ): return None else: dgp_info = self.deployment_group_details['deploymentGroupInfo'] blue_green_info = dgp_info['blueGreenDeploymentConfiguration'] - deploy_ready_wait_min = \ - blue_green_info['deploymentReadyOption']['waitTimeInMinutes'] + deploy_ready_wait_min = blue_green_info['deploymentReadyOption'][ + 'waitTimeInMinutes' + ] terminate_key = 'terminateBlueInstancesOnDeploymentSuccess' - termination_wait_min = \ - blue_green_info[terminate_key]['terminationWaitTimeInMinutes'] + termination_wait_min = blue_green_info[terminate_key][ + 'terminationWaitTimeInMinutes' + ] configured_wait = deploy_ready_wait_min + termination_wait_min @@ -370,7 +398,8 @@ def validate_application(self): app_name = self._resource_names['app_name'] if self.app_details['application']['computePlatform'] != 'ECS': raise exceptions.InvalidPlatformError( - resource='Application', name=app_name) + resource='Application', name=app_name + ) def validate_deployment_group(self): dgp = self._resource_names['deployment_group_name'] @@ -384,26 +413,29 @@ def validate_deployment_group(self): if compute_platform != 'ECS': raise exceptions.InvalidPlatformError( - resource='Deployment Group', name=dgp) + resource='Deployment Group', name=dgp + ) - target_services = \ - self.deployment_group_details['deploymentGroupInfo']['ecsServices'] + target_services = self.deployment_group_details['deploymentGroupInfo'][ + 'ecsServices' + ] # either ECS resource names or ARNs can be stored, so check both for target in target_services: target_serv = target['serviceName'] if target_serv != service and target_serv != service_arn: raise exceptions.InvalidProperyError( - dg_name=dgp, resource='service', resource_name=service) + dg_name=dgp, resource='service', resource_name=service + ) target_cluster = target['clusterName'] if target_cluster != cluster and target_cluster != cluster_arn: raise exceptions.InvalidProperyError( - dg_name=dgp, resource='cluster', resource_name=cluster) - + dg_name=dgp, resource='cluster', resource_name=cluster + ) -class ECSClient(): +class ECSClient: def __init__(self, session, parsed_args, parsed_globals, user_agent_extra): self._args = parsed_args self._custom_config = config.Config(user_agent_extra=user_agent_extra) @@ -412,7 +444,8 @@ def __init__(self, session, parsed_args, parsed_globals, user_agent_extra): region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, verify=parsed_globals.verify_ssl, - config=self._custom_config) + config=self._custom_config, + ) def get_service_details(self): cluster = self._args.cluster @@ -422,33 +455,36 @@ def get_service_details(self): try: service_response = 
self._client.describe_services( - cluster=cluster, services=[self._args.service]) + cluster=cluster, services=[self._args.service] + ) except ClientError as e: raise exceptions.ServiceClientError( - action='describe ECS service', error=e) + action='describe ECS service', error=e + ) if len(service_response['services']) == 0: raise exceptions.InvalidServiceError( - service=self._args.service, cluster=cluster) + service=self._args.service, cluster=cluster + ) service_details = service_response['services'][0] - cluster_name = \ - filehelpers.get_cluster_name_from_arn( - service_details['clusterArn']) + cluster_name = filehelpers.get_cluster_name_from_arn( + service_details['clusterArn'] + ) return { 'service_arn': service_details['serviceArn'], 'service_name': service_details['serviceName'], 'cluster_arn': service_details['clusterArn'], - 'cluster_name': cluster_name + 'cluster_name': cluster_name, } def register_task_definition(self, kwargs): try: - response = \ - self._client.register_task_definition(**kwargs) + response = self._client.register_task_definition(**kwargs) except ClientError as e: raise exceptions.ServiceClientError( - action='register ECS task definition', error=e) + action='register ECS task definition', error=e + ) return response diff --git a/awscli/customizations/ecs/exceptions.py b/awscli/customizations/ecs/exceptions.py index 0dbc564ef811..52b078d56c58 100644 --- a/awscli/customizations/ecs/exceptions.py +++ b/awscli/customizations/ecs/exceptions.py @@ -13,7 +13,8 @@ class ECSError(Exception): - """ Base class for all ECSErrors.""" + """Base class for all ECSErrors.""" + fmt = 'An unspecified error occurred' def __init__(self, **kwargs): @@ -23,8 +24,7 @@ def __init__(self, **kwargs): class MissingPropertyError(ECSError): - fmt = \ - "Error: Resource '{resource}' must include property '{prop_name}'" + fmt = "Error: Resource '{resource}' must include property '{prop_name}'" class FileLoadError(ECSError): @@ -36,8 +36,10 @@ class InvalidPlatformError(ECSError): class InvalidProperyError(ECSError): - fmt = ("Error: deployment group '{dg_name}' does not target " - "ECS {resource} '{resource_name}'") + fmt = ( + "Error: deployment group '{dg_name}' does not target " + "ECS {resource} '{resource_name}'" + ) class InvalidServiceError(ECSError): @@ -45,4 +47,4 @@ class InvalidServiceError(ECSError): class ServiceClientError(ECSError): - fmt = "Failed to {action}:\n{error}" \ No newline at end of file + fmt = "Failed to {action}:\n{error}" diff --git a/awscli/customizations/ecs/executecommand.py b/awscli/customizations/ecs/executecommand.py index a578c73734c4..1da62aaa5626 100644 --- a/awscli/customizations/ecs/executecommand.py +++ b/awscli/customizations/ecs/executecommand.py @@ -10,13 +10,13 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import logging -import json import errno - +import json +import logging from subprocess import check_call + +from awscli.clidriver import CLIOperationCaller, ServiceOperation from awscli.compat import ignore_user_entered_signals -from awscli.clidriver import ServiceOperation, CLIOperationCaller logger = logging.getLogger(__name__) @@ -24,17 +24,13 @@ 'SessionManagerPlugin is not found. 
', 'Please refer to SessionManager Documentation here: ', 'http://docs.aws.amazon.com/console/systems-manager/', - 'session-manager-plugin-not-found' + 'session-manager-plugin-not-found', ) -TASK_NOT_FOUND = ( - 'The task provided in the request was ' - 'not found.' -) +TASK_NOT_FOUND = 'The task provided in the request was ' 'not found.' class ECSExecuteCommand(ServiceOperation): - def create_help_command(self): help_command = super(ECSExecuteCommand, self).create_help_command() # change the output shape because the command provides no output. @@ -43,10 +39,7 @@ def create_help_command(self): def get_container_runtime_id(client, container_name, task_id, cluster_name): - describe_tasks_params = { - "cluster": cluster_name, - "tasks": [task_id] - } + describe_tasks_params = {"cluster": cluster_name, "tasks": [task_id]} describe_tasks_response = client.describe_tasks(**describe_tasks_params) # need to fail here if task has failed in the intermediate time tasks = describe_tasks_response['tasks'] @@ -64,11 +57,10 @@ def build_ssm_request_paramaters(response, client): container_name = response['containerName'] # in order to get container run-time id # we need to make a call to describe-tasks - container_runtime_id = \ - get_container_runtime_id(client, container_name, - task_id, cluster_name) - target = "ecs:{}_{}_{}".format(cluster_name, task_id, - container_runtime_id) + container_runtime_id = get_container_runtime_id( + client, container_name, task_id, cluster_name + ) + target = "ecs:{}_{}_{}".format(cluster_name, task_id, container_runtime_id) ssm_request_params = {"Target": target} return ssm_request_params @@ -85,13 +77,18 @@ def invoke(self, service_name, operation_name, parameters, parsed_globals): # before execute-command-command is made check_call(["session-manager-plugin"]) client = self._session.create_client( - service_name, region_name=parsed_globals.region, + service_name, + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) response = client.execute_command(**parameters) region_name = client.meta.region_name - profile_name = self._session.profile \ - if self._session.profile is not None else '' + profile_name = ( + self._session.profile + if self._session.profile is not None + else '' + ) endpoint_url = client.meta.endpoint_url ssm_request_params = build_ssm_request_paramaters(response, client) # ignore_user_entered_signals ignores these signals @@ -102,16 +99,21 @@ def invoke(self, service_name, operation_name, parameters, parsed_globals): # and handling in there with ignore_user_entered_signals(): # call executable with necessary input - check_call(["session-manager-plugin", - json.dumps(response['session']), - region_name, - "StartSession", - profile_name, - json.dumps(ssm_request_params), - endpoint_url]) + check_call( + [ + "session-manager-plugin", + json.dumps(response['session']), + region_name, + "StartSession", + profile_name, + json.dumps(ssm_request_params), + endpoint_url, + ] + ) return 0 except OSError as ex: if ex.errno == errno.ENOENT: - logger.debug('SessionManagerPlugin is not present', - exc_info=True) + logger.debug( + 'SessionManagerPlugin is not present', exc_info=True + ) raise ValueError(''.join(ERROR_MESSAGE)) diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py index 6c8b67a0d81d..8b52ef24d8ae 100644 --- a/awscli/customizations/ecs/filehelpers.py +++ b/awscli/customizations/ecs/filehelpers.py @@ -11,6 +11,7 
@@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import json + from ruamel.yaml import YAML from awscli.customizations.ecs import exceptions @@ -21,16 +22,17 @@ def find_required_key(resource_name, obj, key): - if obj is None: raise exceptions.MissingPropertyError( - resource=resource_name, prop_name=key) + resource=resource_name, prop_name=key + ) result = _get_case_insensitive_key(obj, key) if result is None: raise exceptions.MissingPropertyError( - resource=resource_name, prop_name=key) + resource=resource_name, prop_name=key + ) else: return result diff --git a/awscli/customizations/eks/__init__.py b/awscli/customizations/eks/__init__.py index 9f3114a64d84..6e5d7e73837d 100644 --- a/awscli/customizations/eks/__init__.py +++ b/awscli/customizations/eks/__init__.py @@ -11,8 +11,8 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.eks.update_kubeconfig import UpdateKubeconfigCommand from awscli.customizations.eks.get_token import GetTokenCommand +from awscli.customizations.eks.update_kubeconfig import UpdateKubeconfigCommand def initialize(cli): diff --git a/awscli/customizations/eks/exceptions.py b/awscli/customizations/eks/exceptions.py index bf01b2323d97..8bdef92016ce 100644 --- a/awscli/customizations/eks/exceptions.py +++ b/awscli/customizations/eks/exceptions.py @@ -13,8 +13,8 @@ class EKSError(Exception): - """ Base class for all EKSErrors.""" + """Base class for all EKSErrors.""" class EKSClusterError(EKSError): - """ Raised when a cluster is not in the correct state.""" + """Raised when a cluster is not in the correct state.""" diff --git a/awscli/customizations/eks/get_token.py b/awscli/customizations/eks/get_token.py index c85b86dd7d0e..6b43f569797f 100644 --- a/awscli/customizations/eks/get_token.py +++ b/awscli/customizations/eks/get_token.py @@ -11,19 +11,17 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import base64 -import botocore import json import os import sys - from datetime import datetime, timedelta -from botocore.signers import RequestSigner -from botocore.model import ServiceId -from awscli.formatter import get_formatter +import botocore from awscli.customizations.commands import BasicCommand -from awscli.customizations.utils import uni_print -from awscli.customizations.utils import validate_mutually_exclusive +from awscli.customizations.utils import uni_print, validate_mutually_exclusive +from awscli.formatter import get_formatter +from botocore.model import ServiceId +from botocore.signers import RequestSigner AUTH_SERVICE = "sts" AUTH_COMMAND = "GetCallerIdentity" @@ -116,15 +114,19 @@ def _run_main(self, parsed_args, parsed_globals): sts_client = client_factory.get_sts_client( region_name=parsed_globals.region, role_arn=parsed_args.role_arn ) - - validate_mutually_exclusive(parsed_args, ['cluster_name'], ['cluster_id']) + + validate_mutually_exclusive( + parsed_args, ['cluster_name'], ['cluster_id'] + ) if parsed_args.cluster_id: identifier = parsed_args.cluster_id elif parsed_args.cluster_name: identifier = parsed_args.cluster_name else: - return ValueError("Either parameter --cluster-name or --cluster-id must be specified.") + return ValueError( + "Either parameter --cluster-name or --cluster-id must be specified." 
+ ) token = TokenGenerator(sts_client).get_token(identifier) @@ -273,4 +275,6 @@ def _retrieve_k8s_aws_id(self, params, context, **kwargs): def _inject_k8s_aws_id_header(self, request, **kwargs): if K8S_AWS_ID_HEADER in request.context: - request.headers[K8S_AWS_ID_HEADER] = request.context[K8S_AWS_ID_HEADER] + request.headers[K8S_AWS_ID_HEADER] = request.context[ + K8S_AWS_ID_HEADER + ] diff --git a/awscli/customizations/eks/kubeconfig.py b/awscli/customizations/eks/kubeconfig.py index f4c69039635b..101e70a41c8a 100644 --- a/awscli/customizations/eks/kubeconfig.py +++ b/awscli/customizations/eks/kubeconfig.py @@ -15,38 +15,40 @@ import os import ruamel.yaml as yaml -from botocore.compat import OrderedDict from awscli.compat import compat_open from awscli.customizations.eks.exceptions import EKSError from awscli.customizations.eks.ordered_yaml import ( + ordered_yaml_dump, ordered_yaml_load, - ordered_yaml_dump ) +from botocore.compat import OrderedDict class KubeconfigError(EKSError): - """ Base class for all kubeconfig errors.""" + """Base class for all kubeconfig errors.""" class KubeconfigCorruptedError(KubeconfigError): - """ Raised when a kubeconfig cannot be parsed.""" + """Raised when a kubeconfig cannot be parsed.""" class KubeconfigInaccessableError(KubeconfigError): - """ Raised when a kubeconfig cannot be opened for read/writing.""" + """Raised when a kubeconfig cannot be opened for read/writing.""" def _get_new_kubeconfig_content(): - return OrderedDict([ - ("apiVersion", "v1"), - ("clusters", []), - ("contexts", []), - ("current-context", ""), - ("kind", "Config"), - ("preferences", OrderedDict()), - ("users", []) - ]) + return OrderedDict( + [ + ("apiVersion", "v1"), + ("clusters", []), + ("contexts", []), + ("current-context", ""), + ("kind", "Config"), + ("preferences", OrderedDict()), + ("users", []), + ] + ) class Kubeconfig(object): @@ -57,7 +59,7 @@ def __init__(self, path, content=None): self.content = content def dump_content(self): - """ Return the stored content in yaml format. """ + """Return the stored content in yaml format.""" return ordered_yaml_dump(self.content) def has_cluster(self, name): @@ -67,14 +69,17 @@ def has_cluster(self, name): """ if self.content.get('clusters') is None: return False - return name in [cluster['name'] - for cluster in self.content['clusters'] if 'name' in cluster] + return name in [ + cluster['name'] + for cluster in self.content['clusters'] + if 'name' in cluster + ] def __eq__(self, other): return ( - isinstance(other, Kubeconfig) - and self.path == other.path - and self.content == other.content + isinstance(other, Kubeconfig) + and self.path == other.path + and self.content == other.content ) @@ -92,8 +97,9 @@ def validate_config(self, config): :type config: Kubeconfig """ if not isinstance(config, Kubeconfig): - raise KubeconfigCorruptedError("Internal error: " - f"Not a {Kubeconfig}.") + raise KubeconfigCorruptedError( + "Internal error: " f"Not a {Kubeconfig}." 
+ ) self._validate_config_types(config) self._validate_list_entry_types(config) @@ -108,9 +114,11 @@ def _validate_config_types(self, config): if not isinstance(config.content, dict): raise KubeconfigCorruptedError(f"Content not a {dict}.") for key, value in self._validation_content.items(): - if (key in config.content and - config.content[key] is not None and - not isinstance(config.content[key], type(value))): + if ( + key in config.content + and config.content[key] is not None + and not isinstance(config.content[key], type(value)) + ): raise KubeconfigCorruptedError( f"{key} is wrong type: {type(config.content[key])} " f"(Should be {type(value)})" @@ -125,19 +133,19 @@ def _validate_list_entry_types(self, config): :type config: Kubeconfig """ for key, value in self._validation_content.items(): - if (key in config.content and - type(config.content[key]) == list): + if key in config.content and type(config.content[key]) == list: for element in config.content[key]: if not isinstance(element, OrderedDict): raise KubeconfigCorruptedError( - f"Entry in {key} not a {dict}. ") + f"Entry in {key} not a {dict}. " + ) class KubeconfigLoader(object): - def __init__(self, validator = None): + def __init__(self, validator=None): if validator is None: - validator=KubeconfigValidator() - self._validator=validator + validator = KubeconfigValidator() + self._validator = validator def load_kubeconfig(self, path): """ @@ -161,15 +169,17 @@ def load_kubeconfig(self, path): loaded_content = ordered_yaml_load(stream) except IOError as e: if e.errno == errno.ENOENT: - loaded_content=None + loaded_content = None else: raise KubeconfigInaccessableError( - f"Can't open kubeconfig for reading: {e}") + f"Can't open kubeconfig for reading: {e}" + ) except yaml.YAMLError as e: raise KubeconfigCorruptedError( - f"YamlError while loading kubeconfig: {e}") + f"YamlError while loading kubeconfig: {e}" + ) - loaded_config=Kubeconfig(path, loaded_content) + loaded_config = Kubeconfig(path, loaded_content) self._validator.validate_config(loaded_config) return loaded_config @@ -187,21 +197,24 @@ def write_kubeconfig(self, config): :raises KubeconfigInaccessableError: if the kubeconfig can't be opened for writing """ - directory=os.path.dirname(config.path) + directory = os.path.dirname(config.path) try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise KubeconfigInaccessableError( - f"Can't create directory for writing: {e}") + f"Can't create directory for writing: {e}" + ) try: with compat_open( - config.path, "w+", access_permissions=0o600) as stream: + config.path, "w+", access_permissions=0o600 + ) as stream: ordered_yaml_dump(config.content, stream) except IOError as e: raise KubeconfigInaccessableError( - f"Can't open kubeconfig for writing: {e}") + f"Can't open kubeconfig for writing: {e}" + ) class KubeconfigAppender(object): @@ -213,42 +226,48 @@ def insert_entry(self, config, key, new_entry): :param config: The kubeconfig to insert an entry into :type config: Kubeconfig """ - entries=self._setdefault_existing_entries(config, key) - same_name_index=self._index_same_name(entries, new_entry) + entries = self._setdefault_existing_entries(config, key) + same_name_index = self._index_same_name(entries, new_entry) if same_name_index is None: entries.append(new_entry) else: - entries[same_name_index]=new_entry + entries[same_name_index] = new_entry return config def _setdefault_existing_entries(self, config, key): - config.content[key]=config.content.get(key) or [] - entries=config.content[key] 
+ config.content[key] = config.content.get(key) or [] + entries = config.content[key] if not isinstance(entries, list): - raise KubeconfigError(f"Tried to insert into {key}, " - f"which is a {type(entries)} " - f"not a {list}") + raise KubeconfigError( + f"Tried to insert into {key}, " + f"which is a {type(entries)} " + f"not a {list}" + ) return entries def _index_same_name(self, entries, new_entry): if "name" in new_entry: - name_to_search=new_entry["name"] + name_to_search = new_entry["name"] for i, entry in enumerate(entries): if "name" in entry and entry["name"] == name_to_search: return i return None - def _make_context(self, cluster, user, alias = None): - """ Generate a context to associate cluster and user with a given alias.""" - return OrderedDict([ - ("context", OrderedDict([ - ("cluster", cluster["name"]), - ("user", user["name"]) - ])), - ("name", alias or user["name"]) - ]) - - def insert_cluster_user_pair(self, config, cluster, user, alias = None): + def _make_context(self, cluster, user, alias=None): + """Generate a context to associate cluster and user with a given alias.""" + return OrderedDict( + [ + ( + "context", + OrderedDict( + [("cluster", cluster["name"]), ("user", user["name"])] + ), + ), + ("name", alias or user["name"]), + ] + ) + + def insert_cluster_user_pair(self, config, cluster, user, alias=None): """ Insert the passed cluster entry and user entry, then make a context to associate them @@ -270,11 +289,11 @@ def insert_cluster_user_pair(self, config, cluster, user, alias = None): :return: The generated context :rtype: OrderedDict """ - context=self._make_context(cluster, user, alias = alias) + context = self._make_context(cluster, user, alias=alias) self.insert_entry(config, "clusters", cluster) self.insert_entry(config, "users", user) self.insert_entry(config, "contexts", context) - config.content["current-context"]=context["name"] + config.content["current-context"] = context["name"] return context diff --git a/awscli/customizations/eks/ordered_yaml.py b/awscli/customizations/eks/ordered_yaml.py index 23834e0d0623..851627129eb1 100644 --- a/awscli/customizations/eks/ordered_yaml.py +++ b/awscli/customizations/eks/ordered_yaml.py @@ -11,25 +11,29 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
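To make the `_make_context` shape above concrete, here is a sketch of the entry that `insert_cluster_user_pair` ends up appending to the kubeconfig's `contexts` list (the ARN is an illustrative placeholder)::

    from collections import OrderedDict

    cluster = {"name": "arn:aws:eks:us-west-2:111122223333:cluster/demo"}
    user = {"name": "arn:aws:eks:us-west-2:111122223333:cluster/demo"}

    # Mirrors _make_context: a context ties one cluster entry to one user
    # entry, and its name defaults to the user name unless an alias is given.
    context = OrderedDict(
        [
            (
                "context",
                OrderedDict(
                    [("cluster", cluster["name"]), ("user", user["name"])]
                ),
            ),
            ("name", user["name"]),
        ]
    )
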
import ruamel.yaml -from botocore.compat import OrderedDict from awscli.utils import dump_yaml_to_str +from botocore.compat import OrderedDict + def _ordered_constructor(loader, node): loader.flatten_mapping(node) return OrderedDict(loader.construct_pairs(node)) + def _ordered_representer(dumper, data): return dumper.represent_mapping( - ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - data.items()) + ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items() + ) + def ordered_yaml_load(stream): - """ Load an OrderedDict object from a yaml stream.""" + """Load an OrderedDict object from a yaml stream.""" yaml = ruamel.yaml.YAML(typ="safe", pure=True) yaml.Constructor.add_constructor( ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - _ordered_constructor) + _ordered_constructor, + ) return yaml.load(stream) diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py index 8bce83525bbb..12f19e4534b1 100644 --- a/awscli/customizations/eks/update_kubeconfig.py +++ b/awscli/customizations/eks/update_kubeconfig.py @@ -13,21 +13,20 @@ import logging import os -from botocore.compat import OrderedDict - from awscli.compat import is_windows from awscli.customizations.commands import BasicCommand from awscli.customizations.eks.exceptions import EKSClusterError from awscli.customizations.eks.kubeconfig import ( Kubeconfig, + KubeconfigAppender, KubeconfigError, KubeconfigLoader, - KubeconfigWriter, KubeconfigValidator, - KubeconfigAppender + KubeconfigWriter, ) from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump from awscli.customizations.utils import uni_print +from botocore.compat import OrderedDict LOG = logging.getLogger(__name__) @@ -37,75 +36,88 @@ # this can be safely changed to default to writing "v1" API_VERSION = "client.authentication.k8s.io/v1beta1" + class UpdateKubeconfigCommand(BasicCommand): NAME = 'update-kubeconfig' DESCRIPTION = BasicCommand.FROM_FILE( - 'eks', - 'update-kubeconfig', - '_description.rst' + 'eks', 'update-kubeconfig', '_description.rst' ) ARG_TABLE = [ { 'name': 'name', 'dest': 'cluster_name', - 'help_text': ("The name of the cluster for which " - "to create a kubeconfig entry. " - "This cluster must exist in your account and in the " - "specified or configured default Region " - "for your AWS CLI installation."), - 'required': True + 'help_text': ( + "The name of the cluster for which " + "to create a kubeconfig entry. " + "This cluster must exist in your account and in the " + "specified or configured default Region " + "for your AWS CLI installation." + ), + 'required': True, }, { 'name': 'kubeconfig', - 'help_text': ("Optionally specify a kubeconfig file to append " - "with your configuration. " - "By default, the configuration is written to the " - "first file path in the KUBECONFIG " - "environment variable (if it is set) " - "or the default kubeconfig path (.kube/config) " - "in your home directory."), - 'required': False + 'help_text': ( + "Optionally specify a kubeconfig file to append " + "with your configuration. " + "By default, the configuration is written to the " + "first file path in the KUBECONFIG " + "environment variable (if it is set) " + "or the default kubeconfig path (.kube/config) " + "in your home directory." + ), + 'required': False, }, { 'name': 'role-arn', - 'help_text': ("To assume a role for cluster authentication, " - "specify an IAM role ARN with this option. 
" - "For example, if you created a cluster " - "while assuming an IAM role, " - "then you must also assume that role to " - "connect to the cluster the first time."), - 'required': False + 'help_text': ( + "To assume a role for cluster authentication, " + "specify an IAM role ARN with this option. " + "For example, if you created a cluster " + "while assuming an IAM role, " + "then you must also assume that role to " + "connect to the cluster the first time." + ), + 'required': False, }, { 'name': 'dry-run', 'action': 'store_true', 'default': False, - 'help_text': ("Print the merged kubeconfig to stdout instead of " - "writing it to the specified file."), - 'required': False + 'help_text': ( + "Print the merged kubeconfig to stdout instead of " + "writing it to the specified file." + ), + 'required': False, }, { 'name': 'verbose', 'action': 'store_true', 'default': False, - 'help_text': ("Print more detailed output " - "when writing to the kubeconfig file, " - "including the appended entries.") + 'help_text': ( + "Print more detailed output " + "when writing to the kubeconfig file, " + "including the appended entries." + ), }, { 'name': 'alias', - 'help_text': ("Alias for the cluster context name. " - "Defaults to match cluster ARN."), - 'required': False + 'help_text': ( + "Alias for the cluster context name. " + "Defaults to match cluster ARN." + ), + 'required': False, }, { 'name': 'user-alias', - 'help_text': ("Alias for the generated user name. " - "Defaults to match cluster ARN."), - 'required': False - } + 'help_text': ( + "Alias for the generated user name. " + "Defaults to match cluster ARN." + ), + 'required': False, + }, ] def _display_entries(self, entries): @@ -121,25 +133,25 @@ def _display_entries(self, entries): uni_print("\n") def _run_main(self, parsed_args, parsed_globals): - client = EKSClient(self._session, - parsed_args=parsed_args, - parsed_globals=parsed_globals) + client = EKSClient( + self._session, + parsed_args=parsed_args, + parsed_globals=parsed_globals, + ) new_cluster_dict = client.get_cluster_entry() - new_user_dict = client.get_user_entry(user_alias=parsed_args.user_alias) + new_user_dict = client.get_user_entry( + user_alias=parsed_args.user_alias + ) config_selector = KubeconfigSelector( - os.environ.get("KUBECONFIG", ""), - parsed_args.kubeconfig - ) - config = config_selector.choose_kubeconfig( - new_cluster_dict["name"] + os.environ.get("KUBECONFIG", ""), parsed_args.kubeconfig ) + config = config_selector.choose_kubeconfig(new_cluster_dict["name"]) updating_existing = config.has_cluster(new_cluster_dict["name"]) appender = KubeconfigAppender() - new_context_dict = appender.insert_cluster_user_pair(config, - new_cluster_dict, - new_user_dict, - parsed_args.alias) + new_context_dict = appender.insert_cluster_user_pair( + config, new_cluster_dict, new_user_dict, parsed_args.alias + ) if parsed_args.dry_run: uni_print(config.dump_content()) @@ -148,27 +160,27 @@ def _run_main(self, parsed_args, parsed_globals): writer.write_kubeconfig(config) if updating_existing: - uni_print("Updated context {0} in {1}\n".format( - new_context_dict["name"], config.path - )) + uni_print( + "Updated context {0} in {1}\n".format( + new_context_dict["name"], config.path + ) + ) else: - uni_print("Added new context {0} to {1}\n".format( - new_context_dict["name"], config.path - )) + uni_print( + "Added new context {0} to {1}\n".format( + new_context_dict["name"], config.path + ) + ) if parsed_args.verbose: - self._display_entries([ - new_context_dict, - new_user_dict, - 
new_cluster_dict - ]) + self._display_entries( + [new_context_dict, new_user_dict, new_cluster_dict] + ) return 0 class KubeconfigSelector(object): - - def __init__(self, env_variable, path_in, validator=None, - loader=None): + def __init__(self, env_variable, path_in, validator=None, loader=None): """ Parse KUBECONFIG into a list of absolute paths. Also replace the empty list with DEFAULT_PATH @@ -194,9 +206,11 @@ def __init__(self, env_variable, path_in, validator=None, # Get the list of paths from the environment variable if env_variable == "": env_variable = DEFAULT_PATH - self._paths = [self._expand_path(element) - for element in env_variable.split(os.pathsep) - if len(element.strip()) > 0] + self._paths = [ + self._expand_path(element) + for element in env_variable.split(os.pathsep) + if len(element.strip()) > 0 + ] if len(self._paths) == 0: self._paths = [DEFAULT_PATH] @@ -219,9 +233,9 @@ def choose_kubeconfig(self, cluster_name): loaded_config = self._loader.load_kubeconfig(candidate_path) if loaded_config.has_cluster(cluster_name): - LOG.debug("Found entry to update at {0}".format( - candidate_path - )) + LOG.debug( + "Found entry to update at {0}".format(candidate_path) + ) return loaded_config except KubeconfigError as e: LOG.warning("Passing {0}:{1}".format(candidate_path, e)) @@ -232,7 +246,7 @@ def choose_kubeconfig(self, cluster_name): return self._loader.load_kubeconfig(self._paths[0]) def _expand_path(self, path): - """ A helper to expand a path to a full absolute path. """ + """A helper to expand a path to a full absolute path.""" return os.path.abspath(os.path.expanduser(path)) @@ -259,17 +273,22 @@ def cluster_description(self): "eks", region_name=self._parsed_globals.region, endpoint_url=self._parsed_globals.endpoint_url, - verify=self._parsed_globals.verify_ssl + verify=self._parsed_globals.verify_ssl, ) full_description = client.describe_cluster(name=self._cluster_name) self._cluster_description = full_description["cluster"] if "status" not in self._cluster_description: raise EKSClusterError("Cluster not found") - if self._cluster_description["status"] not in ["ACTIVE", "UPDATING"]: - raise EKSClusterError("Cluster status is {0}".format( - self._cluster_description["status"] - )) + if self._cluster_description["status"] not in [ + "ACTIVE", + "UPDATING", + ]: + raise EKSClusterError( + "Cluster status is {0}".format( + self._cluster_description["status"] + ) + ) return self._cluster_description @@ -279,17 +298,26 @@ def get_cluster_entry(self): the previously obtained description. 
""" - cert_data = self.cluster_description.get("certificateAuthority", {}).get("data", "") + cert_data = self.cluster_description.get( + "certificateAuthority", {} + ).get("data", "") endpoint = self.cluster_description.get("endpoint") arn = self.cluster_description.get("arn") - return OrderedDict([ - ("cluster", OrderedDict([ - ("certificate-authority-data", cert_data), - ("server", endpoint) - ])), - ("name", arn) - ]) + return OrderedDict( + [ + ( + "cluster", + OrderedDict( + [ + ("certificate-authority-data", cert_data), + ("server", endpoint), + ] + ), + ), + ("name", arn), + ] + ) def get_user_entry(self, user_alias=None): """ @@ -307,37 +335,54 @@ def get_user_entry(self, user_alias=None): cluster_identification_parameter = "--cluster-id" cluster_identification_value = self.cluster_description.get("id") - generated_user = OrderedDict([ - ("name", user_alias or self.cluster_description.get("arn", "")), - ("user", OrderedDict([ - ("exec", OrderedDict([ - ("apiVersion", API_VERSION), - ("args", + generated_user = OrderedDict( + [ + ( + "name", + user_alias or self.cluster_description.get("arn", ""), + ), + ( + "user", + OrderedDict( [ - "--region", - region, - "eks", - "get-token", - cluster_identification_parameter, - cluster_identification_value, - "--output", - "json", - ]), - ("command", "aws"), - ])) - ])) - ]) + ( + "exec", + OrderedDict( + [ + ("apiVersion", API_VERSION), + ( + "args", + [ + "--region", + region, + "eks", + "get-token", + cluster_identification_parameter, + cluster_identification_value, + "--output", + "json", + ], + ), + ("command", "aws"), + ] + ), + ) + ] + ), + ), + ] + ) if self._parsed_args.role_arn is not None: - generated_user["user"]["exec"]["args"].extend([ - "--role", - self._parsed_args.role_arn - ]) + generated_user["user"]["exec"]["args"].extend( + ["--role", self._parsed_args.role_arn] + ) if self._session.profile: - generated_user["user"]["exec"]["env"] = [OrderedDict([ - ("name", "AWS_PROFILE"), - ("value", self._session.profile) - ])] + generated_user["user"]["exec"]["env"] = [ + OrderedDict( + [("name", "AWS_PROFILE"), ("value", self._session.profile)] + ) + ] return generated_user diff --git a/awscli/customizations/emr/addinstancegroups.py b/awscli/customizations/emr/addinstancegroups.py index eb5282393589..d4f7ed986065 100644 --- a/awscli/customizations/emr/addinstancegroups.py +++ b/awscli/customizations/emr/addinstancegroups.py @@ -12,10 +12,12 @@ # language governing permissions and limitations under the License. -from awscli.customizations.emr import argumentschema -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import helptext -from awscli.customizations.emr import instancegroupsutils +from awscli.customizations.emr import ( + argumentschema, + emrutils, + helptext, + instancegroupsutils, +) from awscli.customizations.emr.command import Command @@ -23,29 +25,46 @@ class AddInstanceGroups(Command): NAME = 'add-instance-groups' DESCRIPTION = 'Adds an instance group to a running cluster.' 
ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'instance-groups', 'required': True, - 'help_text': helptext.INSTANCE_GROUPS, - 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'instance-groups', + 'required': True, + 'help_text': helptext.INSTANCE_GROUPS, + 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA, + }, ] def _run_main_command(self, parsed_args, parsed_globals): parameters = {'JobFlowId': parsed_args.cluster_id} - parameters['InstanceGroups'] = \ + parameters['InstanceGroups'] = ( instancegroupsutils.build_instance_groups( - parsed_args.instance_groups) + parsed_args.instance_groups + ) + ) add_instance_groups_response = emrutils.call( - self._session, 'add_instance_groups', parameters, - self.region, parsed_globals.endpoint_url, - parsed_globals.verify_ssl) + self._session, + 'add_instance_groups', + parameters, + self.region, + parsed_globals.endpoint_url, + parsed_globals.verify_ssl, + ) constructed_result = self._construct_result( - add_instance_groups_response) + add_instance_groups_response + ) - emrutils.display_response(self._session, 'add_instance_groups', - constructed_result, parsed_globals) + emrutils.display_response( + self._session, + 'add_instance_groups', + constructed_result, + parsed_globals, + ) return 0 def _construct_result(self, add_instance_groups_result): @@ -55,12 +74,15 @@ def _construct_result(self, add_instance_groups_result): if add_instance_groups_result is not None: jobFlowId = add_instance_groups_result.get('JobFlowId') instanceGroupIds = add_instance_groups_result.get( - 'InstanceGroupIds') + 'InstanceGroupIds' + ) clusterArn = add_instance_groups_result.get('ClusterArn') if jobFlowId is not None and instanceGroupIds is not None: - return {'ClusterId': jobFlowId, - 'InstanceGroupIds': instanceGroupIds, - 'ClusterArn': clusterArn} + return { + 'ClusterId': jobFlowId, + 'InstanceGroupIds': instanceGroupIds, + 'ClusterArn': clusterArn, + } else: return {} diff --git a/awscli/customizations/emr/addsteps.py b/awscli/customizations/emr/addsteps.py index b816d2eee76a..0db662fcd055 100644 --- a/awscli/customizations/emr/addsteps.py +++ b/awscli/customizations/emr/addsteps.py @@ -11,50 +11,60 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import argumentschema -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import helptext -from awscli.customizations.emr import steputils +from awscli.customizations.emr import ( + argumentschema, + emrutils, + helptext, + steputils, +) from awscli.customizations.emr.command import Command class AddSteps(Command): NAME = 'add-steps' - DESCRIPTION = ('Add a list of steps to a cluster.') + DESCRIPTION = 'Add a list of steps to a cluster.' 
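As `_construct_result` above shows, the raw `add_instance_groups` response is reshaped before display, surfacing `JobFlowId` as `ClusterId`. A sketch with an illustrative response payload (the IDs are standard documentation placeholders, not real resources)::

    response = {
        "JobFlowId": "j-2AXXXXXXGAPLF",
        "InstanceGroupIds": ["ig-1XXXXXXX"],
        "ClusterArn": "arn:aws:elasticmapreduce:us-west-2:111122223333:"
                      "cluster/j-2AXXXXXXGAPLF",
    }

    # Mirrors _construct_result: returned only when both IDs are present.
    constructed = {
        "ClusterId": response["JobFlowId"],
        "InstanceGroupIds": response["InstanceGroupIds"],
        "ClusterArn": response["ClusterArn"],
    }
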
ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID - }, - {'name': 'steps', - 'required': True, - 'nargs': '+', - 'schema': argumentschema.STEPS_SCHEMA, - 'help_text': helptext.STEPS - }, - {'name': 'execution-role-arn', - 'required': False, - 'help_text': helptext.EXECUTION_ROLE_ARN - } + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'steps', + 'required': True, + 'nargs': '+', + 'schema': argumentschema.STEPS_SCHEMA, + 'help_text': helptext.STEPS, + }, + { + 'name': 'execution-role-arn', + 'required': False, + 'help_text': helptext.EXECUTION_ROLE_ARN, + }, ] def _run_main_command(self, parsed_args, parsed_globals): parsed_steps = parsed_args.steps release_label = emrutils.get_release_label( - parsed_args.cluster_id, self._session, self.region, - parsed_globals.endpoint_url, parsed_globals.verify_ssl) + parsed_args.cluster_id, + self._session, + self.region, + parsed_globals.endpoint_url, + parsed_globals.verify_ssl, + ) step_list = steputils.build_step_config_list( - parsed_step_list=parsed_steps, region=self.region, - release_label=release_label) - parameters = { - 'JobFlowId': parsed_args.cluster_id, - 'Steps': step_list - } + parsed_step_list=parsed_steps, + region=self.region, + release_label=release_label, + ) + parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': step_list} if parsed_args.execution_role_arn is not None: parameters['ExecutionRoleArn'] = parsed_args.execution_role_arn - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 diff --git a/awscli/customizations/emr/addtags.py b/awscli/customizations/emr/addtags.py index 8332d9a54a51..fcedc2026755 100644 --- a/awscli/customizations/emr/addtags.py +++ b/awscli/customizations/emr/addtags.py @@ -13,13 +13,13 @@ from awscli.arguments import CustomArgument -from awscli.customizations.emr import helptext -from awscli.customizations.emr import emrutils +from awscli.customizations.emr import emrutils, helptext def modify_tags_argument(argument_table, **kwargs): - argument_table['tags'] = TagsArgument('tags', required=True, - help_text=helptext.TAGS, nargs='+') + argument_table['tags'] = TagsArgument( + 'tags', required=True, help_text=helptext.TAGS, nargs='+' + ) class TagsArgument(CustomArgument): diff --git a/awscli/customizations/emr/applicationutils.py b/awscli/customizations/emr/applicationutils.py index 8a11eb3ab092..4072fc4126c8 100644 --- a/awscli/customizations/emr/applicationutils.py +++ b/awscli/customizations/emr/applicationutils.py @@ -11,14 +11,11 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
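The `add-steps` body above assembles a plain `AddJobFlowSteps` parameter dict and includes the execution role only when one was supplied. A minimal sketch with an illustrative step (`command-runner.jar` is a common EMR step runner used here for illustration, not something this diff introduces)::

    step_list = [
        {
            "Name": "MyStep",
            "ActionOnFailure": "CONTINUE",
            "HadoopJarStep": {
                "Jar": "command-runner.jar",
                "Args": ["echo", "ok"],
            },
        }
    ]
    parameters = {"JobFlowId": "j-2AXXXXXXGAPLF", "Steps": step_list}

    # --execution-role-arn is optional; mirror the conditional above.
    execution_role_arn = None
    if execution_role_arn is not None:
        parameters["ExecutionRoleArn"] = execution_role_arn
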
-from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import constants, emrutils, exceptions from awscli.customizations.exceptions import ParamValidationError -def build_applications(region, - parsed_applications, ami_version=None): +def build_applications(region, parsed_applications, ami_version=None): app_list = [] step_list = [] ba_list = [] @@ -28,38 +25,41 @@ def build_applications(region, if app_name == constants.HIVE: hive_version = constants.LATEST - step_list.append( - _build_install_hive_step(region=region)) + step_list.append(_build_install_hive_step(region=region)) args = app_config.get('Args') if args is not None: hive_site_path = _find_matching_arg( - key=constants.HIVE_SITE_KEY, args_list=args) + key=constants.HIVE_SITE_KEY, args_list=args + ) if hive_site_path is not None: step_list.append( _build_install_hive_site_step( - region=region, - hive_site_path=hive_site_path)) + region=region, hive_site_path=hive_site_path + ) + ) elif app_name == constants.PIG: pig_version = constants.LATEST - step_list.append( - _build_pig_install_step( - region=region)) + step_list.append(_build_pig_install_step(region=region)) elif app_name == constants.GANGLIA: ba_list.append( - _build_ganglia_install_bootstrap_action( - region=region)) + _build_ganglia_install_bootstrap_action(region=region) + ) elif app_name == constants.HBASE: ba_list.append( - _build_hbase_install_bootstrap_action( - region=region)) + _build_hbase_install_bootstrap_action(region=region) + ) if ami_version >= '3.0': step_list.append( _build_hbase_install_step( - constants.HBASE_PATH_HADOOP2_INSTALL_JAR)) + constants.HBASE_PATH_HADOOP2_INSTALL_JAR + ) + ) elif ami_version >= '2.1': step_list.append( _build_hbase_install_step( - constants.HBASE_PATH_HADOOP1_INSTALL_JAR)) + constants.HBASE_PATH_HADOOP1_INSTALL_JAR + ) + ) else: raise ParamValidationError( 'aws: error: AMI version %s is not ' @@ -68,12 +68,15 @@ def build_applications(region, elif app_name == constants.IMPALA: ba_list.append( _build_impala_install_bootstrap_action( - region=region, - args=app_config.get('Args'))) + region=region, args=app_config.get('Args') + ) + ) else: app_list.append( _build_supported_product( - app_config['Name'], app_config.get('Args'))) + app_config['Name'], app_config.get('Args') + ) + ) return app_list, ba_list, step_list @@ -89,16 +92,18 @@ def _build_ganglia_install_bootstrap_action(region): return emrutils.build_bootstrap_action( name=constants.INSTALL_GANGLIA_NAME, path=emrutils.build_s3_link( - relative_path=constants.GANGLIA_INSTALL_BA_PATH, - region=region)) + relative_path=constants.GANGLIA_INSTALL_BA_PATH, region=region + ), + ) def _build_hbase_install_bootstrap_action(region): return emrutils.build_bootstrap_action( name=constants.INSTALL_HBASE_NAME, path=emrutils.build_s3_link( - relative_path=constants.HBASE_INSTALL_BA_PATH, - region=region)) + relative_path=constants.HBASE_INSTALL_BA_PATH, region=region + ), + ) def _build_hbase_install_step(jar): @@ -106,7 +111,8 @@ def _build_hbase_install_step(jar): jar=jar, name=constants.START_HBASE_NAME, action_on_failure=constants.TERMINATE_CLUSTER, - args=constants.HBASE_INSTALL_ARG) + args=constants.HBASE_INSTALL_ARG, + ) def _build_impala_install_bootstrap_action(region, args=None): @@ -114,37 +120,43 @@ def _build_impala_install_bootstrap_action(region, args=None): constants.BASE_PATH_ARG, emrutils.build_s3_link(region=region), constants.IMPALA_VERSION, 
- constants.LATEST] + constants.LATEST, + ] if args is not None: args_list.append(constants.IMPALA_CONF) args_list.append(','.join(args)) return emrutils.build_bootstrap_action( name=constants.INSTALL_IMPALA_NAME, path=emrutils.build_s3_link( - relative_path=constants.IMPALA_INSTALL_PATH, - region=region), - args=args_list) + relative_path=constants.IMPALA_INSTALL_PATH, region=region + ), + args=args_list, + ) -def _build_install_hive_step(region, - action_on_failure=constants.TERMINATE_CLUSTER): +def _build_install_hive_step( + region, action_on_failure=constants.TERMINATE_CLUSTER +): step_args = [ emrutils.build_s3_link(constants.HIVE_SCRIPT_PATH, region), constants.INSTALL_HIVE_ARG, constants.BASE_PATH_ARG, emrutils.build_s3_link(constants.HIVE_BASE_PATH, region), constants.HIVE_VERSIONS, - constants.LATEST] + constants.LATEST, + ] step = emrutils.build_step( name=constants.INSTALL_HIVE_NAME, action_on_failure=action_on_failure, jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region), - args=step_args) + args=step_args, + ) return step -def _build_install_hive_site_step(region, hive_site_path, - action_on_failure=constants.CANCEL_AND_WAIT): +def _build_install_hive_site_step( + region, hive_site_path, action_on_failure=constants.CANCEL_AND_WAIT +): step_args = [ emrutils.build_s3_link(constants.HIVE_SCRIPT_PATH, region), constants.BASE_PATH_ARG, @@ -152,29 +164,34 @@ def _build_install_hive_site_step(region, hive_site_path, constants.INSTALL_HIVE_SITE_ARG, hive_site_path, constants.HIVE_VERSIONS, - constants.LATEST] + constants.LATEST, + ] step = emrutils.build_step( name=constants.INSTALL_HIVE_SITE_NAME, action_on_failure=action_on_failure, jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region), - args=step_args) + args=step_args, + ) return step -def _build_pig_install_step(region, - action_on_failure=constants.TERMINATE_CLUSTER): +def _build_pig_install_step( + region, action_on_failure=constants.TERMINATE_CLUSTER +): step_args = [ emrutils.build_s3_link(constants.PIG_SCRIPT_PATH, region), constants.INSTALL_PIG_ARG, constants.BASE_PATH_ARG, emrutils.build_s3_link(constants.PIG_BASE_PATH, region), constants.PIG_VERSIONS, - constants.LATEST] + constants.LATEST, + ] step = emrutils.build_step( name=constants.INSTALL_PIG_NAME, action_on_failure=action_on_failure, jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region), - args=step_args) + args=step_args, + ) return step diff --git a/awscli/customizations/emr/argumentschema.py b/awscli/customizations/emr/argumentschema.py index 2022480a56ca..f816746ee0f2 100644 --- a/awscli/customizations/emr/argumentschema.py +++ b/awscli/customizations/emr/argumentschema.py @@ -16,15 +16,9 @@ CONFIGURATIONS_PROPERTIES_SCHEMA = { "type": "map", - "key": { - "type": "string", - "description": "Configuration key" - }, - "value": { - "type": "string", - "description": "Configuration value" - }, - "description": "Application configuration properties" + "key": {"type": "string", "description": "Configuration key"}, + "value": {"type": "string", "description": "Configuration value"}, + "description": "Application configuration properties", } CONFIGURATIONS_CLASSIFICATION_SCHEMA = { @@ -38,10 +32,10 @@ "type": "object", "properties": { "Classification": CONFIGURATIONS_CLASSIFICATION_SCHEMA, - "Properties": CONFIGURATIONS_PROPERTIES_SCHEMA - } + "Properties": CONFIGURATIONS_PROPERTIES_SCHEMA, + }, }, - "description": "Instance group application configurations." 
+ "description": "Instance group application configurations.", } OUTER_CONFIGURATIONS_SCHEMA = { @@ -51,45 +45,48 @@ "properties": { "Classification": CONFIGURATIONS_CLASSIFICATION_SCHEMA, "Properties": CONFIGURATIONS_PROPERTIES_SCHEMA, - "Configurations": INNER_CONFIGURATIONS_SCHEMA - } + "Configurations": INNER_CONFIGURATIONS_SCHEMA, + }, }, - "description": "Instance group application configurations." + "description": "Instance group application configurations.", } ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA = { "type": "object", - "properties" : { + "properties": { "UsageStrategy": { "type": "string", "description": "The strategy of whether to use available capacity reservations to fulfill On-Demand capacity.", - "enum": ["use-capacity-reservations-first"] + "enum": ["use-capacity-reservations-first"], }, "CapacityReservationPreference": { "type": "string", "description": "The preference of the capacity reservation of the instance.", - "enum": [ - "open", - "none" - ] + "enum": ["open", "none"], }, "CapacityReservationResourceGroupArn": { "type": "string", - "description": "The ARN of the capacity reservation resource group in which to run the instance." - } - } + "description": "The ARN of the capacity reservation resource group in which to run the instance.", + }, + }, } SPOT_ALLOCATION_STRATEGY_SCHEMA = { "type": "string", "description": "The strategy to use to launch Spot instance fleets.", - "enum": ["capacity-optimized", "price-capacity-optimized", "lowest-price", "diversified", "capacity-optimized-prioritized"] + "enum": [ + "capacity-optimized", + "price-capacity-optimized", + "lowest-price", + "diversified", + "capacity-optimized-prioritized", + ], } ONDEMAND_ALLOCATION_STRATEGY_SCHEMA = { "type": "string", "description": "The strategy to use to launch On-Demand instance fleets.", - "enum": ["lowest-price", "prioritized"] + "enum": ["lowest-price", "prioritized"], } INSTANCE_GROUPS_SCHEMA = { @@ -99,39 +96,35 @@ "properties": { "Name": { "type": "string", - "description": - "Friendly name given to the instance group." + "description": "Friendly name given to the instance group.", }, "InstanceGroupType": { "type": "string", - "description": - "The type of the instance group in the cluster.", + "description": "The type of the instance group in the cluster.", "enum": ["MASTER", "CORE", "TASK"], - "required": True + "required": True, }, "BidPrice": { "type": "string", - "description": - "Bid price for each Amazon EC2 instance in the " - "instance group when launching nodes as Spot Instances, " - "expressed in USD." + "description": "Bid price for each Amazon EC2 instance in the " + "instance group when launching nodes as Spot Instances, " + "expressed in USD.", }, "InstanceType": { "type": "string", - "description": - "The Amazon EC2 instance type for all instances " - "in the instance group.", - "required": True + "description": "The Amazon EC2 instance type for all instances " + "in the instance group.", + "required": True, }, "InstanceCount": { "type": "integer", "description": "Target number of Amazon EC2 instances " "for the instance group", - "required": True + "required": True, }, "CustomAmiId": { "type": "string", - "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances." 
+ "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances.", }, "EbsConfiguration": { "type": "object", @@ -146,19 +139,19 @@ "items": { "type": "object", "properties": { - "VolumeSpecification" : { + "VolumeSpecification": { "type": "object", "description": "The EBS volume specification that will be created and attached to every instance in this instance group.", "properties": { "VolumeType": { "type": "string", "description": "The EBS volume type that is attached to all the instances in the instance group. Valid types are: gp2, io1, and standard.", - "required": True + "required": True, }, "SizeInGB": { "type": "integer", "description": "The EBS volume size, in GB, that is attached to all the instances in the instance group.", - "required": True + "required": True, }, "Iops": { "type": "integer", @@ -167,17 +160,17 @@ "Throughput": { "type": "integer", "description": "The throughput of the EBS volume that is attached to all the instances in the instance group.", - } - } + }, + }, }, "VolumesPerInstance": { "type": "integer", "description": "The number of EBS volumes that will be created and attached to each instance in the instance group.", - } - } - } - } - } + }, + }, + }, + }, + }, }, "AutoScalingPolicy": { "type": "object", @@ -190,14 +183,14 @@ "MinCapacity": { "type": "integer", "description": "The minimum value for the instances to scale in" - " to in response to scaling activities." + " to in response to scaling activities.", }, "MaxCapacity": { "type": "integer", "description": "The maximum value for the instances to scale out to in response" - " to scaling activities" - } - } + " to scaling activities", + }, + }, }, "Rules": { "type": "array", @@ -207,11 +200,11 @@ "properties": { "Name": { "type": "string", - "description": "Name of the Auto Scaling rule." + "description": "Name of the Auto Scaling rule.", }, "Description": { "type": "string", - "description": "Description of the Auto Scaling rule." + "description": "Description of the Auto Scaling rule.", }, "Action": { "type": "object", @@ -220,35 +213,38 @@ "Market": { # Required for Instance Fleets "type": "string", "description": "Market type of the Amazon EC2 instances used to create a " - "cluster node by Auto Scaling action.", - "enum": ["ON_DEMAND", "SPOT"] + "cluster node by Auto Scaling action.", + "enum": ["ON_DEMAND", "SPOT"], }, "SimpleScalingPolicyConfiguration": { "type": "object", "description": "The Simple scaling configuration that will be associated" - "to Auto Scaling action.", + "to Auto Scaling action.", "properties": { "AdjustmentType": { "type": "string", "description": "Specifies how the ScalingAdjustment parameter is " - "interpreted.", - "enum": ["CHANGE_IN_CAPACITY", "PERCENT_CHANGE_IN_CAPACITY", - "EXACT_CAPACITY"] + "interpreted.", + "enum": [ + "CHANGE_IN_CAPACITY", + "PERCENT_CHANGE_IN_CAPACITY", + "EXACT_CAPACITY", + ], }, "ScalingAdjustment": { "type": "integer", "description": "The amount by which to scale, based on the " - "specified adjustment type." + "specified adjustment type.", }, "CoolDown": { "type": "integer", "description": "The amount of time, in seconds, after a scaling " - "activity completes and before the next scaling " - "activity can start." 
- } - } - } - } + "activity completes and before the next scaling " + "activity can start.", + }, + }, + }, + }, }, "Trigger": { "type": "object", @@ -257,44 +253,44 @@ "CloudWatchAlarmDefinition": { "type": "object", "description": "The Alarm to be registered with CloudWatch, to trigger" - " scaling activities.", + " scaling activities.", "properties": { "ComparisonOperator": { "type": "string", "description": "The arithmetic operation to use when comparing the" - " specified Statistic and Threshold." + " specified Statistic and Threshold.", }, "EvaluationPeriods": { "type": "integer", "description": "The number of periods over which data is compared" - " to the specified threshold." + " to the specified threshold.", }, "MetricName": { "type": "string", - "description": "The name for the alarm's associated metric." + "description": "The name for the alarm's associated metric.", }, "Namespace": { "type": "string", - "description": "The namespace for the alarm's associated metric." + "description": "The namespace for the alarm's associated metric.", }, "Period": { "type": "integer", "description": "The period in seconds over which the specified " - "statistic is applied." + "statistic is applied.", }, "Statistic": { "type": "string", "description": "The statistic to apply to the alarm's associated " - "metric." + "metric.", }, "Threshold": { "type": "double", "description": "The value against which the specified statistic is " - "compared." + "compared.", }, "Unit": { "type": "string", - "description": "The statistic's unit of measure." + "description": "The statistic's unit of measure.", }, "Dimensions": { "type": "array", @@ -304,27 +300,27 @@ "properties": { "Key": { "type": "string", - "description": "Dimension Key." + "description": "Dimension Key.", }, "Value": { "type": "string", - "description": "Dimension Value." - } - } - } - } - } + "description": "Dimension Value.", + }, + }, + }, + }, + }, } - } - } - } - } - } - } + }, + }, + }, + }, + }, + }, }, - "Configurations": OUTER_CONFIGURATIONS_SCHEMA - } - } + "Configurations": OUTER_CONFIGURATIONS_SCHEMA, + }, + }, } INSTANCE_FLEETS_SCHEMA = { @@ -334,21 +330,21 @@ "properties": { "Name": { "type": "string", - "description": "Friendly name given to the instance fleet." + "description": "Friendly name given to the instance fleet.", }, "InstanceFleetType": { "type": "string", "description": "The type of the instance fleet in the cluster.", "enum": ["MASTER", "CORE", "TASK"], - "required": True + "required": True, }, "TargetOnDemandCapacity": { "type": "integer", - "description": "Target on-demand capacity for the instance fleet." + "description": "Target on-demand capacity for the instance fleet.", }, "TargetSpotCapacity": { "type": "integer", - "description": "Target spot capacity for the instance fleet." + "description": "Target spot capacity for the instance fleet.", }, "InstanceTypeConfigs": { "type": "array", @@ -358,30 +354,30 @@ "InstanceType": { "type": "string", "description": "The Amazon EC2 instance type for the instance fleet.", - "required": True + "required": True, }, "WeightedCapacity": { "type": "integer", - "description": "The weight assigned to an instance type, which will impact the overall fulfillment of the capacity." + "description": "The weight assigned to an instance type, which will impact the overall fulfillment of the capacity.", }, "BidPrice": { "type": "string", "description": "Bid price for each Amazon EC2 instance in the " - "instance fleet when launching nodes as Spot Instances, " - "expressed in USD." 
+ "instance fleet when launching nodes as Spot Instances, " + "expressed in USD.", }, "BidPriceAsPercentageOfOnDemandPrice": { "type": "double", - "description": "Bid price as percentage of on-demand price." + "description": "Bid price as percentage of on-demand price.", }, "CustomAmiId": { "type": "string", - "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances." + "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances.", }, "Priority": { "type": "double", "description": "The priority at which Amazon EMR launches the EC2 instances with this instance type. " - "Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first." + "Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first.", }, "EbsConfiguration": { "type": "object", @@ -396,83 +392,83 @@ "items": { "type": "object", "properties": { - "VolumeSpecification" : { + "VolumeSpecification": { "type": "object", "description": "The EBS volume specification that is created " - "and attached to each instance in the instance group.", + "and attached to each instance in the instance group.", "properties": { "VolumeType": { "type": "string", "description": "The EBS volume type that is attached to all " - "the instances in the instance group. Valid types are: " - "gp2, io1, and standard.", - "required": True + "the instances in the instance group. Valid types are: " + "gp2, io1, and standard.", + "required": True, }, "SizeInGB": { "type": "integer", "description": "The EBS volume size, in GB, that is attached " - "to all the instances in the instance group.", - "required": True + "to all the instances in the instance group.", + "required": True, }, "Iops": { "type": "integer", "description": "The IOPS of the EBS volume that is attached to " - "all the instances in the instance group.", + "all the instances in the instance group.", }, "Throughput": { - "type": "integer", - "description": "The throughput of the EBS volume that is attached to " - "all the instances in the instance group.", - } - } + "type": "integer", + "description": "The throughput of the EBS volume that is attached to " + "all the instances in the instance group.", + }, + }, }, "VolumesPerInstance": { "type": "integer", "description": "The number of EBS volumes that will be created and " - "attached to each instance in the instance group.", - } - } - } - } - } + "attached to each instance in the instance group.", + }, + }, + }, + }, + }, }, - "Configurations": OUTER_CONFIGURATIONS_SCHEMA - } - } + "Configurations": OUTER_CONFIGURATIONS_SCHEMA, + }, + }, }, "LaunchSpecifications": { "type": "object", - "properties" : { + "properties": { "OnDemandSpecification": { "type": "object", "properties": { "AllocationStrategy": ONDEMAND_ALLOCATION_STRATEGY_SCHEMA, - "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA - } + "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA, + }, }, "SpotSpecification": { "type": "object", "properties": { "TimeoutDurationMinutes": { "type": "integer", - "description": "The time, in minutes, after which the action specified in TimeoutAction field will be performed if requested resources are unavailable." 
+ "description": "The time, in minutes, after which the action specified in TimeoutAction field will be performed if requested resources are unavailable.", }, "TimeoutAction": { "type": "string", "description": "The action that is performed after TimeoutDurationMinutes.", "enum": [ "TERMINATE_CLUSTER", - "SWITCH_TO_ONDEMAND" - ] + "SWITCH_TO_ONDEMAND", + ], }, "BlockDurationMinutes": { "type": "integer", - "description": "Block duration in minutes." + "description": "Block duration in minutes.", }, - "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA - } - } - } + "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA, + }, + }, + }, }, "ResizeSpecifications": { "type": "object", @@ -481,31 +477,28 @@ "type": "object", "properties": { "TimeoutDurationMinutes": { - "type" : "integer", - "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable." + "type": "integer", + "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable.", }, - "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA - } + "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA, + }, }, "OnDemandResizeSpecification": { "type": "object", "properties": { "TimeoutDurationMinutes": { - "type" : "integer", - "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable." + "type": "integer", + "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable.", }, "AllocationStrategy": ONDEMAND_ALLOCATION_STRATEGY_SCHEMA, - "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA - } - } - } + "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA, + }, + }, + }, }, - "Context": { - "type": "string", - "description": "Reserved." - } - } - } + "Context": {"type": "string", "description": "Reserved."}, + }, + }, } EC2_ATTRIBUTES_SCHEMA = { @@ -513,75 +506,64 @@ "properties": { "KeyName": { "type": "string", - "description": - "The name of the Amazon EC2 key pair that can " - "be used to ssh to the master node as the user 'hadoop'." + "description": "The name of the Amazon EC2 key pair that can " + "be used to ssh to the master node as the user 'hadoop'.", }, "SubnetId": { "type": "string", - "description": - "To launch the cluster in Amazon " - "Virtual Private Cloud (Amazon VPC), set this parameter to " - "the identifier of the Amazon VPC subnet where you want " - "the cluster to launch. If you do not specify this value, " - "the cluster is launched in the normal Amazon Web Services " - "cloud, outside of an Amazon VPC. " + "description": "To launch the cluster in Amazon " + "Virtual Private Cloud (Amazon VPC), set this parameter to " + "the identifier of the Amazon VPC subnet where you want " + "the cluster to launch. If you do not specify this value, " + "the cluster is launched in the normal Amazon Web Services " + "cloud, outside of an Amazon VPC. ", }, "SubnetIds": { "type": "array", - "description": - "List of SubnetIds.", - "items": { - "type": "string" - } + "description": "List of SubnetIds.", + "items": {"type": "string"}, }, "AvailabilityZone": { "type": "string", - "description": "The Availability Zone the cluster will run in." 
+ "description": "The Availability Zone the cluster will run in.", }, "AvailabilityZones": { "type": "array", "description": "List of AvailabilityZones.", - "items": { - "type": "string" - } + "items": {"type": "string"}, }, "InstanceProfile": { "type": "string", - "description": - "An IAM role for the cluster. The EC2 instances of the cluster" - " assume this role. The default role is " + - EC2_ROLE_NAME + ". In order to use the default" - " role, you must have already created it using the " - "create-default-roles command. " + "description": "An IAM role for the cluster. The EC2 instances of the cluster" + " assume this role. The default role is " + + EC2_ROLE_NAME + + ". In order to use the default" + " role, you must have already created it using the " + "create-default-roles command. ", }, "EmrManagedMasterSecurityGroup": { "type": "string", - "description": helptext.EMR_MANAGED_MASTER_SECURITY_GROUP + "description": helptext.EMR_MANAGED_MASTER_SECURITY_GROUP, }, "EmrManagedSlaveSecurityGroup": { "type": "string", - "description": helptext.EMR_MANAGED_SLAVE_SECURITY_GROUP + "description": helptext.EMR_MANAGED_SLAVE_SECURITY_GROUP, }, "ServiceAccessSecurityGroup": { "type": "string", - "description": helptext.SERVICE_ACCESS_SECURITY_GROUP + "description": helptext.SERVICE_ACCESS_SECURITY_GROUP, }, "AdditionalMasterSecurityGroups": { "type": "array", "description": helptext.ADDITIONAL_MASTER_SECURITY_GROUPS, - "items": { - "type": "string" - } + "items": {"type": "string"}, }, "AdditionalSlaveSecurityGroups": { "type": "array", "description": helptext.ADDITIONAL_SLAVE_SECURITY_GROUPS, - "items": { - "type": "string" - } - } - } + "items": {"type": "string"}, + }, + }, } @@ -593,20 +575,26 @@ "Name": { "type": "string", "description": "Application name.", - "enum": ["MapR", "HUE", "HIVE", "PIG", "HBASE", - "IMPALA", "GANGLIA", "HADOOP", "SPARK"], - "required": True + "enum": [ + "MapR", + "HUE", + "HIVE", + "PIG", + "HBASE", + "IMPALA", + "GANGLIA", + "HADOOP", + "SPARK", + ], + "required": True, }, "Args": { "type": "array", - "description": - "A list of arguments to pass to the application.", - "items": { - "type": "string" - } - } - } - } + "description": "A list of arguments to pass to the application.", + "items": {"type": "string"}, + }, + }, + }, } BOOTSTRAP_ACTIONS_SCHEMA = { @@ -614,29 +602,22 @@ "items": { "type": "object", "properties": { - "Name": { - "type": "string", - "default": "Bootstrap Action" - }, + "Name": {"type": "string", "default": "Bootstrap Action"}, "Path": { "type": "string", - "description": - "Location of the script to run during a bootstrap action. " - "Can be either a location in Amazon S3 or " - "on a local file system.", - "required": True + "description": "Location of the script to run during a bootstrap action. 
" + "Can be either a location in Amazon S3 or " + "on a local file system.", + "required": True, }, "Args": { "type": "array", - "description": - "A list of command line arguments to pass to " - "the bootstrap action script", - "items": { - "type": "string" - } - } - } - } + "description": "A list of command line arguments to pass to " + "the bootstrap action script", + "items": {"type": "string"}, + }, + }, + }, } @@ -647,8 +628,7 @@ "properties": { "Type": { "type": "string", - "description": - "The type of a step to be added to the cluster.", + "description": "The type of a step to be added to the cluster.", "default": "custom_jar", "enum": ["CUSTOM_JAR", "STREAMING", "HIVE", "PIG", "IMPALA"], }, @@ -660,7 +640,7 @@ "type": "string", "description": "The action to take if the cluster step fails.", "enum": ["TERMINATE_CLUSTER", "CANCEL_AND_WAIT", "CONTINUE"], - "default": "CONTINUE" + "default": "CONTINUE", }, "Jar": { "type": "string", @@ -668,42 +648,34 @@ }, "Args": { "type": "array", - "description": - "A list of command line arguments to pass to the step.", - "items": { - "type": "string" - } + "description": "A list of command line arguments to pass to the step.", + "items": {"type": "string"}, }, "MainClass": { "type": "string", - "description": - "The name of the main class in the specified " - "Java file. If not specified, the JAR file should " - "specify a Main-Class in its manifest file." + "description": "The name of the main class in the specified " + "Java file. If not specified, the JAR file should " + "specify a Main-Class in its manifest file.", }, "Properties": { "type": "string", - "description": - "A list of Java properties that are set when the step " - "runs. You can use these properties to pass key value " - "pairs to your main function." - } - } - } + "description": "A list of Java properties that are set when the step " + "runs. You can use these properties to pass key value " + "pairs to your main function.", + }, + }, + }, } HBASE_RESTORE_FROM_BACKUP_SCHEMA = { "type": "object", "properties": { - "Dir": { - "type": "string", - "description": helptext.HBASE_BACKUP_DIR - }, + "Dir": {"type": "string", "description": helptext.HBASE_BACKUP_DIR}, "BackupVersion": { "type": "string", - "description": helptext.HBASE_BACKUP_VERSION - } - } + "description": helptext.HBASE_BACKUP_VERSION, + }, + }, } EMR_FS_SCHEMA = { @@ -711,41 +683,38 @@ "properties": { "Consistent": { "type": "boolean", - "description": "Enable EMRFS consistent view." + "description": "Enable EMRFS consistent view.", }, "SSE": { "type": "boolean", "description": "Enable Amazon S3 server-side encryption on files " - "written to S3 by EMRFS." + "written to S3 by EMRFS.", }, "RetryCount": { "type": "integer", - "description": - "The maximum number of times to retry upon S3 inconsistency." + "description": "The maximum number of times to retry upon S3 inconsistency.", }, "RetryPeriod": { "type": "integer", "description": "The amount of time (in seconds) until the first " - "retry. Subsequent retries use an exponential " - "back-off." + "retry. 
Subsequent retries use an exponential " + "back-off.", }, "Args": { "type": "array", "description": "A list of arguments to pass for additional " - "EMRFS configuration.", - "items": { - "type": "string" - } + "EMRFS configuration.", + "items": {"type": "string"}, }, "Encryption": { "type": "string", "description": "EMRFS encryption type.", - "enum": ["SERVERSIDE", "CLIENTSIDE"] + "enum": ["SERVERSIDE", "CLIENTSIDE"], }, "ProviderType": { "type": "string", "description": "EMRFS client-side encryption provider type.", - "enum": ["KMS", "CUSTOM"] + "enum": ["KMS", "CUSTOM"], }, "KMSKeyId": { "type": "string", @@ -753,46 +722,41 @@ }, "CustomProviderLocation": { "type": "string", - "description": "Custom encryption provider JAR location." + "description": "Custom encryption provider JAR location.", }, "CustomProviderClass": { "type": "string", - "description": "Custom encryption provider full class name." - } - } + "description": "Custom encryption provider full class name.", + }, + }, } -TAGS_SCHEMA = { - "type": "array", - "items": { - "type": "string" - } -} +TAGS_SCHEMA = {"type": "array", "items": {"type": "string"}} KERBEROS_ATTRIBUTES_SCHEMA = { "type": "object", "properties": { "Realm": { "type": "string", - "description": "The name of Kerberos realm." + "description": "The name of Kerberos realm.", }, "KdcAdminPassword": { "type": "string", - "description": "The password of Kerberos administrator." + "description": "The password of Kerberos administrator.", }, "CrossRealmTrustPrincipalPassword": { "type": "string", - "description": "The password to establish cross-realm trusts." + "description": "The password to establish cross-realm trusts.", }, "ADDomainJoinUser": { "type": "string", - "description": "The name of the user with privileges to join instances to Active Directory." + "description": "The name of the user with privileges to join instances to Active Directory.", }, "ADDomainJoinPassword": { "type": "string", - "description": "The password of the user with privileges to join instances to Active Directory." - } - } + "description": "The password of the user with privileges to join instances to Active Directory.", + }, + }, } MANAGED_SCALING_POLICY_SCHEMA = { @@ -800,73 +764,66 @@ "properties": { "ComputeLimits": { "type": "object", - "description": - "The EC2 unit limits for a managed scaling policy. " - "The managed scaling activity of a cluster is not allowed to go above " - "or below these limits. The limits apply to CORE and TASK groups " - "and exclude the capacity of the MASTER group.", + "description": "The EC2 unit limits for a managed scaling policy. " + "The managed scaling activity of a cluster is not allowed to go above " + "or below these limits. The limits apply to CORE and TASK groups " + "and exclude the capacity of the MASTER group.", "properties": { - "MinimumCapacityUnits": { - "type": "integer", - "description": - "The lower boundary of EC2 units. It is measured through " - "VCPU cores or instances for instance groups and measured " - "through units for instance fleets. Managed scaling " - "activities are not allowed beyond this boundary.", - "required": True - }, - "MaximumCapacityUnits": { - "type": "integer", - "description": - "The upper boundary of EC2 units. It is measured through " - "VCPU cores or instances for instance groups and measured " - "through units for instance fleets. 
Managed scaling " - "activities are not allowed beyond this boundary.", - "required": True - }, - "MaximumOnDemandCapacityUnits": { - "type": "integer", - "description": - "The upper boundary of on-demand EC2 units. It is measured through " - "VCPU cores or instances for instance groups and measured " - "through units for instance fleets. The on-demand units are not " - "allowed to scale beyond this boundary. " - "This value must be lower than MaximumCapacityUnits." - }, - "UnitType": { - "type": "string", - "description": "The unit type used for specifying a managed scaling policy.", - "enum": ["VCPU", "Instances", "InstanceFleetUnits"], - "required": True - }, - "MaximumCoreCapacityUnits": { - "type": "integer", - "description": - "The upper boundary of EC2 units for core node type in a cluster. " - "It is measured through VCPU cores or instances for instance groups " - "and measured through units for instance fleets. " - "The core units are not allowed to scale beyond this boundary. " - "The parameter is used to split capacity allocation between core and task nodes." - } - } + "MinimumCapacityUnits": { + "type": "integer", + "description": "The lower boundary of EC2 units. It is measured through " + "VCPU cores or instances for instance groups and measured " + "through units for instance fleets. Managed scaling " + "activities are not allowed beyond this boundary.", + "required": True, + }, + "MaximumCapacityUnits": { + "type": "integer", + "description": "The upper boundary of EC2 units. It is measured through " + "VCPU cores or instances for instance groups and measured " + "through units for instance fleets. Managed scaling " + "activities are not allowed beyond this boundary.", + "required": True, + }, + "MaximumOnDemandCapacityUnits": { + "type": "integer", + "description": "The upper boundary of on-demand EC2 units. It is measured through " + "VCPU cores or instances for instance groups and measured " + "through units for instance fleets. The on-demand units are not " + "allowed to scale beyond this boundary. " + "This value must be lower than MaximumCapacityUnits.", + }, + "UnitType": { + "type": "string", + "description": "The unit type used for specifying a managed scaling policy.", + "enum": ["VCPU", "Instances", "InstanceFleetUnits"], + "required": True, + }, + "MaximumCoreCapacityUnits": { + "type": "integer", + "description": "The upper boundary of EC2 units for core node type in a cluster. " + "It is measured through VCPU cores or instances for instance groups " + "and measured through units for instance fleets. " + "The core units are not allowed to scale beyond this boundary. " + "The parameter is used to split capacity allocation between core and task nodes.", + }, + }, }, "ScalingStrategy": { "type": "string", "enum": ["DEFAULT", "ADVANCED"], - "description": - "Determines whether a custom scaling utilization performance index can be set. " - "Possible values include ADVANCED or DEFAULT." + "description": "Determines whether a custom scaling utilization performance index can be set. " + "Possible values include ADVANCED or DEFAULT.", }, "UtilizationPerformanceIndex": { "type": "integer", - "description": - "An integer value that represents an advanced scaling strategy. " - "Setting a higher value optimizes for performance. " - "Setting a lower value optimizes for resource conservation. " - "Setting the value to 50 balances performance and resource conservation. " - "Possible values are 1, 25, 50, 75, and 100." 
- } - } + "description": "An integer value that represents an advanced scaling strategy. " + "Setting a higher value optimizes for performance. " + "Setting a lower value optimizes for resource conservation. " + "Setting the value to 50 balances performance and resource conservation. " + "Possible values are 1, 25, 50, 75, and 100.", + }, + }, } PLACEMENT_GROUP_CONFIGS_SCHEMA = { @@ -878,26 +835,25 @@ "type": "string", "description": "Role of the instance in the cluster.", "enum": ["MASTER", "CORE", "TASK"], - "required": True + "required": True, }, "PlacementStrategy": { "type": "string", "description": "EC2 Placement Group strategy associated " - "with instance role.", - "enum": ["SPREAD", "PARTITION", "CLUSTER", "NONE"] - } - } - } + "with instance role.", + "enum": ["SPREAD", "PARTITION", "CLUSTER", "NONE"], + }, + }, + }, } AUTO_TERMINATION_POLICY_SCHEMA = { "type": "object", - "properties": { + "properties": { "IdleTimeout": { "type": "long", - "description": - "Specifies the amount of idle time in seconds after which the cluster automatically terminates. " - "You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).", + "description": "Specifies the amount of idle time in seconds after which the cluster automatically terminates. " + "You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).", } - } + }, } diff --git a/awscli/customizations/emr/command.py b/awscli/customizations/emr/command.py index b208a3c13170..d55819a6bc6b 100644 --- a/awscli/customizations/emr/command.py +++ b/awscli/customizations/emr/command.py @@ -12,11 +12,9 @@ # language governing permissions and limitations under the License. import logging + from awscli.customizations.commands import BasicCommand -from awscli.customizations.emr import config -from awscli.customizations.emr import configutils -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import config, configutils, emrutils, exceptions LOG = logging.getLogger(__name__) @@ -24,36 +22,42 @@ class Command(BasicCommand): region = None - UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS = set([ - 'install-applications', - 'restore-from-hbase-backup', - 'schedule-hbase-backup', - 'create-hbase-backup', - 'disable-hbase-backups', - ]) + UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS = set( + [ + 'install-applications', + 'restore-from-hbase-backup', + 'schedule-hbase-backup', + 'create-hbase-backup', + 'disable-hbase-backups', + ] + ) def supports_arg(self, name): return any((x['name'] == name for x in self.ARG_TABLE)) def _run_main(self, parsed_args, parsed_globals): - - self._apply_configs(parsed_args, - configutils.get_configs(self._session)) + self._apply_configs( + parsed_args, configutils.get_configs(self._session) + ) self.region = emrutils.get_region(self._session, parsed_globals) self._validate_unsupported_commands_for_release_based_clusters( - parsed_args, parsed_globals) + parsed_args, parsed_globals + ) return self._run_main_command(parsed_args, parsed_globals) def _apply_configs(self, parsed_args, parsed_configs): - applicable_configurations = \ - self._get_applicable_configurations(parsed_args, parsed_configs) + applicable_configurations = self._get_applicable_configurations( + parsed_args, parsed_configs + ) configs_added = {} for configuration in applicable_configurations: - configuration.add(self, parsed_args, - parsed_configs[configuration.name]) - configs_added[configuration.name] = \ - 
parsed_configs[configuration.name] + configuration.add( + self, parsed_args, parsed_configs[configuration.name] + ) + configs_added[configuration.name] = parsed_configs[ + configuration.name + ] if configs_added: LOG.debug("Updated arguments with configs: %s" % configs_added) @@ -68,20 +72,23 @@ def _get_applicable_configurations(self, parsed_args, parsed_configs): # 3. Configurations that are present in parsed_configs # 2. Configurations that are not present in parsed_args - configurations = \ - config.get_applicable_configurations(self) + configurations = config.get_applicable_configurations(self) - configurations = [x for x in configurations - if x.name in parsed_configs and - not x.is_present(parsed_args)] + configurations = [ + x + for x in configurations + if x.name in parsed_configs and not x.is_present(parsed_args) + ] configurations = self._filter_configurations_in_special_cases( - configurations, parsed_args, parsed_configs) + configurations, parsed_args, parsed_configs + ) return configurations - def _filter_configurations_in_special_cases(self, configurations, - parsed_args, parsed_configs): + def _filter_configurations_in_special_cases( + self, configurations, parsed_args, parsed_configs + ): # Subclasses can override this method to filter the applicable # configurations further based upon some custom logic # Default behavior is to return the configurations list as is @@ -99,18 +106,25 @@ def _run_main_command(self, parsed_args, parsed_globals): raise NotImplementedError("_run_main_command") def _validate_unsupported_commands_for_release_based_clusters( - self, parsed_args, parsed_globals): + self, parsed_args, parsed_globals + ): command = self.NAME - if (command in self.UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS and - hasattr(parsed_args, 'cluster_id')): + if ( + command in self.UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS + and hasattr(parsed_args, 'cluster_id') + ): release_label = emrutils.get_release_label( - parsed_args.cluster_id, self._session, self.region, - parsed_globals.endpoint_url, parsed_globals.verify_ssl) + parsed_args.cluster_id, + self._session, + self.region, + parsed_globals.endpoint_url, + parsed_globals.verify_ssl, + ) if release_label: raise exceptions.UnsupportedCommandWithReleaseError( - command=command, - release_label=release_label) + command=command, release_label=release_label + ) def override_args_required_option(argument_table, args, session, **kwargs): @@ -119,8 +133,7 @@ def override_args_required_option(argument_table, args, session, **kwargs): # file # We don't want to override when user is viewing the help so that we # can show the required options correctly in the help - need_to_override = False if len(args) == 1 and args[0] == 'help' \ - else True + need_to_override = False if len(args) == 1 and args[0] == 'help' else True if need_to_override: parsed_configs = configutils.get_configs(session) diff --git a/awscli/customizations/emr/config.py b/awscli/customizations/emr/config.py index f0b615fa8ab9..15ebc81f8a0c 100644 --- a/awscli/customizations/emr/config.py +++ b/awscli/customizations/emr/config.py @@ -12,20 +12,26 @@ # language governing permissions and limitations under the License. 
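The configuration classes reworked in this file copy values from the `emr` section of the shared config file into the parsed-arguments namespace. A minimal sketch of the two value shapes handled below (string configs that may target a key inside a dict-valued option, and paired boolean flags), using an argparse-style namespace and made-up values:

    from types import SimpleNamespace

    args = SimpleNamespace(ec2_attributes=None, enable_debugging=None,
                           no_enable_debugging=None)

    # String config with an arg_value_key: key_name lands inside the
    # --ec2-attributes dict as KeyName ('my-keypair' is a made-up value).
    if args.ec2_attributes is None:
        args.ec2_attributes = {}
    args.ec2_attributes['KeyName'] = 'my-keypair'

    # Boolean config: BooleanConfiguration.add() sets the flag pair
    # symmetrically (and rejects anything but 'true'/'false').
    value = 'true'
    args.enable_debugging = value.lower() == 'true'
    args.no_enable_debugging = not args.enable_debugging
    print(args)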
import logging -from awscli.customizations.emr import configutils -from awscli.customizations.emr import exceptions + +from awscli.customizations.emr import configutils, exceptions LOG = logging.getLogger(__name__) SUPPORTED_CONFIG_LIST = [ {'name': 'service_role'}, {'name': 'log_uri'}, - {'name': 'instance_profile', 'arg_name': 'ec2_attributes', - 'arg_value_key': 'InstanceProfile'}, - {'name': 'key_name', 'arg_name': 'ec2_attributes', - 'arg_value_key': 'KeyName'}, + { + 'name': 'instance_profile', + 'arg_name': 'ec2_attributes', + 'arg_value_key': 'InstanceProfile', + }, + { + 'name': 'key_name', + 'arg_name': 'ec2_attributes', + 'arg_value_key': 'KeyName', + }, {'name': 'enable_debugging', 'type': 'boolean'}, - {'name': 'key_pair_file'} + {'name': 'key_pair_file'}, ] TYPES = ['string', 'boolean'] @@ -39,27 +45,30 @@ def get_applicable_configurations(command): def _create_supported_configuration(config): config_type = config['type'] if 'type' in config else 'string' - if (config_type == 'string'): - config_arg_name = config['arg_name'] \ - if 'arg_name' in config else config['name'] - config_arg_value_key = config['arg_value_key'] \ - if 'arg_value_key' in config else None - configuration = StringConfiguration(config['name'], - config_arg_name, - config_arg_value_key) - elif (config_type == 'boolean'): + if config_type == 'string': + config_arg_name = ( + config['arg_name'] if 'arg_name' in config else config['name'] + ) + config_arg_value_key = ( + config['arg_value_key'] if 'arg_value_key' in config else None + ) + configuration = StringConfiguration( + config['name'], config_arg_name, config_arg_value_key + ) + elif config_type == 'boolean': configuration = BooleanConfiguration(config['name']) return configuration def _create_supported_configurations(): - return [_create_supported_configuration(config) - for config in SUPPORTED_CONFIG_LIST] + return [ + _create_supported_configuration(config) + for config in SUPPORTED_CONFIG_LIST + ] class Configuration(object): - def __init__(self, name, arg_name): self.name = name self.arg_name = arg_name @@ -78,7 +87,6 @@ def _check_arg(self, parsed_args, arg_name): class StringConfiguration(Configuration): - def __init__(self, name, arg_name, arg_value_key=None): super(StringConfiguration, self).__init__(name, arg_name) self.arg_value_key = arg_value_key @@ -87,40 +95,42 @@ def is_applicable(self, command): return command.supports_arg(self.arg_name.replace('_', '-')) def is_present(self, parsed_args): - if (not self.arg_value_key): + if not self.arg_value_key: return self._check_arg(parsed_args, self.arg_name) else: - return self._check_arg(parsed_args, self.arg_name) \ - and self.arg_value_key in getattr(parsed_args, self.arg_name) + return self._check_arg( + parsed_args, self.arg_name + ) and self.arg_value_key in getattr(parsed_args, self.arg_name) def add(self, command, parsed_args, value): - if (not self.arg_value_key): + if not self.arg_value_key: setattr(parsed_args, self.arg_name, value) else: - if (not self._check_arg(parsed_args, self.arg_name)): + if not self._check_arg(parsed_args, self.arg_name): setattr(parsed_args, self.arg_name, {}) getattr(parsed_args, self.arg_name)[self.arg_value_key] = value class BooleanConfiguration(Configuration): - def __init__(self, name): super(BooleanConfiguration, self).__init__(name, name) self.no_version_arg_name = "no_" + name def is_applicable(self, command): - return command.supports_arg(self.arg_name.replace('_', '-')) and \ - command.supports_arg(self.no_version_arg_name.replace('_', '-')) + 
return command.supports_arg( + self.arg_name.replace('_', '-') + ) and command.supports_arg(self.no_version_arg_name.replace('_', '-')) def is_present(self, parsed_args): - return self._check_arg(parsed_args, self.arg_name) \ - or self._check_arg(parsed_args, self.no_version_arg_name) + return self._check_arg(parsed_args, self.arg_name) or self._check_arg( + parsed_args, self.no_version_arg_name + ) def add(self, command, parsed_args, value): - if (value.lower() == 'true'): + if value.lower() == 'true': setattr(parsed_args, self.arg_name, True) setattr(parsed_args, self.no_version_arg_name, False) - elif (value.lower() == 'false'): + elif value.lower() == 'false': setattr(parsed_args, self.arg_name, False) setattr(parsed_args, self.no_version_arg_name, True) else: @@ -128,4 +138,6 @@ def add(self, command, parsed_args, value): config_value=value, config_key=self.arg_name, profile_var_name=configutils.get_current_profile_var_name( - command._session)) + command._session + ), + ) diff --git a/awscli/customizations/emr/configutils.py b/awscli/customizations/emr/configutils.py index b893de43d85b..cb4ee4f4d307 100644 --- a/awscli/customizations/emr/configutils.py +++ b/awscli/customizations/emr/configutils.py @@ -14,8 +14,7 @@ import os from awscli.customizations.configure.writer import ConfigFileWriter -from awscli.customizations.emr.constants import EC2_ROLE_NAME -from awscli.customizations.emr.constants import EMR_ROLE_NAME +from awscli.customizations.emr.constants import EC2_ROLE_NAME, EMR_ROLE_NAME LOG = logging.getLogger(__name__) @@ -35,21 +34,31 @@ def get_current_profile_var_name(session): def _get_profile_str(session, separator): profile_name = session.get_config_variable('profile') - return 'default' if profile_name is None \ + return ( + 'default' + if profile_name is None else 'profile%c%s' % (separator, profile_name) + ) def is_any_role_configured(session): parsed_configs = get_configs(session) - return True if ('instance_profile' in parsed_configs or - 'service_role' in parsed_configs) \ + return ( + True + if ( + 'instance_profile' in parsed_configs + or 'service_role' in parsed_configs + ) else False + ) def update_roles(session): if is_any_role_configured(session): - LOG.debug("At least one of the roles is already associated with " - "your current profile ") + LOG.debug( + "At least one of the roles is already associated with " + "your current profile " + ) else: config_writer = ConfigWriter(session) config_writer.update_config('service_role', EMR_ROLE_NAME) @@ -58,15 +67,14 @@ def update_roles(session): class ConfigWriter(object): - def __init__(self, session): self.session = session self.section = _get_profile_str(session, ' ') self.config_file_writer = ConfigFileWriter() def update_config(self, key, value): - config_filename = \ - os.path.expanduser(self.session.get_config_variable('config_file')) - updated_config = {'__section__': self.section, - 'emr': {key: value}} + config_filename = os.path.expanduser( + self.session.get_config_variable('config_file') + ) + updated_config = {'__section__': self.section, 'emr': {key: value}} self.config_file_writer.update_config(updated_config, config_filename) diff --git a/awscli/customizations/emr/constants.py b/awscli/customizations/emr/constants.py index 8d2bb51a6bbe..ff7a64b1ef78 100644 --- a/awscli/customizations/emr/constants.py +++ b/awscli/customizations/emr/constants.py @@ -16,7 +16,9 @@ EC2_ROLE_NAME = "EMR_EC2_DefaultRole" EMR_ROLE_NAME = "EMR_DefaultRole" EMR_AUTOSCALING_ROLE_NAME = "EMR_AutoScaling_DefaultRole" 
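ROLE_ARN_PATTERN in the next hunk is a plain template carrying {{region_suffix}} and {{policy_name}} placeholders. A sketch of the expansion, under the assumption that substitution is a simple string replace, shown here for the standard `aws` partition:

    ROLE_ARN_PATTERN = (
        "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}"
    )
    EC2_ROLE_POLICY_NAME = "AmazonElasticMapReduceforEC2Role"

    arn = (ROLE_ARN_PATTERN
           .replace('{{region_suffix}}', 'aws')
           .replace('{{policy_name}}', EC2_ROLE_POLICY_NAME))
    print(arn)
    # -> arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role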
-ROLE_ARN_PATTERN = "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}" +ROLE_ARN_PATTERN = ( + "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}" +) EC2_ROLE_POLICY_NAME = "AmazonElasticMapReduceforEC2Role" EMR_ROLE_POLICY_NAME = "AmazonElasticMapReduceRole" EMR_AUTOSCALING_ROLE_POLICY_NAME = "AmazonElasticMapReduceforAutoScalingRole" @@ -57,12 +59,14 @@ EMRFS_RETRY_PERIOD_KEY = 'fs.s3.consistent.retryPeriodSeconds' EMRFS_CSE_KEY = 'fs.s3.cse.enabled' EMRFS_CSE_KMS_KEY_ID_KEY = 'fs.s3.cse.kms.keyId' -EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY = \ +EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY = ( 'fs.s3.cse.encryptionMaterialsProvider' +) EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY = 'fs.s3.cse.encryptionMaterialsProvider.uri' -EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME = ('com.amazon.ws.emr.hadoop.fs.cse.' - 'KMSEncryptionMaterialsProvider') +EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME = ( + 'com.amazon.ws.emr.hadoop.fs.cse.' 'KMSEncryptionMaterialsProvider' +) EMRFS_CSE_CUSTOM_S3_GET_BA_PATH = 'file:/usr/share/aws/emr/scripts/s3get' EMRFS_CUSTOM_DEST_PATH = '/usr/share/aws/emr/auxlib' @@ -181,16 +185,31 @@ APPLICATION_AUTOSCALING = 'application-autoscaling' LATEST = 'latest' -APPLICATIONS = ["HIVE", "PIG", "HBASE", "GANGLIA", "IMPALA", "SPARK", "MAPR", - "MAPR_M3", "MAPR_M5", "MAPR_M7"] +APPLICATIONS = [ + "HIVE", + "PIG", + "HBASE", + "GANGLIA", + "IMPALA", + "SPARK", + "MAPR", + "MAPR_M3", + "MAPR_M5", + "MAPR_M7", +] SSH_USER = 'hadoop' STARTING_STATES = ['STARTING', 'BOOTSTRAPPING'] TERMINATED_STATES = ['TERMINATED', 'TERMINATING', 'TERMINATED_WITH_ERRORS'] # list-clusters -LIST_CLUSTERS_ACTIVE_STATES = ['STARTING', 'BOOTSTRAPPING', 'RUNNING', - 'WAITING', 'TERMINATING'] +LIST_CLUSTERS_ACTIVE_STATES = [ + 'STARTING', + 'BOOTSTRAPPING', + 'RUNNING', + 'WAITING', + 'TERMINATING', +] LIST_CLUSTERS_TERMINATED_STATES = ['TERMINATED'] LIST_CLUSTERS_FAILED_STATES = ['TERMINATED_WITH_ERRORS'] diff --git a/awscli/customizations/emr/createcluster.py b/awscli/customizations/emr/createcluster.py index 17b780ee2ce0..22d333bbe733 100644 --- a/awscli/customizations/emr/createcluster.py +++ b/awscli/customizations/emr/createcluster.py @@ -13,22 +13,23 @@ import re -from awscli.customizations.exceptions import ParamValidationError from awscli.customizations.commands import BasicCommand -from awscli.customizations.emr import applicationutils -from awscli.customizations.emr import argumentschema -from awscli.customizations.emr import constants -from awscli.customizations.emr import emrfsutils -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions -from awscli.customizations.emr import hbaseutils -from awscli.customizations.emr import helptext -from awscli.customizations.emr import instancegroupsutils -from awscli.customizations.emr import instancefleetsutils -from awscli.customizations.emr import steputils +from awscli.customizations.emr import ( + applicationutils, + argumentschema, + constants, + emrfsutils, + emrutils, + exceptions, + hbaseutils, + helptext, + instancefleetsutils, + instancegroupsutils, + steputils, +) from awscli.customizations.emr.command import Command -from awscli.customizations.emr.constants import EC2_ROLE_NAME -from awscli.customizations.emr.constants import EMR_ROLE_NAME +from awscli.customizations.emr.constants import EC2_ROLE_NAME, EMR_ROLE_NAME +from awscli.customizations.exceptions import ParamValidationError from botocore.compat import json @@ -36,114 +37,181 @@ class CreateCluster(Command): NAME = 
'create-cluster' DESCRIPTION = helptext.CREATE_CLUSTER_DESCRIPTION ARG_TABLE = [ - {'name': 'release-label', - 'help_text': helptext.RELEASE_LABEL}, - {'name': 'os-release-label', - 'help_text': helptext.OS_RELEASE_LABEL}, - {'name': 'ami-version', - 'help_text': helptext.AMI_VERSION}, - {'name': 'instance-groups', - 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA, - 'help_text': helptext.INSTANCE_GROUPS}, - {'name': 'instance-type', - 'help_text': helptext.INSTANCE_TYPE}, - {'name': 'instance-count', - 'help_text': helptext.INSTANCE_COUNT}, - {'name': 'auto-terminate', 'action': 'store_true', - 'group_name': 'auto_terminate', - 'help_text': helptext.AUTO_TERMINATE}, - {'name': 'no-auto-terminate', 'action': 'store_true', - 'group_name': 'auto_terminate'}, - {'name': 'instance-fleets', - 'schema': argumentschema.INSTANCE_FLEETS_SCHEMA, - 'help_text': helptext.INSTANCE_FLEETS}, - {'name': 'name', - 'default': 'Development Cluster', - 'help_text': helptext.CLUSTER_NAME}, - {'name': 'log-uri', - 'help_text': helptext.LOG_URI}, - {'name': 'log-encryption-kms-key-id', - 'help_text': helptext.LOG_ENCRYPTION_KMS_KEY_ID}, - {'name': 'service-role', - 'help_text': helptext.SERVICE_ROLE}, - {'name': 'auto-scaling-role', - 'help_text': helptext.AUTOSCALING_ROLE}, - {'name': 'use-default-roles', 'action': 'store_true', - 'help_text': helptext.USE_DEFAULT_ROLES}, - {'name': 'configurations', - 'help_text': helptext.CONFIGURATIONS}, - {'name': 'ec2-attributes', - 'help_text': helptext.EC2_ATTRIBUTES, - 'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA}, - {'name': 'termination-protected', 'action': 'store_true', - 'group_name': 'termination_protected', - 'help_text': helptext.TERMINATION_PROTECTED}, - {'name': 'no-termination-protected', 'action': 'store_true', - 'group_name': 'termination_protected'}, - {'name': 'unhealthy-node-replacement', 'action': 'store_true', - 'group_name': 'unhealthy_node_replacement', - 'help_text': helptext.UNHEALTHY_NODE_REPLACEMENT}, - {'name': 'no-unhealthy-node-replacement', 'action': 'store_true', - 'group_name': 'unhealthy_node_replacement'}, - {'name': 'scale-down-behavior', - 'help_text': helptext.SCALE_DOWN_BEHAVIOR}, - {'name': 'visible-to-all-users', 'action': 'store_true', - 'group_name': 'visibility', - 'help_text': helptext.VISIBILITY}, - {'name': 'no-visible-to-all-users', 'action': 'store_true', - 'group_name': 'visibility'}, - {'name': 'enable-debugging', 'action': 'store_true', - 'group_name': 'debug', - 'help_text': helptext.DEBUGGING}, - {'name': 'no-enable-debugging', 'action': 'store_true', - 'group_name': 'debug'}, - {'name': 'tags', 'nargs': '+', - 'help_text': helptext.TAGS, - 'schema': argumentschema.TAGS_SCHEMA}, - {'name': 'bootstrap-actions', - 'help_text': helptext.BOOTSTRAP_ACTIONS, - 'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA}, - {'name': 'applications', - 'help_text': helptext.APPLICATIONS, - 'schema': argumentschema.APPLICATIONS_SCHEMA}, - {'name': 'emrfs', - 'help_text': helptext.EMR_FS, - 'schema': argumentschema.EMR_FS_SCHEMA}, - {'name': 'steps', - 'schema': argumentschema.STEPS_SCHEMA, - 'help_text': helptext.STEPS}, - {'name': 'additional-info', - 'help_text': helptext.ADDITIONAL_INFO}, - {'name': 'restore-from-hbase-backup', - 'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA, - 'help_text': helptext.RESTORE_FROM_HBASE}, - {'name': 'security-configuration', - 'help_text': helptext.SECURITY_CONFIG}, - {'name': 'custom-ami-id', - 'help_text' : helptext.CUSTOM_AMI_ID}, - {'name': 'ebs-root-volume-size', - 'help_text' : 
helptext.EBS_ROOT_VOLUME_SIZE}, - {'name': 'ebs-root-volume-iops', - 'help_text' : helptext.EBS_ROOT_VOLUME_IOPS}, - {'name': 'ebs-root-volume-throughput', - 'help_text' : helptext.EBS_ROOT_VOLUME_THROUGHPUT}, - {'name': 'repo-upgrade-on-boot', - 'help_text' : helptext.REPO_UPGRADE_ON_BOOT}, - {'name': 'kerberos-attributes', - 'schema': argumentschema.KERBEROS_ATTRIBUTES_SCHEMA, - 'help_text': helptext.KERBEROS_ATTRIBUTES}, - {'name': 'step-concurrency-level', - 'cli_type_name': 'integer', - 'help_text': helptext.STEP_CONCURRENCY_LEVEL}, - {'name': 'managed-scaling-policy', - 'schema': argumentschema.MANAGED_SCALING_POLICY_SCHEMA, - 'help_text': helptext.MANAGED_SCALING_POLICY}, - {'name': 'placement-group-configs', - 'schema': argumentschema.PLACEMENT_GROUP_CONFIGS_SCHEMA, - 'help_text': helptext.PLACEMENT_GROUP_CONFIGS}, - {'name': 'auto-termination-policy', - 'schema': argumentschema.AUTO_TERMINATION_POLICY_SCHEMA, - 'help_text': helptext.AUTO_TERMINATION_POLICY} + {'name': 'release-label', 'help_text': helptext.RELEASE_LABEL}, + {'name': 'os-release-label', 'help_text': helptext.OS_RELEASE_LABEL}, + {'name': 'ami-version', 'help_text': helptext.AMI_VERSION}, + { + 'name': 'instance-groups', + 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA, + 'help_text': helptext.INSTANCE_GROUPS, + }, + {'name': 'instance-type', 'help_text': helptext.INSTANCE_TYPE}, + {'name': 'instance-count', 'help_text': helptext.INSTANCE_COUNT}, + { + 'name': 'auto-terminate', + 'action': 'store_true', + 'group_name': 'auto_terminate', + 'help_text': helptext.AUTO_TERMINATE, + }, + { + 'name': 'no-auto-terminate', + 'action': 'store_true', + 'group_name': 'auto_terminate', + }, + { + 'name': 'instance-fleets', + 'schema': argumentschema.INSTANCE_FLEETS_SCHEMA, + 'help_text': helptext.INSTANCE_FLEETS, + }, + { + 'name': 'name', + 'default': 'Development Cluster', + 'help_text': helptext.CLUSTER_NAME, + }, + {'name': 'log-uri', 'help_text': helptext.LOG_URI}, + { + 'name': 'log-encryption-kms-key-id', + 'help_text': helptext.LOG_ENCRYPTION_KMS_KEY_ID, + }, + {'name': 'service-role', 'help_text': helptext.SERVICE_ROLE}, + {'name': 'auto-scaling-role', 'help_text': helptext.AUTOSCALING_ROLE}, + { + 'name': 'use-default-roles', + 'action': 'store_true', + 'help_text': helptext.USE_DEFAULT_ROLES, + }, + {'name': 'configurations', 'help_text': helptext.CONFIGURATIONS}, + { + 'name': 'ec2-attributes', + 'help_text': helptext.EC2_ATTRIBUTES, + 'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA, + }, + { + 'name': 'termination-protected', + 'action': 'store_true', + 'group_name': 'termination_protected', + 'help_text': helptext.TERMINATION_PROTECTED, + }, + { + 'name': 'no-termination-protected', + 'action': 'store_true', + 'group_name': 'termination_protected', + }, + { + 'name': 'unhealthy-node-replacement', + 'action': 'store_true', + 'group_name': 'unhealthy_node_replacement', + 'help_text': helptext.UNHEALTHY_NODE_REPLACEMENT, + }, + { + 'name': 'no-unhealthy-node-replacement', + 'action': 'store_true', + 'group_name': 'unhealthy_node_replacement', + }, + { + 'name': 'scale-down-behavior', + 'help_text': helptext.SCALE_DOWN_BEHAVIOR, + }, + { + 'name': 'visible-to-all-users', + 'action': 'store_true', + 'group_name': 'visibility', + 'help_text': helptext.VISIBILITY, + }, + { + 'name': 'no-visible-to-all-users', + 'action': 'store_true', + 'group_name': 'visibility', + }, + { + 'name': 'enable-debugging', + 'action': 'store_true', + 'group_name': 'debug', + 'help_text': helptext.DEBUGGING, + }, + { + 'name': 
'no-enable-debugging', + 'action': 'store_true', + 'group_name': 'debug', + }, + { + 'name': 'tags', + 'nargs': '+', + 'help_text': helptext.TAGS, + 'schema': argumentschema.TAGS_SCHEMA, + }, + { + 'name': 'bootstrap-actions', + 'help_text': helptext.BOOTSTRAP_ACTIONS, + 'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA, + }, + { + 'name': 'applications', + 'help_text': helptext.APPLICATIONS, + 'schema': argumentschema.APPLICATIONS_SCHEMA, + }, + { + 'name': 'emrfs', + 'help_text': helptext.EMR_FS, + 'schema': argumentschema.EMR_FS_SCHEMA, + }, + { + 'name': 'steps', + 'schema': argumentschema.STEPS_SCHEMA, + 'help_text': helptext.STEPS, + }, + {'name': 'additional-info', 'help_text': helptext.ADDITIONAL_INFO}, + { + 'name': 'restore-from-hbase-backup', + 'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA, + 'help_text': helptext.RESTORE_FROM_HBASE, + }, + { + 'name': 'security-configuration', + 'help_text': helptext.SECURITY_CONFIG, + }, + {'name': 'custom-ami-id', 'help_text': helptext.CUSTOM_AMI_ID}, + { + 'name': 'ebs-root-volume-size', + 'help_text': helptext.EBS_ROOT_VOLUME_SIZE, + }, + { + 'name': 'ebs-root-volume-iops', + 'help_text': helptext.EBS_ROOT_VOLUME_IOPS, + }, + { + 'name': 'ebs-root-volume-throughput', + 'help_text': helptext.EBS_ROOT_VOLUME_THROUGHPUT, + }, + { + 'name': 'repo-upgrade-on-boot', + 'help_text': helptext.REPO_UPGRADE_ON_BOOT, + }, + { + 'name': 'kerberos-attributes', + 'schema': argumentschema.KERBEROS_ATTRIBUTES_SCHEMA, + 'help_text': helptext.KERBEROS_ATTRIBUTES, + }, + { + 'name': 'step-concurrency-level', + 'cli_type_name': 'integer', + 'help_text': helptext.STEP_CONCURRENCY_LEVEL, + }, + { + 'name': 'managed-scaling-policy', + 'schema': argumentschema.MANAGED_SCALING_POLICY_SCHEMA, + 'help_text': helptext.MANAGED_SCALING_POLICY, + }, + { + 'name': 'placement-group-configs', + 'schema': argumentschema.PLACEMENT_GROUP_CONFIGS_SCHEMA, + 'help_text': helptext.PLACEMENT_GROUP_CONFIGS, + }, + { + 'name': 'auto-termination-policy', + 'schema': argumentschema.AUTO_TERMINATION_POLICY_SCHEMA, + 'help_text': helptext.AUTO_TERMINATION_POLICY, + }, ] SYNOPSIS = BasicCommand.FROM_FILE('emr', 'create-cluster-synopsis.txt') EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-cluster-examples.rst') @@ -156,71 +224,95 @@ def _run_main_command(self, parsed_args, parsed_globals): service_role_validation_message = ( " Either choose --use-default-roles or use both --service-role " - " and --ec2-attributes InstanceProfile=.") + " and --ec2-attributes InstanceProfile=." 
+ ) - if parsed_args.use_default_roles is True and \ - parsed_args.service_role is not None: + if ( + parsed_args.use_default_roles is True + and parsed_args.service_role is not None + ): raise exceptions.MutualExclusiveOptionError( option1="--use-default-roles", option2="--service-role", - message=service_role_validation_message) + message=service_role_validation_message, + ) - if parsed_args.use_default_roles is True and \ - parsed_args.ec2_attributes is not None and \ - 'InstanceProfile' in parsed_args.ec2_attributes: + if ( + parsed_args.use_default_roles is True + and parsed_args.ec2_attributes is not None + and 'InstanceProfile' in parsed_args.ec2_attributes + ): raise exceptions.MutualExclusiveOptionError( option1="--use-default-roles", option2="--ec2-attributes InstanceProfile", - message=service_role_validation_message) + message=service_role_validation_message, + ) - if parsed_args.instance_groups is not None and \ - parsed_args.instance_fleets is not None: + if ( + parsed_args.instance_groups is not None + and parsed_args.instance_fleets is not None + ): raise exceptions.MutualExclusiveOptionError( - option1="--instance-groups", - option2="--instance-fleets") + option1="--instance-groups", option2="--instance-fleets" + ) instances_config = {} if parsed_args.instance_fleets is not None: - instances_config['InstanceFleets'] = \ + instances_config['InstanceFleets'] = ( instancefleetsutils.validate_and_build_instance_fleets( - parsed_args.instance_fleets) + parsed_args.instance_fleets + ) + ) else: - instances_config['InstanceGroups'] = \ + instances_config['InstanceGroups'] = ( instancegroupsutils.validate_and_build_instance_groups( instance_groups=parsed_args.instance_groups, instance_type=parsed_args.instance_type, - instance_count=parsed_args.instance_count) + instance_count=parsed_args.instance_count, + ) + ) if parsed_args.release_label is not None: params["ReleaseLabel"] = parsed_args.release_label if parsed_args.configurations is not None: try: params["Configurations"] = json.loads( - parsed_args.configurations) + parsed_args.configurations + ) except ValueError: raise ParamValidationError( 'aws: error: invalid json argument for ' 'option --configurations' ) - if (parsed_args.release_label is None and - parsed_args.ami_version is not None): - is_valid_ami_version = re.match(r'\d?\..*', parsed_args.ami_version) + if ( + parsed_args.release_label is None + and parsed_args.ami_version is not None + ): + is_valid_ami_version = re.match( + r'\d?\..*', parsed_args.ami_version + ) if is_valid_ami_version is None: raise exceptions.InvalidAmiVersionError( - ami_version=parsed_args.ami_version) + ami_version=parsed_args.ami_version + ) params['AmiVersion'] = parsed_args.ami_version emrutils.apply_dict( - params, 'AdditionalInfo', parsed_args.additional_info) + params, 'AdditionalInfo', parsed_args.additional_info + ) emrutils.apply_dict(params, 'LogUri', parsed_args.log_uri) if parsed_args.os_release_label is not None: - emrutils.apply_dict(params, 'OSReleaseLabel', - parsed_args.os_release_label) + emrutils.apply_dict( + params, 'OSReleaseLabel', parsed_args.os_release_label + ) if parsed_args.log_encryption_kms_key_id is not None: - emrutils.apply_dict(params, 'LogEncryptionKmsKeyId', - parsed_args.log_encryption_kms_key_id) + emrutils.apply_dict( + params, + 'LogEncryptionKmsKeyId', + parsed_args.log_encryption_kms_key_id, + ) if parsed_args.use_default_roles is True: parsed_args.service_role = EMR_ROLE_NAME @@ -236,61 +328,79 @@ def _run_main_command(self, parsed_args, 
parsed_globals): if parsed_args.auto_scaling_role is None: raise exceptions.MissingAutoScalingRoleError() - emrutils.apply_dict(params, 'AutoScalingRole', parsed_args.auto_scaling_role) + emrutils.apply_dict( + params, 'AutoScalingRole', parsed_args.auto_scaling_role + ) if parsed_args.scale_down_behavior is not None: - emrutils.apply_dict(params, 'ScaleDownBehavior', parsed_args.scale_down_behavior) + emrutils.apply_dict( + params, 'ScaleDownBehavior', parsed_args.scale_down_behavior + ) if ( - parsed_args.no_auto_terminate is False and - parsed_args.auto_terminate is False): + parsed_args.no_auto_terminate is False + and parsed_args.auto_terminate is False + ): parsed_args.no_auto_terminate = True - instances_config['KeepJobFlowAliveWhenNoSteps'] = \ + instances_config['KeepJobFlowAliveWhenNoSteps'] = ( emrutils.apply_boolean_options( parsed_args.no_auto_terminate, '--no-auto-terminate', parsed_args.auto_terminate, - '--auto-terminate') + '--auto-terminate', + ) + ) - instances_config['TerminationProtected'] = \ + instances_config['TerminationProtected'] = ( emrutils.apply_boolean_options( parsed_args.termination_protected, '--termination-protected', parsed_args.no_termination_protected, - '--no-termination-protected') - - if (parsed_args.unhealthy_node_replacement or parsed_args.no_unhealthy_node_replacement): - instances_config['UnhealthyNodeReplacement'] = \ - emrutils.apply_boolean_options( - parsed_args.unhealthy_node_replacement, - '--unhealthy-node-replacement', - parsed_args.no_unhealthy_node_replacement, - '--no-unhealthy-node-replacement') + '--no-termination-protected', + ) + ) + + if ( + parsed_args.unhealthy_node_replacement + or parsed_args.no_unhealthy_node_replacement + ): + instances_config['UnhealthyNodeReplacement'] = ( + emrutils.apply_boolean_options( + parsed_args.unhealthy_node_replacement, + '--unhealthy-node-replacement', + parsed_args.no_unhealthy_node_replacement, + '--no-unhealthy-node-replacement', + ) + ) - if (parsed_args.visible_to_all_users is False and - parsed_args.no_visible_to_all_users is False): + if ( + parsed_args.visible_to_all_users is False + and parsed_args.no_visible_to_all_users is False + ): parsed_args.visible_to_all_users = True - params['VisibleToAllUsers'] = \ - emrutils.apply_boolean_options( - parsed_args.visible_to_all_users, - '--visible-to-all-users', - parsed_args.no_visible_to_all_users, - '--no-visible-to-all-users') + params['VisibleToAllUsers'] = emrutils.apply_boolean_options( + parsed_args.visible_to_all_users, + '--visible-to-all-users', + parsed_args.no_visible_to_all_users, + '--no-visible-to-all-users', + ) params['Tags'] = emrutils.parse_tags(parsed_args.tags) params['Instances'] = instances_config if parsed_args.ec2_attributes is not None: self._build_ec2_attributes( - cluster=params, parsed_attrs=parsed_args.ec2_attributes) + cluster=params, parsed_attrs=parsed_args.ec2_attributes + ) debugging_enabled = emrutils.apply_boolean_options( parsed_args.enable_debugging, '--enable-debugging', parsed_args.no_enable_debugging, - '--no-enable-debugging') + '--no-enable-debugging', + ) if parsed_args.log_uri is None and debugging_enabled is True: raise exceptions.LogUriError @@ -300,21 +410,24 @@ def _run_main_command(self, parsed_args, parsed_globals): cluster=params, key='Steps', value=[ - self._build_enable_debugging(parsed_args, parsed_globals)]) + self._build_enable_debugging(parsed_args, parsed_globals) + ], + ) if parsed_args.applications is not None: if parsed_args.release_label is None: - app_list, ba_list, step_list = 
\ + app_list, ba_list, step_list = ( applicationutils.build_applications( region=self.region, parsed_applications=parsed_args.applications, - ami_version=params['AmiVersion']) - self._update_cluster_dict( - params, 'NewSupportedProducts', app_list) - self._update_cluster_dict( - params, 'BootstrapActions', ba_list) + ami_version=params['AmiVersion'], + ) + ) self._update_cluster_dict( - params, 'Steps', step_list) + params, 'NewSupportedProducts', app_list + ) + self._update_cluster_dict(params, 'BootstrapActions', ba_list) + self._update_cluster_dict(params, 'Steps', step_list) else: params["Applications"] = [] for application in parsed_args.applications: @@ -324,37 +437,45 @@ def _run_main_command(self, parsed_args, parsed_globals): if hbase_restore_config is not None: args = hbaseutils.build_hbase_restore_from_backup_args( dir=hbase_restore_config.get('Dir'), - backup_version=hbase_restore_config.get('BackupVersion')) + backup_version=hbase_restore_config.get('BackupVersion'), + ) step_config = emrutils.build_step( jar=constants.HBASE_JAR_PATH, name=constants.HBASE_RESTORE_STEP_NAME, action_on_failure=constants.CANCEL_AND_WAIT, - args=args) - self._update_cluster_dict( - params, 'Steps', [step_config]) + args=args, + ) + self._update_cluster_dict(params, 'Steps', [step_config]) if parsed_args.bootstrap_actions is not None: self._build_bootstrap_actions( cluster=params, - parsed_boostrap_actions=parsed_args.bootstrap_actions) + parsed_boostrap_actions=parsed_args.bootstrap_actions, + ) if parsed_args.emrfs is not None: self._handle_emrfs_parameters( cluster=params, emrfs_args=parsed_args.emrfs, - release_label=parsed_args.release_label) + release_label=parsed_args.release_label, + ) if parsed_args.steps is not None: steps_list = steputils.build_step_config_list( parsed_step_list=parsed_args.steps, region=self.region, - release_label=parsed_args.release_label) + release_label=parsed_args.release_label, + ) self._update_cluster_dict( - cluster=params, key='Steps', value=steps_list) + cluster=params, key='Steps', value=steps_list + ) if parsed_args.security_configuration is not None: emrutils.apply_dict( - params, 'SecurityConfiguration', parsed_args.security_configuration) + params, + 'SecurityConfiguration', + parsed_args.security_configuration, + ) if parsed_args.custom_ami_id is not None: emrutils.apply_dict( @@ -362,15 +483,21 @@ def _run_main_command(self, parsed_args, parsed_globals): ) if parsed_args.ebs_root_volume_size is not None: emrutils.apply_dict( - params, 'EbsRootVolumeSize', int(parsed_args.ebs_root_volume_size) + params, + 'EbsRootVolumeSize', + int(parsed_args.ebs_root_volume_size), ) if parsed_args.ebs_root_volume_iops is not None: emrutils.apply_dict( - params, 'EbsRootVolumeIops', int(parsed_args.ebs_root_volume_iops) + params, + 'EbsRootVolumeIops', + int(parsed_args.ebs_root_volume_iops), ) if parsed_args.ebs_root_volume_throughput is not None: emrutils.apply_dict( - params, 'EbsRootVolumeThroughput', int(parsed_args.ebs_root_volume_throughput) + params, + 'EbsRootVolumeThroughput', + int(parsed_args.ebs_root_volume_throughput), ) if parsed_args.repo_upgrade_on_boot is not None: @@ -380,34 +507,48 @@ def _run_main_command(self, parsed_args, parsed_globals): if parsed_args.kerberos_attributes is not None: emrutils.apply_dict( - params, 'KerberosAttributes', parsed_args.kerberos_attributes) + params, 'KerberosAttributes', parsed_args.kerberos_attributes + ) if parsed_args.step_concurrency_level is not None: params['StepConcurrencyLevel'] = 
parsed_args.step_concurrency_level if parsed_args.managed_scaling_policy is not None: emrutils.apply_dict( - params, 'ManagedScalingPolicy', parsed_args.managed_scaling_policy) + params, + 'ManagedScalingPolicy', + parsed_args.managed_scaling_policy, + ) if parsed_args.placement_group_configs is not None: emrutils.apply_dict( - params, 'PlacementGroupConfigs', - parsed_args.placement_group_configs) + params, + 'PlacementGroupConfigs', + parsed_args.placement_group_configs, + ) if parsed_args.auto_termination_policy is not None: emrutils.apply_dict( - params, 'AutoTerminationPolicy', - parsed_args.auto_termination_policy) + params, + 'AutoTerminationPolicy', + parsed_args.auto_termination_policy, + ) self._validate_required_applications(parsed_args) run_job_flow_response = emrutils.call( - self._session, 'run_job_flow', params, self.region, - parsed_globals.endpoint_url, parsed_globals.verify_ssl) + self._session, + 'run_job_flow', + params, + self.region, + parsed_globals.endpoint_url, + parsed_globals.verify_ssl, + ) constructed_result = self._construct_result(run_job_flow_response) - emrutils.display_response(self._session, 'run_job_flow', - constructed_result, parsed_globals) + emrutils.display_response( + self._session, 'run_job_flow', constructed_result, parsed_globals + ) return 0 @@ -419,8 +560,7 @@ def _construct_result(self, run_job_flow_result): clusterArn = run_job_flow_result.get('ClusterArn') if jobFlowId is not None: - return {'ClusterId': jobFlowId, - 'ClusterArn': clusterArn } + return {'ClusterId': jobFlowId, 'ClusterArn': clusterArn} else: return {} @@ -428,83 +568,118 @@ def _build_ec2_attributes(self, cluster, parsed_attrs): keys = parsed_attrs.keys() instances = cluster['Instances'] - if ('SubnetId' in keys and 'SubnetIds' in keys): + if 'SubnetId' in keys and 'SubnetIds' in keys: raise exceptions.MutualExclusiveOptionError( - option1="SubnetId", - option2="SubnetIds") + option1="SubnetId", option2="SubnetIds" + ) - if ('AvailabilityZone' in keys and 'AvailabilityZones' in keys): + if 'AvailabilityZone' in keys and 'AvailabilityZones' in keys: raise exceptions.MutualExclusiveOptionError( - option1="AvailabilityZone", - option2="AvailabilityZones") + option1="AvailabilityZone", option2="AvailabilityZones" + ) - if ('SubnetId' in keys or 'SubnetIds' in keys) \ - and ('AvailabilityZone' in keys or 'AvailabilityZones' in keys): + if ('SubnetId' in keys or 'SubnetIds' in keys) and ( + 'AvailabilityZone' in keys or 'AvailabilityZones' in keys + ): raise exceptions.SubnetAndAzValidationError emrutils.apply_params( - src_params=parsed_attrs, src_key='KeyName', - dest_params=instances, dest_key='Ec2KeyName') + src_params=parsed_attrs, + src_key='KeyName', + dest_params=instances, + dest_key='Ec2KeyName', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='SubnetId', - dest_params=instances, dest_key='Ec2SubnetId') + src_params=parsed_attrs, + src_key='SubnetId', + dest_params=instances, + dest_key='Ec2SubnetId', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='SubnetIds', - dest_params=instances, dest_key='Ec2SubnetIds') + src_params=parsed_attrs, + src_key='SubnetIds', + dest_params=instances, + dest_key='Ec2SubnetIds', + ) if 'AvailabilityZone' in keys: instances['Placement'] = dict() emrutils.apply_params( - src_params=parsed_attrs, src_key='AvailabilityZone', + src_params=parsed_attrs, + src_key='AvailabilityZone', dest_params=instances['Placement'], - dest_key='AvailabilityZone') + dest_key='AvailabilityZone', + ) if 'AvailabilityZones' in keys: 
instances['Placement'] = dict() emrutils.apply_params( - src_params=parsed_attrs, src_key='AvailabilityZones', + src_params=parsed_attrs, + src_key='AvailabilityZones', dest_params=instances['Placement'], - dest_key='AvailabilityZones') + dest_key='AvailabilityZones', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='InstanceProfile', - dest_params=cluster, dest_key='JobFlowRole') + src_params=parsed_attrs, + src_key='InstanceProfile', + dest_params=cluster, + dest_key='JobFlowRole', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='EmrManagedMasterSecurityGroup', - dest_params=instances, dest_key='EmrManagedMasterSecurityGroup') + src_params=parsed_attrs, + src_key='EmrManagedMasterSecurityGroup', + dest_params=instances, + dest_key='EmrManagedMasterSecurityGroup', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='EmrManagedSlaveSecurityGroup', - dest_params=instances, dest_key='EmrManagedSlaveSecurityGroup') + src_params=parsed_attrs, + src_key='EmrManagedSlaveSecurityGroup', + dest_params=instances, + dest_key='EmrManagedSlaveSecurityGroup', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='ServiceAccessSecurityGroup', - dest_params=instances, dest_key='ServiceAccessSecurityGroup') + src_params=parsed_attrs, + src_key='ServiceAccessSecurityGroup', + dest_params=instances, + dest_key='ServiceAccessSecurityGroup', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='AdditionalMasterSecurityGroups', - dest_params=instances, dest_key='AdditionalMasterSecurityGroups') + src_params=parsed_attrs, + src_key='AdditionalMasterSecurityGroups', + dest_params=instances, + dest_key='AdditionalMasterSecurityGroups', + ) emrutils.apply_params( - src_params=parsed_attrs, src_key='AdditionalSlaveSecurityGroups', - dest_params=instances, dest_key='AdditionalSlaveSecurityGroups') + src_params=parsed_attrs, + src_key='AdditionalSlaveSecurityGroups', + dest_params=instances, + dest_key='AdditionalSlaveSecurityGroups', + ) emrutils.apply(params=cluster, key='Instances', value=instances) return cluster - def _build_bootstrap_actions( - self, cluster, parsed_boostrap_actions): + def _build_bootstrap_actions(self, cluster, parsed_boostrap_actions): cluster_ba_list = cluster.get('BootstrapActions') if cluster_ba_list is None: cluster_ba_list = [] bootstrap_actions = [] - if len(cluster_ba_list) + len(parsed_boostrap_actions) \ - > constants.MAX_BOOTSTRAP_ACTION_NUMBER: - raise ParamValidationError('aws: error: maximum number of ' - 'bootstrap actions for a cluster exceeded.') + if ( + len(cluster_ba_list) + len(parsed_boostrap_actions) + > constants.MAX_BOOTSTRAP_ACTION_NUMBER + ): + raise ParamValidationError( + 'aws: error: maximum number of ' + 'bootstrap actions for a cluster exceeded.' 
+ ) for ba in parsed_boostrap_actions: ba_config = {} @@ -514,15 +689,22 @@ def _build_bootstrap_actions( ba_config['Name'] = constants.BOOTSTRAP_ACTION_NAME script_arg_config = {} emrutils.apply_params( - src_params=ba, src_key='Path', - dest_params=script_arg_config, dest_key='Path') + src_params=ba, + src_key='Path', + dest_params=script_arg_config, + dest_key='Path', + ) emrutils.apply_params( - src_params=ba, src_key='Args', - dest_params=script_arg_config, dest_key='Args') + src_params=ba, + src_key='Args', + dest_params=script_arg_config, + dest_key='Args', + ) emrutils.apply( params=ba_config, key='ScriptBootstrapAction', - value=script_arg_config) + value=script_arg_config, + ) bootstrap_actions.append(ba_config) result = cluster_ba_list + bootstrap_actions @@ -537,15 +719,18 @@ def _build_enable_debugging(self, parsed_args, parsed_globals): args = [constants.DEBUGGING_COMMAND] else: jar = emrutils.get_script_runner(self.region) - args = [emrutils.build_s3_link( - relative_path=constants.DEBUGGING_PATH, - region=self.region)] + args = [ + emrutils.build_s3_link( + relative_path=constants.DEBUGGING_PATH, region=self.region + ) + ] return emrutils.build_step( name=constants.DEBUGGING_NAME, action_on_failure=constants.TERMINATE_CLUSTER, jar=jar, - args=args) + args=args, + ) def _update_cluster_dict(self, cluster, key, value): if key in cluster: @@ -555,29 +740,34 @@ def _update_cluster_dict(self, cluster, key, value): return cluster def _validate_release_label_ami_version(self, parsed_args): - if parsed_args.ami_version is not None and \ - parsed_args.release_label is not None: + if ( + parsed_args.ami_version is not None + and parsed_args.release_label is not None + ): raise exceptions.MutualExclusiveOptionError( - option1="--ami-version", - option2="--release-label") + option1="--ami-version", option2="--release-label" + ) - if parsed_args.ami_version is None and \ - parsed_args.release_label is None: + if ( + parsed_args.ami_version is None + and parsed_args.release_label is None + ): raise exceptions.RequiredOptionsError( - option1="--ami-version", - option2="--release-label") + option1="--ami-version", option2="--release-label" + ) # Checks if the applications required by steps are specified # using the --applications option. def _validate_required_applications(self, parsed_args): - specified_apps = set([]) if parsed_args.applications is not None: - specified_apps = \ - set([app['Name'].lower() for app in parsed_args.applications]) + specified_apps = set( + [app['Name'].lower() for app in parsed_args.applications] + ) - missing_apps = self._get_missing_applications_for_steps(specified_apps, - parsed_args) + missing_apps = self._get_missing_applications_for_steps( + specified_apps, parsed_args + ) # Check for HBase. 
if parsed_args.restore_from_hbase_backup is not None: if constants.HBASE not in specified_apps: @@ -585,11 +775,13 @@ def _validate_required_applications(self, parsed_args): if missing_apps: raise exceptions.MissingApplicationsError( - applications=missing_apps) + applications=missing_apps + ) def _get_missing_applications_for_steps(self, specified_apps, parsed_args): - allowed_app_steps = set([constants.HIVE, constants.PIG, - constants.IMPALA]) + allowed_app_steps = set( + [constants.HIVE, constants.PIG, constants.IMPALA] + ) missing_apps = set() if parsed_args.steps is not None: for step in parsed_args.steps: @@ -599,38 +791,51 @@ def _get_missing_applications_for_steps(self, specified_apps, parsed_args): if step_type is not None: step_type = step_type.lower() - if step_type in allowed_app_steps and \ - step_type not in specified_apps: + if ( + step_type in allowed_app_steps + and step_type not in specified_apps + ): missing_apps.add(step['Type'].title()) return missing_apps - def _filter_configurations_in_special_cases(self, configurations, - parsed_args, parsed_configs): + def _filter_configurations_in_special_cases( + self, configurations, parsed_args, parsed_configs + ): if parsed_args.use_default_roles: - configurations = [x for x in configurations - if x.name != 'service_role' and - x.name != 'instance_profile'] + configurations = [ + x + for x in configurations + if x.name != 'service_role' and x.name != 'instance_profile' + ] return configurations def _handle_emrfs_parameters(self, cluster, emrfs_args, release_label): if release_label: self.validate_no_emrfs_configuration(cluster) emrfs_configuration = emrfsutils.build_emrfs_confiuration( - emrfs_args) + emrfs_args + ) self._update_cluster_dict( - cluster=cluster, key='Configurations', - value=[emrfs_configuration]) + cluster=cluster, + key='Configurations', + value=[emrfs_configuration], + ) else: emrfs_ba_config_list = emrfsutils.build_bootstrap_action_configs( - self.region, emrfs_args) + self.region, emrfs_args + ) self._update_cluster_dict( - cluster=cluster, key='BootstrapActions', - value=emrfs_ba_config_list) + cluster=cluster, + key='BootstrapActions', + value=emrfs_ba_config_list, + ) def validate_no_emrfs_configuration(self, cluster): if 'Configurations' in cluster: for config in cluster['Configurations']: - if config is not None and \ - config.get('Classification') == constants.EMRFS_SITE: + if ( + config is not None + and config.get('Classification') == constants.EMRFS_SITE + ): raise exceptions.DuplicateEmrFsConfigurationError diff --git a/awscli/customizations/emr/createdefaultroles.py b/awscli/customizations/emr/createdefaultroles.py index 11c7c0e0580a..ac16d7bd0070 100644 --- a/awscli/customizations/emr/createdefaultroles.py +++ b/awscli/customizations/emr/createdefaultroles.py @@ -13,32 +13,29 @@ import logging import re + import botocore.exceptions import botocore.session -from botocore import xform_name - -from awscli.customizations.utils import get_policy_arn_suffix -from awscli.customizations.emr import configutils -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import configutils, emrutils, exceptions from awscli.customizations.emr.command import Command -from awscli.customizations.emr.constants import EC2 -from awscli.customizations.emr.constants import EC2_ROLE_NAME -from awscli.customizations.emr.constants import EC2_SERVICE_PRINCIPAL -from awscli.customizations.emr.constants import ROLE_ARN_PATTERN -from 
awscli.customizations.emr.constants import EMR -from awscli.customizations.emr.constants import EMR_ROLE_NAME -from awscli.customizations.emr.constants import EMR_AUTOSCALING_ROLE_NAME -from awscli.customizations.emr.constants import APPLICATION_AUTOSCALING -from awscli.customizations.emr.constants import EC2_ROLE_POLICY_NAME -from awscli.customizations.emr.constants import EMR_ROLE_POLICY_NAME -from awscli.customizations.emr.constants \ - import EMR_AUTOSCALING_ROLE_POLICY_NAME -from awscli.customizations.emr.constants import EMR_AUTOSCALING_SERVICE_NAME -from awscli.customizations.emr.constants \ - import EMR_AUTOSCALING_SERVICE_PRINCIPAL +from awscli.customizations.emr.constants import ( + APPLICATION_AUTOSCALING, + EC2, + EC2_ROLE_NAME, + EC2_ROLE_POLICY_NAME, + EC2_SERVICE_PRINCIPAL, + EMR, + EMR_AUTOSCALING_ROLE_NAME, + EMR_AUTOSCALING_ROLE_POLICY_NAME, + EMR_AUTOSCALING_SERVICE_NAME, + EMR_AUTOSCALING_SERVICE_PRINCIPAL, + EMR_ROLE_NAME, + EMR_ROLE_POLICY_NAME, + ROLE_ARN_PATTERN, +) from awscli.customizations.emr.exceptions import ResolveServicePrincipalError - +from awscli.customizations.utils import get_policy_arn_suffix +from botocore import xform_name LOG = logging.getLogger(__name__) @@ -51,9 +48,9 @@ def assume_role_policy(serviceprincipal): "Sid": "", "Effect": "Allow", "Principal": {"Service": serviceprincipal}, - "Action": "sts:AssumeRole" + "Action": "sts:AssumeRole", } - ] + ], } @@ -94,107 +91,138 @@ def _get_suffix_and_region_from_endpoint_host(endpoint_host): def _get_regex_match_from_endpoint_host(endpoint_host): if endpoint_host is None: return None - regex_match = re.match("(https?://)([^.]+).elasticmapreduce.([^/]*)", - endpoint_host) + regex_match = re.match( + "(https?://)([^.]+).elasticmapreduce.([^/]*)", endpoint_host + ) # Supports 'elasticmapreduce.{region}.' and '{region}.elasticmapreduce.' if regex_match is None: - regex_match = re.match("(https?://elasticmapreduce).([^.]+).([^/]*)", - endpoint_host) + regex_match = re.match( + "(https?://elasticmapreduce).([^.]+).([^/]*)", endpoint_host + ) return regex_match class CreateDefaultRoles(Command): NAME = "create-default-roles" - DESCRIPTION = ('Creates the default IAM role ' + - EC2_ROLE_NAME + ' and ' + - EMR_ROLE_NAME + ' which can be used when creating the' - ' cluster using the create-cluster command. The default' - ' roles for EMR use managed policies, which are updated' - ' automatically to support future EMR functionality.\n' - '\nIf you do not have a Service Role and Instance Profile ' - 'variable set for your create-cluster command in the AWS ' - 'CLI config file, create-default-roles will automatically ' - 'set the values for these variables with these default ' - 'roles. If you have already set a value for Service Role ' - 'or Instance Profile, create-default-roles will not ' - 'automatically set the defaults for these variables in the ' - 'AWS CLI config file. You can view settings for variables ' - 'in the config file using the "aws configure get" command.' - '\n') + DESCRIPTION = ( + 'Creates the default IAM role ' + + EC2_ROLE_NAME + + ' and ' + + EMR_ROLE_NAME + + ' which can be used when creating the' + ' cluster using the create-cluster command. 
The default' + ' roles for EMR use managed policies, which are updated' + ' automatically to support future EMR functionality.\n' + '\nIf you do not have a Service Role and Instance Profile ' + 'variable set for your create-cluster command in the AWS ' + 'CLI config file, create-default-roles will automatically ' + 'set the values for these variables with these default ' + 'roles. If you have already set a value for Service Role ' + 'or Instance Profile, create-default-roles will not ' + 'automatically set the defaults for these variables in the ' + 'AWS CLI config file. You can view settings for variables ' + 'in the config file using the "aws configure get" command.' + '\n' + ) ARG_TABLE = [ - {'name': 'iam-endpoint', - 'no_paramfile': True, - 'help_text': '<p>
    The IAM endpoint to call for creating the roles.' - ' This is optional and should only be specified when a' - ' custom endpoint should be called for IAM operations' - '.</p>
    '} + { + 'name': 'iam-endpoint', + 'no_paramfile': True, + 'help_text': '<p>
    The IAM endpoint to call for creating the roles.' + ' This is optional and should only be specified when a' + ' custom endpoint should be called for IAM operations' + '.</p>
    ', + } ] def _run_main_command(self, parsed_args, parsed_globals): - self.iam_endpoint_url = parsed_args.iam_endpoint self._check_for_iam_endpoint(self.region, self.iam_endpoint_url) - self.emr_endpoint_url = \ - self._session.create_client( - 'emr', - region_name=self.region, - endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl).meta.endpoint_url - - LOG.debug('elasticmapreduce endpoint used for resolving' - ' service principal: ' + self.emr_endpoint_url) + self.emr_endpoint_url = self._session.create_client( + 'emr', + region_name=self.region, + endpoint_url=parsed_globals.endpoint_url, + verify=parsed_globals.verify_ssl, + ).meta.endpoint_url + + LOG.debug( + 'elasticmapreduce endpoint used for resolving' + ' service principal: ' + self.emr_endpoint_url + ) # Create default EC2 Role for EMR if it does not exist. - ec2_result, ec2_policy = self._create_role_if_not_exists(parsed_globals, EC2_ROLE_NAME, - EC2_ROLE_POLICY_NAME, [EC2]) + ec2_result, ec2_policy = self._create_role_if_not_exists( + parsed_globals, EC2_ROLE_NAME, EC2_ROLE_POLICY_NAME, [EC2] + ) # Create default EC2 Instance Profile for EMR if it does not exist. instance_profile_name = EC2_ROLE_NAME - if self.check_if_instance_profile_exists(instance_profile_name, - parsed_globals): + if self.check_if_instance_profile_exists( + instance_profile_name, parsed_globals + ): LOG.debug('Instance Profile ' + instance_profile_name + ' exists.') else: - LOG.debug('Instance Profile ' + instance_profile_name + - 'does not exist. Creating default Instance Profile ' + - instance_profile_name) - self._create_instance_profile_with_role(instance_profile_name, - instance_profile_name, - parsed_globals) + LOG.debug( + 'Instance Profile ' + + instance_profile_name + + 'does not exist. Creating default Instance Profile ' + + instance_profile_name + ) + self._create_instance_profile_with_role( + instance_profile_name, instance_profile_name, parsed_globals + ) # Create default EMR Role if it does not exist. - emr_result, emr_policy = self._create_role_if_not_exists(parsed_globals, EMR_ROLE_NAME, - EMR_ROLE_POLICY_NAME, [EMR]) + emr_result, emr_policy = self._create_role_if_not_exists( + parsed_globals, EMR_ROLE_NAME, EMR_ROLE_POLICY_NAME, [EMR] + ) # Create default EMR AutoScaling Role if it does not exist. - emr_autoscaling_result, emr_autoscaling_policy = \ - self._create_role_if_not_exists(parsed_globals, EMR_AUTOSCALING_ROLE_NAME, - EMR_AUTOSCALING_ROLE_POLICY_NAME, [EMR, APPLICATION_AUTOSCALING]) + emr_autoscaling_result, emr_autoscaling_policy = ( + self._create_role_if_not_exists( + parsed_globals, + EMR_AUTOSCALING_ROLE_NAME, + EMR_AUTOSCALING_ROLE_POLICY_NAME, + [EMR, APPLICATION_AUTOSCALING], + ) + ) configutils.update_roles(self._session) emrutils.display_response( self._session, 'create_role', - self._construct_result(ec2_result, ec2_policy, - emr_result, emr_policy, - emr_autoscaling_result, emr_autoscaling_policy), - parsed_globals) + self._construct_result( + ec2_result, + ec2_policy, + emr_result, + emr_policy, + emr_autoscaling_result, + emr_autoscaling_policy, + ), + parsed_globals, + ) return 0 - def _create_role_if_not_exists(self, parsed_globals, role_name, policy_name, service_names): + def _create_role_if_not_exists( + self, parsed_globals, role_name, policy_name, service_names + ): result = None policy = None if self.check_if_role_exists(role_name, parsed_globals): LOG.debug('Role ' + role_name + ' exists.') else: - LOG.debug('Role ' + role_name + ' does not exist.' 
- ' Creating default role: ' + role_name) + LOG.debug( + 'Role ' + role_name + ' does not exist.' + ' Creating default role: ' + role_name + ) role_arn = get_role_policy_arn(self.region, policy_name) result = self._create_role_with_role_policy( - role_name, service_names, role_arn, parsed_globals) + role_name, service_names, role_arn, parsed_globals + ) policy = self._get_role_policy(role_arn, parsed_globals) return result, policy @@ -205,20 +233,30 @@ def _check_for_iam_endpoint(self, region, iam_endpoint): if iam_endpoint is None: raise exceptions.UnknownIamEndpointError(region=region) - def _construct_result(self, ec2_response, ec2_policy, - emr_response, emr_policy, - emr_autoscaling_response, emr_autoscaling_policy): + def _construct_result( + self, + ec2_response, + ec2_policy, + emr_response, + emr_policy, + emr_autoscaling_response, + emr_autoscaling_policy, + ): result = [] self._construct_role_and_role_policy_structure( - result, ec2_response, ec2_policy) + result, ec2_response, ec2_policy + ) self._construct_role_and_role_policy_structure( - result, emr_response, emr_policy) + result, emr_response, emr_policy + ) self._construct_role_and_role_policy_structure( - result, emr_autoscaling_response, emr_autoscaling_policy) + result, emr_autoscaling_response, emr_autoscaling_policy + ) return result def _construct_role_and_role_policy_structure( - self, list, response, policy): + self, list, response, policy + ): if response is not None and response['Role'] is not None: list.append({'Role': response['Role'], 'RolePolicy': policy}) return list @@ -240,12 +278,14 @@ def check_if_role_exists(self, role_name, parsed_globals): return True - def check_if_instance_profile_exists(self, instance_profile_name, - parsed_globals): + def check_if_instance_profile_exists( + self, instance_profile_name, parsed_globals + ): parameters = {'InstanceProfileName': instance_profile_name} try: - self._call_iam_operation('GetInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'GetInstanceProfile', parameters, parsed_globals + ) except botocore.exceptions.ClientError as e: profile_not_found_code = 'NoSuchEntity' error_code = e.response.get('Error', {}).get('Code') @@ -261,59 +301,74 @@ def check_if_instance_profile_exists(self, instance_profile_name, def _get_role_policy(self, arn, parsed_globals): parameters = {} parameters['PolicyArn'] = arn - policy_details = self._call_iam_operation('GetPolicy', parameters, - parsed_globals) + policy_details = self._call_iam_operation( + 'GetPolicy', parameters, parsed_globals + ) parameters["VersionId"] = policy_details["Policy"]["DefaultVersionId"] - policy_version_details = self._call_iam_operation('GetPolicyVersion', - parameters, - parsed_globals) + policy_version_details = self._call_iam_operation( + 'GetPolicyVersion', parameters, parsed_globals + ) return policy_version_details["PolicyVersion"]["Document"] def _create_role_with_role_policy( - self, role_name, service_names, role_arn, parsed_globals): - + self, role_name, service_names, role_arn, parsed_globals + ): if len(service_names) == 1: service_principal = get_service_principal( - service_names[0], self.emr_endpoint_url, self._session) + service_names[0], self.emr_endpoint_url, self._session + ) else: service_principal = [] for service in service_names: - service_principal.append(get_service_principal( - service, self.emr_endpoint_url, self._session)) + service_principal.append( + get_service_principal( + service, self.emr_endpoint_url, self._session + ) + ) - LOG.debug(f'Adding 
service principal(s) to trust policy: {service_principal}') + LOG.debug( + f'Adding service principal(s) to trust policy: {service_principal}' + ) parameters = {'RoleName': role_name} - _assume_role_policy = \ - emrutils.dict_to_string(assume_role_policy(service_principal)) + _assume_role_policy = emrutils.dict_to_string( + assume_role_policy(service_principal) + ) parameters['AssumeRolePolicyDocument'] = _assume_role_policy - create_role_response = self._call_iam_operation('CreateRole', - parameters, - parsed_globals) + create_role_response = self._call_iam_operation( + 'CreateRole', parameters, parsed_globals + ) parameters = {} parameters['PolicyArn'] = role_arn parameters['RoleName'] = role_name - self._call_iam_operation('AttachRolePolicy', - parameters, parsed_globals) + self._call_iam_operation( + 'AttachRolePolicy', parameters, parsed_globals + ) return create_role_response - def _create_instance_profile_with_role(self, instance_profile_name, - role_name, parsed_globals): + def _create_instance_profile_with_role( + self, instance_profile_name, role_name, parsed_globals + ): # Creating an Instance Profile parameters = {'InstanceProfileName': instance_profile_name} - self._call_iam_operation('CreateInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'CreateInstanceProfile', parameters, parsed_globals + ) # Adding the role to the Instance Profile parameters = {} parameters['InstanceProfileName'] = instance_profile_name parameters['RoleName'] = role_name - self._call_iam_operation('AddRoleToInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'AddRoleToInstanceProfile', parameters, parsed_globals + ) def _call_iam_operation(self, operation_name, parameters, parsed_globals): client = self._session.create_client( - 'iam', region_name=self.region, endpoint_url=self.iam_endpoint_url, - verify=parsed_globals.verify_ssl) + 'iam', + region_name=self.region, + endpoint_url=self.iam_endpoint_url, + verify=parsed_globals.verify_ssl, + ) return getattr(client, xform_name(operation_name))(**parameters) diff --git a/awscli/customizations/emr/describecluster.py b/awscli/customizations/emr/describecluster.py index 01f4b40b6d40..22478bde6b4a 100644 --- a/awscli/customizations/emr/describecluster.py +++ b/awscli/customizations/emr/describecluster.py @@ -12,9 +12,7 @@ # language governing permissions and limitations under the License. 
from awscli.customizations.commands import BasicCommand -from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import helptext +from awscli.customizations.emr import constants, emrutils, helptext from awscli.customizations.emr.command import Command from botocore.exceptions import NoCredentialsError @@ -23,8 +21,11 @@ class DescribeCluster(Command): NAME = 'describe-cluster' DESCRIPTION = helptext.DESCRIBE_CLUSTER_DESCRIPTION ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + } ] def _run_main_command(self, parsed_args, parsed_globals): @@ -34,44 +35,61 @@ def _run_main_command(self, parsed_args, parsed_globals): is_fleet_based_cluster = False describe_cluster_result = self._call( - self._session, 'describe_cluster', parameters, parsed_globals) - + self._session, 'describe_cluster', parameters, parsed_globals + ) if 'Cluster' in describe_cluster_result: describe_cluster = describe_cluster_result['Cluster'] - if describe_cluster.get('InstanceCollectionType') == constants.INSTANCE_FLEET_TYPE: + if ( + describe_cluster.get('InstanceCollectionType') + == constants.INSTANCE_FLEET_TYPE + ): is_fleet_based_cluster = True if is_fleet_based_cluster: list_instance_fleets_result = self._call( - self._session, 'list_instance_fleets', parameters, - parsed_globals) + self._session, + 'list_instance_fleets', + parameters, + parsed_globals, + ) else: list_instance_groups_result = self._call( - self._session, 'list_instance_groups', parameters, - parsed_globals) + self._session, + 'list_instance_groups', + parameters, + parsed_globals, + ) list_bootstrap_actions_result = self._call( - self._session, 'list_bootstrap_actions', - parameters, parsed_globals) + self._session, 'list_bootstrap_actions', parameters, parsed_globals + ) constructed_result = self._construct_result( describe_cluster_result, list_instance_fleets_result, list_instance_groups_result, - list_bootstrap_actions_result) + list_bootstrap_actions_result, + ) - emrutils.display_response(self._session, 'describe_cluster', - constructed_result, parsed_globals) + emrutils.display_response( + self._session, + 'describe_cluster', + constructed_result, + parsed_globals, + ) return 0 def _call(self, session, operation_name, parameters, parsed_globals): return emrutils.call( - session, operation_name, parameters, + session, + operation_name, + parameters, region_name=self.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) def _get_key_of_result(self, keys): # Return the first key that is not "Marker" @@ -80,23 +98,36 @@ def _get_key_of_result(self, keys): return key def _construct_result( - self, describe_cluster_result, list_instance_fleets_result, - list_instance_groups_result, list_bootstrap_actions_result): + self, + describe_cluster_result, + list_instance_fleets_result, + list_instance_groups_result, + list_bootstrap_actions_result, + ): result = describe_cluster_result result['Cluster']['BootstrapActions'] = [] - if (list_instance_fleets_result is not None and - list_instance_fleets_result.get('InstanceFleets') is not None): - result['Cluster']['InstanceFleets'] = \ + if ( + list_instance_fleets_result is not None + and list_instance_fleets_result.get('InstanceFleets') is not None + ): + result['Cluster']['InstanceFleets'] = ( 
list_instance_fleets_result.get('InstanceFleets') - if (list_instance_groups_result is not None and - list_instance_groups_result.get('InstanceGroups') is not None): - result['Cluster']['InstanceGroups'] = \ + ) + if ( + list_instance_groups_result is not None + and list_instance_groups_result.get('InstanceGroups') is not None + ): + result['Cluster']['InstanceGroups'] = ( list_instance_groups_result.get('InstanceGroups') - if (list_bootstrap_actions_result is not None and - list_bootstrap_actions_result.get('BootstrapActions') - is not None): - result['Cluster']['BootstrapActions'] = \ + ) + if ( + list_bootstrap_actions_result is not None + and list_bootstrap_actions_result.get('BootstrapActions') + is not None + ): + result['Cluster']['BootstrapActions'] = ( list_bootstrap_actions_result['BootstrapActions'] + ) return result diff --git a/awscli/customizations/emr/emr.py b/awscli/customizations/emr/emr.py index fc42bfcecf39..361d129f4e76 100644 --- a/awscli/customizations/emr/emr.py +++ b/awscli/customizations/emr/emr.py @@ -11,20 +11,20 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import hbase -from awscli.customizations.emr import ssh +from awscli.customizations.emr import hbase, ssh +from awscli.customizations.emr.addinstancegroups import AddInstanceGroups from awscli.customizations.emr.addsteps import AddSteps +from awscli.customizations.emr.addtags import modify_tags_argument +from awscli.customizations.emr.command import override_args_required_option from awscli.customizations.emr.createcluster import CreateCluster -from awscli.customizations.emr.addinstancegroups import AddInstanceGroups from awscli.customizations.emr.createdefaultroles import CreateDefaultRoles -from awscli.customizations.emr.modifyclusterattributes import ModifyClusterAttr -from awscli.customizations.emr.installapplications import InstallApplications from awscli.customizations.emr.describecluster import DescribeCluster +from awscli.customizations.emr.installapplications import InstallApplications +from awscli.customizations.emr.listclusters import ( + modify_list_clusters_argument, +) +from awscli.customizations.emr.modifyclusterattributes import ModifyClusterAttr from awscli.customizations.emr.terminateclusters import TerminateClusters -from awscli.customizations.emr.addtags import modify_tags_argument -from awscli.customizations.emr.listclusters \ - import modify_list_clusters_argument -from awscli.customizations.emr.command import override_args_required_option def emr_initialize(cli): @@ -35,9 +35,12 @@ def emr_initialize(cli): cli.register('building-argument-table.emr.add-tags', modify_tags_argument) cli.register( 'building-argument-table.emr.list-clusters', - modify_list_clusters_argument) - cli.register('before-building-argument-table-parser.emr.*', - override_args_required_option) + modify_list_clusters_argument, + ) + cli.register( + 'before-building-argument-table-parser.emr.*', + override_args_required_option, + ) def register_commands(command_table, session, **kwargs): @@ -52,12 +55,12 @@ def register_commands(command_table, session, **kwargs): command_table['install-applications'] = InstallApplications(session) command_table['create-cluster'] = CreateCluster(session) command_table['add-steps'] = AddSteps(session) - command_table['restore-from-hbase-backup'] = \ - hbase.RestoreFromHBaseBackup(session) + command_table['restore-from-hbase-backup'] = 
hbase.RestoreFromHBaseBackup( + session + ) command_table['create-hbase-backup'] = hbase.CreateHBaseBackup(session) command_table['schedule-hbase-backup'] = hbase.ScheduleHBaseBackup(session) - command_table['disable-hbase-backups'] = \ - hbase.DisableHBaseBackups(session) + command_table['disable-hbase-backups'] = hbase.DisableHBaseBackups(session) command_table['create-default-roles'] = CreateDefaultRoles(session) command_table['add-instance-groups'] = AddInstanceGroups(session) command_table['ssh'] = ssh.SSH(session) diff --git a/awscli/customizations/emr/emrfsutils.py b/awscli/customizations/emr/emrfsutils.py index ab6bdadc85d0..2c040e0f9b8f 100644 --- a/awscli/customizations/emr/emrfsutils.py +++ b/awscli/customizations/emr/emrfsutils.py @@ -11,12 +11,9 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import constants, emrutils, exceptions from botocore.compat import OrderedDict - CONSISTENT_OPTIONAL_KEYS = ['RetryCount', 'RetryPeriod'] CSE_KMS_REQUIRED_KEYS = ['KMSKeyId'] CSE_CUSTOM_REQUIRED_KEYS = ['CustomProviderLocation', 'CustomProviderClass'] @@ -40,20 +37,26 @@ def build_bootstrap_action_configs(region, emrfs_args): emrutils.build_bootstrap_action( path=constants.EMRFS_CSE_CUSTOM_S3_GET_BA_PATH, name=constants.S3_GET_BA_NAME, - args=[constants.S3_GET_BA_SRC, - emrfs_args.get('CustomProviderLocation'), - constants.S3_GET_BA_DEST, - constants.EMRFS_CUSTOM_DEST_PATH, - constants.S3_GET_BA_FORCE])) + args=[ + constants.S3_GET_BA_SRC, + emrfs_args.get('CustomProviderLocation'), + constants.S3_GET_BA_DEST, + constants.EMRFS_CUSTOM_DEST_PATH, + constants.S3_GET_BA_FORCE, + ], + ) + ) emrfs_setup_ba_args = _build_ba_args_to_setup_emrfs(emrfs_args) bootstrap_actions.append( emrutils.build_bootstrap_action( path=emrutils.build_s3_link( - relative_path=constants.CONFIG_HADOOP_PATH, - region=region), + relative_path=constants.CONFIG_HADOOP_PATH, region=region + ), name=constants.EMRFS_BA_NAME, - args=emrfs_setup_ba_args)) + args=emrfs_setup_ba_args, + ) + ) return bootstrap_actions @@ -63,74 +66,95 @@ def build_emrfs_confiuration(emrfs_args): emrfs_properties = _build_emrfs_properties(emrfs_args) if _need_to_configure_cse(emrfs_args, 'CUSTOM'): - emrfs_properties[constants.EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY] = \ + emrfs_properties[constants.EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY] = ( emrfs_args.get('CustomProviderLocation') + ) emrfs_configuration = { 'Classification': constants.EMRFS_SITE, - 'Properties': emrfs_properties} + 'Properties': emrfs_properties, + } return emrfs_configuration def _verify_emrfs_args(emrfs_args): # Encryption should have a valid value - if 'Encryption' in emrfs_args \ - and emrfs_args['Encryption'].upper() not in ENCRYPTION_TYPES: + if ( + 'Encryption' in emrfs_args + and emrfs_args['Encryption'].upper() not in ENCRYPTION_TYPES + ): raise exceptions.UnknownEncryptionTypeError( - encryption=emrfs_args['Encryption']) + encryption=emrfs_args['Encryption'] + ) # Only one of SSE and Encryption should be configured if 'SSE' in emrfs_args and 'Encryption' in emrfs_args: raise exceptions.BothSseAndEncryptionConfiguredError( - sse=emrfs_args['SSE'], encryption=emrfs_args['Encryption']) + sse=emrfs_args['SSE'], encryption=emrfs_args['Encryption'] + ) # CSE should be configured correctly # ProviderType should be 
present and should have valid value # Given the type, the required parameters should be present - if ('Encryption' in emrfs_args and - emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE): + if ( + 'Encryption' in emrfs_args + and emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE + ): if 'ProviderType' not in emrfs_args: raise exceptions.MissingParametersError( - object_name=CSE_OPTION_NAME, missing='ProviderType') + object_name=CSE_OPTION_NAME, missing='ProviderType' + ) elif emrfs_args['ProviderType'].upper() not in CSE_PROVIDER_TYPES: raise exceptions.UnknownCseProviderTypeError( - provider_type=emrfs_args['ProviderType']) + provider_type=emrfs_args['ProviderType'] + ) elif emrfs_args['ProviderType'].upper() == 'KMS': - _verify_required_args(emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, - CSE_KMS_OPTION_NAME) + _verify_required_args( + emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, CSE_KMS_OPTION_NAME + ) elif emrfs_args['ProviderType'].upper() == 'CUSTOM': - _verify_required_args(emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS, - CSE_CUSTOM_OPTION_NAME) + _verify_required_args( + emrfs_args.keys(), + CSE_CUSTOM_REQUIRED_KEYS, + CSE_CUSTOM_OPTION_NAME, + ) # No child attributes should be present if the parent feature is not # configured if 'Consistent' not in emrfs_args: - _verify_child_args(emrfs_args.keys(), CONSISTENT_OPTIONAL_KEYS, - CONSISTENT_OPTION_NAME) + _verify_child_args( + emrfs_args.keys(), CONSISTENT_OPTIONAL_KEYS, CONSISTENT_OPTION_NAME + ) if not _need_to_configure_cse(emrfs_args, 'KMS'): - _verify_child_args(emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, - CSE_KMS_OPTION_NAME) + _verify_child_args( + emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, CSE_KMS_OPTION_NAME + ) if not _need_to_configure_cse(emrfs_args, 'CUSTOM'): - _verify_child_args(emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS, - CSE_CUSTOM_OPTION_NAME) + _verify_child_args( + emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS, CSE_CUSTOM_OPTION_NAME + ) def _verify_required_args(actual_keys, required_keys, object_name): if any(x not in actual_keys for x in required_keys): missing_keys = list( - sorted(set(required_keys).difference(set(actual_keys)))) + sorted(set(required_keys).difference(set(actual_keys))) + ) raise exceptions.MissingParametersError( - object_name=object_name, missing=emrutils.join(missing_keys)) + object_name=object_name, missing=emrutils.join(missing_keys) + ) def _verify_child_args(actual_keys, child_keys, parent_object_name): if any(x in actual_keys for x in child_keys): invalid_keys = list( - sorted(set(child_keys).intersection(set(actual_keys)))) + sorted(set(child_keys).intersection(set(actual_keys))) + ) raise exceptions.InvalidEmrFsArgumentsError( invalid=emrutils.join(invalid_keys), - parent_object_name=parent_object_name) + parent_object_name=parent_object_name, + ) def _build_ba_args_to_setup_emrfs(emrfs_args): @@ -170,29 +194,35 @@ def _need_to_configure_consistent_view(emrfs_args): def _need_to_configure_sse(emrfs_args): - return 'SSE' in emrfs_args \ - or ('Encryption' in emrfs_args and - emrfs_args['Encryption'].upper() == constants.EMRFS_SERVER_SIDE) + return 'SSE' in emrfs_args or ( + 'Encryption' in emrfs_args + and emrfs_args['Encryption'].upper() == constants.EMRFS_SERVER_SIDE + ) def _need_to_configure_cse(emrfs_args, cse_type): - return ('Encryption' in emrfs_args and - emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE and - 'ProviderType' in emrfs_args and - emrfs_args['ProviderType'].upper() == cse_type) + return ( + 'Encryption' in emrfs_args + and 
emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE + and 'ProviderType' in emrfs_args + and emrfs_args['ProviderType'].upper() == cse_type + ) def _update_properties_for_consistent_view(emrfs_properties, emrfs_args): - emrfs_properties[constants.EMRFS_CONSISTENT_KEY] = \ - str(emrfs_args['Consistent']).lower() + emrfs_properties[constants.EMRFS_CONSISTENT_KEY] = str( + emrfs_args['Consistent'] + ).lower() if 'RetryCount' in emrfs_args: - emrfs_properties[constants.EMRFS_RETRY_COUNT_KEY] = \ - str(emrfs_args['RetryCount']) + emrfs_properties[constants.EMRFS_RETRY_COUNT_KEY] = str( + emrfs_args['RetryCount'] + ) if 'RetryPeriod' in emrfs_args: - emrfs_properties[constants.EMRFS_RETRY_PERIOD_KEY] = \ - str(emrfs_args['RetryPeriod']) + emrfs_properties[constants.EMRFS_RETRY_PERIOD_KEY] = str( + emrfs_args['RetryPeriod'] + ) def _update_properties_for_sse(emrfs_properties, emrfs_args): @@ -206,16 +236,17 @@ def _update_properties_for_cse(emrfs_properties, emrfs_args, cse_type): emrfs_properties[constants.EMRFS_CSE_KEY] = 'true' if cse_type == 'KMS': emrfs_properties[ - constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY] = \ - constants.EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME + constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY + ] = constants.EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME - emrfs_properties[constants.EMRFS_CSE_KMS_KEY_ID_KEY] =\ - emrfs_args['KMSKeyId'] + emrfs_properties[constants.EMRFS_CSE_KMS_KEY_ID_KEY] = emrfs_args[ + 'KMSKeyId' + ] elif cse_type == 'CUSTOM': emrfs_properties[ - constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY] = \ - emrfs_args['CustomProviderClass'] + constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY + ] = emrfs_args['CustomProviderClass'] def _update_emrfs_ba_args(ba_args, key_value): diff --git a/awscli/customizations/emr/emrutils.py b/awscli/customizations/emr/emrutils.py index d4fc1991147f..016cabe7bfa0 100644 --- a/awscli/customizations/emr/emrutils.py +++ b/awscli/customizations/emr/emrutils.py @@ -15,13 +15,11 @@ import logging import os - from awscli.clidriver import CLIOperationCaller +from awscli.customizations.emr import constants, exceptions from awscli.customizations.exceptions import ParamValidationError -from awscli.customizations.emr import constants -from awscli.customizations.emr import exceptions -from botocore.exceptions import WaiterError, NoCredentialsError from botocore import xform_name +from botocore.exceptions import NoCredentialsError, WaiterError LOG = logging.getLogger(__name__) @@ -57,11 +55,16 @@ def parse_key_value_string(key_value_string): def apply_boolean_options( - true_option, true_option_name, false_option, false_option_name): + true_option, true_option_name, false_option, false_option_name +): if true_option and false_option: - error_message = \ - 'aws: error: cannot use both ' + true_option_name + \ - ' and ' + false_option_name + ' options together.' + error_message = ( + 'aws: error: cannot use both ' + + true_option_name + + ' and ' + + false_option_name + + ' options together.' 
+ ) raise ParamValidationError(error_message) elif true_option: return True @@ -92,13 +95,14 @@ def apply_params(src_params, src_key, dest_params, dest_key): def build_step( - jar, name='Step', - action_on_failure=constants.DEFAULT_FAILURE_ACTION, - args=None, - main_class=None, - properties=None): - check_required_field( - structure='HadoopJarStep', name='Jar', value=jar) + jar, + name='Step', + action_on_failure=constants.DEFAULT_FAILURE_ACTION, + args=None, + main_class=None, + properties=None, +): + check_required_field(structure='HadoopJarStep', name='Jar', value=jar) step = {} apply_dict(step, 'Name', name) @@ -113,13 +117,11 @@ def build_step( return step -def build_bootstrap_action( - path, - name='Bootstrap Action', - args=None): +def build_bootstrap_action(path, name='Bootstrap Action', args=None): if path is None: raise exceptions.MissingParametersError( - object_name='ScriptBootstrapActionConfig', missing='Path') + object_name='ScriptBootstrapActionConfig', missing='Path' + ) ba_config = {} apply_dict(ba_config, 'Name', name) script_config = {} @@ -140,13 +142,15 @@ def get_script_runner(region='us-east-1'): if region is None: region = 'us-east-1' return build_s3_link( - relative_path=constants.SCRIPT_RUNNER_PATH, region=region) + relative_path=constants.SCRIPT_RUNNER_PATH, region=region + ) def check_required_field(structure, name, value): if not value: raise exceptions.MissingParametersError( - object_name=structure, missing=name) + object_name=structure, missing=name + ) def check_empty_string_list(name, value): @@ -154,8 +158,14 @@ def check_empty_string_list(name, value): raise exceptions.EmptyListError(param=name) -def call(session, operation_name, parameters, region_name=None, - endpoint_url=None, verify=None): +def call( + session, + operation_name, + parameters, + region_name=None, + endpoint_url=None, + verify=None, +): # We could get an error from get_endpoint() about not having # a region configured. Before this happens we want to check # for credentials so we can give a good error message. @@ -163,8 +173,11 @@ def call(session, operation_name, parameters, region_name=None, raise NoCredentialsError() client = session.create_client( - 'emr', region_name=region_name, endpoint_url=endpoint_url, - verify=verify) + 'emr', + region_name=region_name, + endpoint_url=endpoint_url, + verify=verify, + ) LOG.debug('Calling ' + str(operation_name)) return getattr(client, operation_name)(**parameters) @@ -182,7 +195,8 @@ def get_client(session, parsed_globals): 'emr', region_name=get_region(session, parsed_globals), endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) def get_cluster_state(session, parsed_globals, cluster_id): @@ -210,12 +224,13 @@ def which(program): return None -def call_and_display_response(session, operation_name, parameters, - parsed_globals): +def call_and_display_response( + session, operation_name, parameters, parsed_globals +): cli_operation_caller = CLIOperationCaller(session) cli_operation_caller.invoke( - 'emr', operation_name, - parameters, parsed_globals) + 'emr', operation_name, parameters, parsed_globals + ) def display_response(session, operation_name, result, parsed_globals): @@ -223,7 +238,8 @@ def display_response(session, operation_name, result, parsed_globals): # Calling a private method. Should be changed after the functionality # is moved outside CliOperationCaller. 
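Reviewer note, not part of the patch: apply_boolean_options above is the helper that folds a --foo/--no-foo flag pair into a single value. A minimal standalone sketch of its contract, with a stand-in exception class instead of awscli's ParamValidationError, and with the function's tail (which falls outside this hunk) simplified to return None:

```python
# Standalone sketch of the apply_boolean_options contract. The exception
# class is a stand-in; the real code raises awscli's ParamValidationError.
class ParamValidationError(Exception):
    pass


def apply_boolean_options(
    true_option, true_option_name, false_option, false_option_name
):
    if true_option and false_option:
        raise ParamValidationError(
            'aws: error: cannot use both %s and %s options together.'
            % (true_option_name, false_option_name)
        )
    elif true_option:
        return True
    elif false_option:
        return False
    # Simplification: the tail of the real function is outside this hunk.
    return None


# e.g. --termination-protected / --no-termination-protected
assert apply_boolean_options(True, '--a', False, '--no-a') is True
assert apply_boolean_options(False, '--a', True, '--no-a') is False
```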
cli_operation_caller._display_response( - operation_name, result, parsed_globals) + operation_name, result, parsed_globals + ) def get_region(session, parsed_globals): @@ -245,8 +261,9 @@ def join(values, separator=',', lastSeparator='and'): return values[0] else: separator = '%s ' % separator - return ' '.join([separator.join(values[:-1]), - lastSeparator, values[-1]]) + return ' '.join( + [separator.join(values[:-1]), lastSeparator, values[-1]] + ) def split_to_key_value(string): @@ -256,21 +273,24 @@ def split_to_key_value(string): return string.split('=', 1) -def get_cluster(cluster_id, session, region, - endpoint_url, verify_ssl): - describe_cluster_params = {'ClusterId': cluster_id} - describe_cluster_response = call( - session, 'describe_cluster', describe_cluster_params, - region, endpoint_url, - verify_ssl) +def get_cluster(cluster_id, session, region, endpoint_url, verify_ssl): + describe_cluster_params = {'ClusterId': cluster_id} + describe_cluster_response = call( + session, + 'describe_cluster', + describe_cluster_params, + region, + endpoint_url, + verify_ssl, + ) - if describe_cluster_response is not None: - return describe_cluster_response.get('Cluster') + if describe_cluster_response is not None: + return describe_cluster_response.get('Cluster') -def get_release_label(cluster_id, session, region, - endpoint_url, verify_ssl): - cluster = get_cluster(cluster_id, session, region, - endpoint_url, verify_ssl) - if cluster is not None: - return cluster.get('ReleaseLabel') +def get_release_label(cluster_id, session, region, endpoint_url, verify_ssl): + cluster = get_cluster( + cluster_id, session, region, endpoint_url, verify_ssl + ) + if cluster is not None: + return cluster.get('ReleaseLabel') diff --git a/awscli/customizations/emr/exceptions.py b/awscli/customizations/emr/exceptions.py index ec4a9ad95bf3..d1559886a498 100644 --- a/awscli/customizations/emr/exceptions.py +++ b/awscli/customizations/emr/exceptions.py @@ -14,12 +14,12 @@ class EmrError(Exception): - """ The base exception class for Emr exceptions. :ivar msg: The descriptive message associated with the error. """ + fmt = 'An unspecified error occurred' def __init__(self, **kwargs): @@ -29,7 +29,6 @@ def __init__(self, **kwargs): class MissingParametersError(EmrError, ParamValidationError): - """ One or more required parameters were not supplied. @@ -40,168 +39,193 @@ class MissingParametersError(EmrError, ParamValidationError): other than str(). :ivar missing: The names of the missing parameters. """ - fmt = ('aws: error: The following required parameters are missing for ' - '{object_name}: {missing}.') + fmt = ( + 'aws: error: The following required parameters are missing for ' + '{object_name}: {missing}.' + ) -class EmptyListError(EmrError, ParamValidationError): +class EmptyListError(EmrError, ParamValidationError): """ The provided list is empty. :ivar param: The provided list parameter """ - fmt = ('aws: error: The prameter {param} cannot be an empty list.') + fmt = 'aws: error: The prameter {param} cannot be an empty list.' -class MissingRequiredInstanceGroupsError(EmrError, ParamValidationError): +class MissingRequiredInstanceGroupsError(EmrError, ParamValidationError): """ In create-cluster command, none of --instance-group, --instance-count nor --instance-type were not supplied. 
""" - fmt = ('aws: error: Must specify either --instance-groups or ' - '--instance-type with --instance-count(optional) to ' - 'configure instance groups.') + fmt = ( + 'aws: error: Must specify either --instance-groups or ' + '--instance-type with --instance-count(optional) to ' + 'configure instance groups.' + ) -class InstanceGroupsValidationError(EmrError, ParamValidationError): +class InstanceGroupsValidationError(EmrError, ParamValidationError): """ --instance-type and --instance-count are shortcut option for --instance-groups and they cannot be specified together with --instance-groups """ - fmt = ('aws: error: You may not specify --instance-type ' - 'or --instance-count with --instance-groups, ' - 'because --instance-type and --instance-count are ' - 'shortcut options for --instance-groups.') + fmt = ( + 'aws: error: You may not specify --instance-type ' + 'or --instance-count with --instance-groups, ' + 'because --instance-type and --instance-count are ' + 'shortcut options for --instance-groups.' + ) -class InvalidAmiVersionError(EmrError, ParamValidationError): +class InvalidAmiVersionError(EmrError, ParamValidationError): """ The supplied ami-version is invalid. :ivar ami_version: The provided ami_version. """ - fmt = ('aws: error: The supplied AMI version "{ami_version}" is invalid.' - ' Please see AMI Versions Supported in Amazon EMR in ' - 'Amazon Elastic MapReduce Developer Guide: ' - 'http://docs.aws.amazon.com/ElasticMapReduce/' - 'latest/DeveloperGuide/ami-versions-supported.html') + fmt = ( + 'aws: error: The supplied AMI version "{ami_version}" is invalid.' + ' Please see AMI Versions Supported in Amazon EMR in ' + 'Amazon Elastic MapReduce Developer Guide: ' + 'http://docs.aws.amazon.com/ElasticMapReduce/' + 'latest/DeveloperGuide/ami-versions-supported.html' + ) -class MissingBooleanOptionsError(EmrError, ParamValidationError): +class MissingBooleanOptionsError(EmrError, ParamValidationError): """ Required boolean options are not supplied. :ivar true_option :ivar false_option """ - fmt = ('aws: error: Must specify one of the following boolean options: ' - '{true_option}|{false_option}.') + fmt = ( + 'aws: error: Must specify one of the following boolean options: ' + '{true_option}|{false_option}.' + ) -class UnknownStepTypeError(EmrError, ParamValidationError): +class UnknownStepTypeError(EmrError, ParamValidationError): """ The provided step type is not supported. :ivar step_type: the step_type provided. """ - fmt = ('aws: error: The step type {step_type} is not supported.') + fmt = 'aws: error: The step type {step_type} is not supported.' -class UnknownIamEndpointError(EmrError): +class UnknownIamEndpointError(EmrError): """ The IAM endpoint is not known for the specified region. :ivar region: The region specified. """ - fmt = 'IAM endpoint not known for region: {region}.' +\ - ' Specify the iam-endpoint using the --iam-endpoint option.' + fmt = ( + 'IAM endpoint not known for region: {region}.' + + ' Specify the iam-endpoint using the --iam-endpoint option.' + ) -class ResolveServicePrincipalError(EmrError): +class ResolveServicePrincipalError(EmrError): """ The service principal could not be resolved from the region or the endpoint. """ - fmt = 'Could not resolve the service principal from' +\ - ' the region or the endpoint.' + fmt = ( + 'Could not resolve the service principal from' + + ' the region or the endpoint.' 
+ ) -class LogUriError(EmrError, ParamValidationError): +class LogUriError(EmrError, ParamValidationError): """ The LogUri is not specified and debugging is enabled for the cluster. """ - fmt = ('aws: error: LogUri not specified. You must specify a logUri ' - 'if you enable debugging when creating a cluster.') + fmt = ( + 'aws: error: LogUri not specified. You must specify a logUri ' + 'if you enable debugging when creating a cluster.' + ) -class MasterDNSNotAvailableError(EmrError): +class MasterDNSNotAvailableError(EmrError): """ Cannot get dns of master node on the cluster. """ - fmt = 'Cannot get DNS of master node on the cluster. '\ - ' Please try again after some time.' + fmt = ( + 'Cannot get DNS of master node on the cluster. ' + ' Please try again after some time.' + ) -class WrongPuttyKeyError(EmrError, ParamValidationError): +class WrongPuttyKeyError(EmrError, ParamValidationError): """ A wrong key has been used with a compatible program. """ - fmt = 'Key file file format is incorrect. Putty expects a ppk file. '\ - 'Please refer to documentation at http://docs.aws.amazon.com/'\ - 'ElasticMapReduce/latest/DeveloperGuide/EMR_SetUp_SSH.html. ' + fmt = ( + 'Key file file format is incorrect. Putty expects a ppk file. ' + 'Please refer to documentation at http://docs.aws.amazon.com/' + 'ElasticMapReduce/latest/DeveloperGuide/EMR_SetUp_SSH.html. ' + ) -class SSHNotFoundError(EmrError): +class SSHNotFoundError(EmrError): """ SSH or Putty not available. """ - fmt = 'SSH or Putty not available. Please refer to the documentation '\ - 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'\ - 'DeveloperGuide/EMR_SetUp_SSH.html.' + fmt = ( + 'SSH or Putty not available. Please refer to the documentation ' + 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/' + 'DeveloperGuide/EMR_SetUp_SSH.html.' + ) -class SCPNotFoundError(EmrError): +class SCPNotFoundError(EmrError): """ SCP or Pscp not available. """ - fmt = 'SCP or Pscp not available. Please refer to the documentation '\ - 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'\ - 'DeveloperGuide/EMR_SetUp_SSH.html. ' + fmt = ( + 'SCP or Pscp not available. Please refer to the documentation ' + 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/' + 'DeveloperGuide/EMR_SetUp_SSH.html. ' + ) -class SubnetAndAzValidationError(EmrError, ParamValidationError): +class SubnetAndAzValidationError(EmrError, ParamValidationError): """ SubnetId and AvailabilityZone are mutual exclusive in --ec2-attributes. """ - fmt = ('aws: error: You may not specify both a SubnetId and an Availabili' - 'tyZone (placement) because ec2SubnetId implies a placement.') + fmt = ( + 'aws: error: You may not specify both a SubnetId and an Availabili' + 'tyZone (placement) because ec2SubnetId implies a placement.' + ) -class RequiredOptionsError(EmrError, ParamValidationError): +class RequiredOptionsError(EmrError, ParamValidationError): """ Either of option1 or option2 is required. """ - fmt = ('aws: error: Either {option1} or {option2} is required.') + fmt = 'aws: error: Either {option1} or {option2} is required.' class MutualExclusiveOptionError(EmrError, ParamValidationError): - """ The provided option1 and option2 are mutually exclusive. @@ -211,15 +235,18 @@ class MutualExclusiveOptionError(EmrError, ParamValidationError): """ def __init__(self, **kwargs): - msg = ('aws: error: You cannot specify both ' + - kwargs.get('option1', '') + ' and ' + - kwargs.get('option2', '') + ' options together.' 
+ - kwargs.get('message', '')) + msg = ( + 'aws: error: You cannot specify both ' + + kwargs.get('option1', '') + + ' and ' + + kwargs.get('option2', '') + + ' options together.' + + kwargs.get('message', '') + ) Exception.__init__(self, msg) class MissingApplicationsError(EmrError, ParamValidationError): - """ The application required for a step is not installed when creating a cluster. @@ -228,50 +255,56 @@ class MissingApplicationsError(EmrError, ParamValidationError): """ def __init__(self, **kwargs): - msg = ('aws: error: Some of the steps require the following' - ' applications to be installed: ' + - ', '.join(kwargs['applications']) + '. Please install the' - ' applications using --applications.') + msg = ( + 'aws: error: Some of the steps require the following' + ' applications to be installed: ' + + ', '.join(kwargs['applications']) + + '. Please install the' + ' applications using --applications.' + ) Exception.__init__(self, msg) class ClusterTerminatedError(EmrError): - """ The cluster is terminating or has already terminated. """ + fmt = 'aws: error: Cluster terminating or already terminated.' class ClusterStatesFilterValidationError(EmrError, ParamValidationError): - """ In the list-clusters command, customers can specify only one of the following states filters: --cluster-states, --active, --terminated, --failed """ - fmt = ('aws: error: You can specify only one of the cluster state ' - 'filters: --cluster-states, --active, --terminated, --failed.') + fmt = ( + 'aws: error: You can specify only one of the cluster state ' + 'filters: --cluster-states, --active, --terminated, --failed.' + ) -class MissingClusterAttributesError(EmrError, ParamValidationError): +class MissingClusterAttributesError(EmrError, ParamValidationError): """ In the modify-cluster-attributes command, customers need to provide at least one of the following cluster attributes: --visible-to-all-users, --no-visible-to-all-users, --termination-protected, --no-termination-protected, --auto-terminate and --no-auto-terminate """ - fmt = ('aws: error: Must specify one of the following boolean options: ' - '--visible-to-all-users|--no-visible-to-all-users, ' - '--termination-protected|--no-termination-protected, ' - '--auto-terminate|--no-auto-terminate, ' - '--unhealthy-node-replacement|--no-unhealthy-node-replacement.') + fmt = ( + 'aws: error: Must specify one of the following boolean options: ' + '--visible-to-all-users|--no-visible-to-all-users, ' + '--termination-protected|--no-termination-protected, ' + '--auto-terminate|--no-auto-terminate, ' + '--unhealthy-node-replacement|--no-unhealthy-node-replacement.' + ) -class InvalidEmrFsArgumentsError(EmrError, ParamValidationError): +class InvalidEmrFsArgumentsError(EmrError, ParamValidationError): """ The provided EMRFS parameters are invalid as parent feature e.g., Consistent View, CSE, SSE is not configured @@ -280,40 +313,46 @@ class InvalidEmrFsArgumentsError(EmrError, ParamValidationError): :ivar parent_object_name: Parent feature name """ - fmt = ('aws: error: {parent_object_name} is not specified. Thus, ' - ' following parameters are invalid: {invalid}') + fmt = ( + 'aws: error: {parent_object_name} is not specified. 
Thus, ' + ' following parameters are invalid: {invalid}' + ) class DuplicateEmrFsConfigurationError(EmrError, ParamValidationError): - - fmt = ('aws: error: EMRFS should be configured either using ' - '--configuration or --emrfs but not both') + fmt = ( + 'aws: error: EMRFS should be configured either using ' + '--configuration or --emrfs but not both' + ) class UnknownCseProviderTypeError(EmrError, ParamValidationError): - """ The provided EMRFS client-side encryption provider type is not supported. :ivar provider_type: the provider_type provided. """ - fmt = ('aws: error: The client side encryption type "{provider_type}" is ' - 'not supported. You must specify either KMS or Custom') + fmt = ( + 'aws: error: The client side encryption type "{provider_type}" is ' + 'not supported. You must specify either KMS or Custom' + ) -class UnknownEncryptionTypeError(EmrError, ParamValidationError): +class UnknownEncryptionTypeError(EmrError, ParamValidationError): """ The provided encryption type is not supported. :ivar provider_type: the provider_type provided. """ - fmt = ('aws: error: The encryption type "{encryption}" is invalid. ' - 'You must specify either ServerSide or ClientSide') + fmt = ( + 'aws: error: The encryption type "{encryption}" is invalid. ' + 'You must specify either ServerSide or ClientSide' + ) -class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError): +class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError): """ Only one of SSE or Encryption can be configured. @@ -321,25 +360,30 @@ class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError): :ivar encryption: Value for encryption """ - fmt = ('aws: error: Both SSE={sse} and Encryption={encryption} are ' - 'configured for --emrfs. You must specify only one of the two.') + fmt = ( + 'aws: error: Both SSE={sse} and Encryption={encryption} are ' + 'configured for --emrfs. You must specify only one of the two.' + ) class InvalidBooleanConfigError(EmrError, ParamValidationError): - - fmt = ("aws: error: {config_value} for {config_key} in the config file is " - "invalid. The value should be either 'True' or 'False'. Use " - "'aws configure set {profile_var_name}.emr.{config_key} ' " - "command to set a valid value.") + fmt = ( + "aws: error: {config_value} for {config_key} in the config file is " + "invalid. The value should be either 'True' or 'False'. Use " + "'aws configure set {profile_var_name}.emr.{config_key} ' " + "command to set a valid value." + ) class UnsupportedCommandWithReleaseError(EmrError, ParamValidationError): + fmt = ( + "aws: error: {command} is not supported with " + "'{release_label}' release." + ) - fmt = ("aws: error: {command} is not supported with " - "'{release_label}' release.") class MissingAutoScalingRoleError(EmrError, ParamValidationError): - - fmt = ("aws: error: Must specify --auto-scaling-role when configuring an " - "AutoScaling policy for an instance group.") - + fmt = ( + "aws: error: Must specify --auto-scaling-role when configuring an " + "AutoScaling policy for an instance group." + ) diff --git a/awscli/customizations/emr/hbase.py b/awscli/customizations/emr/hbase.py index 6d6b50be4bb7..5963b6070afb 100644 --- a/awscli/customizations/emr/hbase.py +++ b/awscli/customizations/emr/hbase.py @@ -11,71 +11,98 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
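Reviewer note: every exception class reformatted above follows the same fmt-template pattern inherited from EmrError. A self-contained sketch of how the template and the keyword arguments combine (reconstructed from the class bodies visible in this diff, not copied verbatim from the file):

```python
# Self-contained sketch of the fmt/kwargs pattern used by the EMR
# exceptions above: subclasses declare a `fmt` template and callers
# supply the fields as keyword arguments.
class EmrError(Exception):
    fmt = 'An unspecified error occurred'

    def __init__(self, **kwargs):
        Exception.__init__(self, self.fmt.format(**kwargs))
        self.kwargs = kwargs


class UnknownEncryptionTypeError(EmrError):
    fmt = (
        'aws: error: The encryption type "{encryption}" is invalid. '
        'You must specify either ServerSide or ClientSide'
    )


try:
    raise UnknownEncryptionTypeError(encryption='Sideways')
except EmrError as e:
    print(e)  # aws: error: The encryption type "Sideways" is invalid. ...
```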
-from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import hbaseutils -from awscli.customizations.emr import helptext +from awscli.customizations.emr import constants, emrutils, hbaseutils, helptext from awscli.customizations.emr.command import Command from awscli.customizations.exceptions import ParamValidationError class RestoreFromHBaseBackup(Command): NAME = 'restore-from-hbase-backup' - DESCRIPTION = ('Restores HBase from S3. ' + - helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS) + DESCRIPTION = ( + 'Restores HBase from S3. ' + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'dir', 'required': True, - 'help_text': helptext.HBASE_BACKUP_DIR}, - {'name': 'backup-version', - 'help_text': helptext.HBASE_BACKUP_VERSION} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'dir', + 'required': True, + 'help_text': helptext.HBASE_BACKUP_DIR, + }, + {'name': 'backup-version', 'help_text': helptext.HBASE_BACKUP_VERSION}, ] def _run_main_command(self, parsed_args, parsed_globals): steps = [] args = hbaseutils.build_hbase_restore_from_backup_args( - parsed_args.dir, parsed_args.backup_version) + parsed_args.dir, parsed_args.backup_version + ) step_config = emrutils.build_step( jar=constants.HBASE_JAR_PATH, name=constants.HBASE_RESTORE_STEP_NAME, action_on_failure=constants.CANCEL_AND_WAIT, - args=args) + args=args, + ) steps.append(step_config) - parameters = {'JobFlowId': parsed_args.cluster_id, - 'Steps': steps} - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps} + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 class ScheduleHBaseBackup(Command): NAME = 'schedule-hbase-backup' - DESCRIPTION = ('Adds a step to schedule automated HBase backup. ' + - helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS) + DESCRIPTION = ( + 'Adds a step to schedule automated HBase backup. ' + + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'type', 'required': True, - 'help_text': "

<p>Backup type. You can specify 'incremental' or "
-                      "'full'.</p>"},
-        {'name': 'dir', 'required': True,
-         'help_text': helptext.HBASE_BACKUP_DIR},
-        {'name': 'interval', 'required': True,
-         'help_text': '<p>The time between backups.</p>'},
-        {'name': 'unit', 'required': True,
-         'help_text': "<p>The time unit for backup's time-interval. "
-                      "You can specify one of the following values:"
-                      " 'minutes', 'hours', or 'days'.</p>"},
-        {'name': 'start-time',
-         'help_text': '<p>The time of the first backup in ISO format.</p>'
-                      ' e.g. 2014-04-21T05:26:10Z. Default is now.'},
-        {'name': 'consistent', 'action': 'store_true',
-         'help_text': '<p>Performs a consistent backup.'
-                      ' Pauses all write operations to the HBase cluster'
-                      ' during the backup process.</p>'}
+        {
+            'name': 'cluster-id',
+            'required': True,
+            'help_text': helptext.CLUSTER_ID,
+        },
+        {
+            'name': 'type',
+            'required': True,
+            'help_text': "<p>Backup type. You can specify 'incremental' or "
+            "'full'.</p>",
+        },
+        {
+            'name': 'dir',
+            'required': True,
+            'help_text': helptext.HBASE_BACKUP_DIR,
+        },
+        {
+            'name': 'interval',
+            'required': True,
+            'help_text': '<p>The time between backups.</p>',
+        },
+        {
+            'name': 'unit',
+            'required': True,
+            'help_text': "<p>The time unit for backup's time-interval. "
+            "You can specify one of the following values:"
+            " 'minutes', 'hours', or 'days'.</p>",
+        },
+        {
+            'name': 'start-time',
+            'help_text': '<p>The time of the first backup in ISO format.</p>'
+            ' e.g. 2014-04-21T05:26:10Z. Default is now.',
+        },
+        {
+            'name': 'consistent',
+            'action': 'store_true',
+            'help_text': '<p>Performs a consistent backup.'
+            ' Pauses all write operations to the HBase cluster'
+            ' during the backup process.</p>
    ', + }, ] def _run_main_command(self, parsed_args, parsed_globals): @@ -88,37 +115,54 @@ def _run_main_command(self, parsed_args, parsed_globals): jar=constants.HBASE_JAR_PATH, name=constants.HBASE_SCHEDULE_BACKUP_STEP_NAME, action_on_failure=constants.CANCEL_AND_WAIT, - args=args) + args=args, + ) steps.append(step_config) - parameters = {'JobFlowId': parsed_args.cluster_id, - 'Steps': steps} - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps} + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 def _check_type(self, type): type = type.lower() if type != constants.FULL and type != constants.INCREMENTAL: - raise ParamValidationError('aws: error: invalid type. ' - 'type should be either ' + - constants.FULL + ' or ' + constants.INCREMENTAL + - '.') + raise ParamValidationError( + 'aws: error: invalid type. ' + 'type should be either ' + + constants.FULL + + ' or ' + + constants.INCREMENTAL + + '.' + ) def _check_unit(self, unit): unit = unit.lower() - if (unit != constants.MINUTES and - unit != constants.HOURS and - unit != constants.DAYS): + if ( + unit != constants.MINUTES + and unit != constants.HOURS + and unit != constants.DAYS + ): raise ParamValidationError( 'aws: error: invalid unit. unit should be one of' - ' the following values: ' + constants.MINUTES + - ', ' + constants.HOURS + ' or ' + constants.DAYS + '.' + ' the following values: ' + + constants.MINUTES + + ', ' + + constants.HOURS + + ' or ' + + constants.DAYS + + '.' ) def _build_hbase_schedule_backup_args(self, parsed_args): - args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP, - constants.TRUE, constants.HBASE_BACKUP_DIR, parsed_args.dir] + args = [ + constants.HBASE_MAIN, + constants.HBASE_SCHEDULED_BACKUP, + constants.TRUE, + constants.HBASE_BACKUP_DIR, + parsed_args.dir, + ] type = parsed_args.type.lower() unit = parsed_args.unit.lower() @@ -151,17 +195,28 @@ def _build_hbase_schedule_backup_args(self, parsed_args): class CreateHBaseBackup(Command): NAME = 'create-hbase-backup' - DESCRIPTION = ('Creates a HBase backup in S3. ' + - helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS) + DESCRIPTION = ( + 'Creates a HBase backup in S3. ' + + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'dir', 'required': True, - 'help_text': helptext.HBASE_BACKUP_DIR}, - {'name': 'consistent', 'action': 'store_true', - 'help_text': '

<p>Performs a consistent backup. Pauses all write'
-                      ' operations to the HBase cluster during the backup'
-                      ' process.</p>'}
+        {
+            'name': 'cluster-id',
+            'required': True,
+            'help_text': helptext.CLUSTER_ID,
+        },
+        {
+            'name': 'dir',
+            'required': True,
+            'help_text': helptext.HBASE_BACKUP_DIR,
+        },
+        {
+            'name': 'consistent',
+            'action': 'store_true',
+            'help_text': '<p>Performs a consistent backup. Pauses all write'
+            ' operations to the HBase cluster during the backup'
+            ' process.</p>
    ', + }, ] def _run_main_command(self, parsed_args, parsed_globals): @@ -172,19 +227,23 @@ def _run_main_command(self, parsed_args, parsed_globals): jar=constants.HBASE_JAR_PATH, name=constants.HBASE_BACKUP_STEP_NAME, action_on_failure=constants.CANCEL_AND_WAIT, - args=args) + args=args, + ) steps.append(step_config) - parameters = {'JobFlowId': parsed_args.cluster_id, - 'Steps': steps} - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps} + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 def _build_hbase_backup_args(self, parsed_args): - args = [constants.HBASE_MAIN, - constants.HBASE_BACKUP, - constants.HBASE_BACKUP_DIR, parsed_args.dir] + args = [ + constants.HBASE_MAIN, + constants.HBASE_BACKUP, + constants.HBASE_BACKUP_DIR, + parsed_args.dir, + ] if parsed_args.consistent is True: args.append(constants.HBASE_BACKUP_CONSISTENT) @@ -194,15 +253,26 @@ def _build_hbase_backup_args(self, parsed_args): class DisableHBaseBackups(Command): NAME = 'disable-hbase-backups' - DESCRIPTION = ('Add a step to disable automated HBase backups. ' + - helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS) + DESCRIPTION = ( + 'Add a step to disable automated HBase backups. ' + + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'full', 'action': 'store_true', - 'help_text': 'Disables full backup.'}, - {'name': 'incremental', 'action': 'store_true', - 'help_text': 'Disables incremental backup.'} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'full', + 'action': 'store_true', + 'help_text': 'Disables full backup.', + }, + { + 'name': 'incremental', + 'action': 'store_true', + 'help_text': 'Disables incremental backup.', + }, ] def _run_main_command(self, parsed_args, parsed_globals): @@ -214,22 +284,30 @@ def _run_main_command(self, parsed_args, parsed_globals): constants.HBASE_JAR_PATH, constants.HBASE_SCHEDULE_BACKUP_STEP_NAME, constants.CANCEL_AND_WAIT, - args) + args, + ) steps.append(step_config) - parameters = {'JobFlowId': parsed_args.cluster_id, - 'Steps': steps} - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps} + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 def _build_hbase_disable_backups_args(self, parsed_args): - args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP, - constants.FALSE] + args = [ + constants.HBASE_MAIN, + constants.HBASE_SCHEDULED_BACKUP, + constants.FALSE, + ] if parsed_args.full is False and parsed_args.incremental is False: - error_message = 'Should specify at least one of --' +\ - constants.FULL + ' and --' +\ - constants.INCREMENTAL + '.' + error_message = ( + 'Should specify at least one of --' + + constants.FULL + + ' and --' + + constants.INCREMENTAL + + '.' 
+ ) raise ParamValidationError(error_message) if parsed_args.full is True: args.append(constants.HBASE_DISABLE_FULL_BACKUP) diff --git a/awscli/customizations/emr/hbaseutils.py b/awscli/customizations/emr/hbaseutils.py index 0376dda6a4eb..e3050717fe0f 100644 --- a/awscli/customizations/emr/hbaseutils.py +++ b/awscli/customizations/emr/hbaseutils.py @@ -15,9 +15,12 @@ def build_hbase_restore_from_backup_args(dir, backup_version=None): - args = [constants.HBASE_MAIN, - constants.HBASE_RESTORE, - constants.HBASE_BACKUP_DIR, dir] + args = [ + constants.HBASE_MAIN, + constants.HBASE_RESTORE, + constants.HBASE_BACKUP_DIR, + dir, + ] if backup_version is not None: args.append(constants.HBASE_BACKUP_VERSION_FOR_RESTORE) diff --git a/awscli/customizations/emr/helptext.py b/awscli/customizations/emr/helptext.py index cf8f587bfa7a..fed90bb8266b 100755 --- a/awscli/customizations/emr/helptext.py +++ b/awscli/customizations/emr/helptext.py @@ -11,8 +11,10 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr.createdefaultroles import EMR_ROLE_NAME -from awscli.customizations.emr.createdefaultroles import EC2_ROLE_NAME +from awscli.customizations.emr.createdefaultroles import ( + EC2_ROLE_NAME, + EMR_ROLE_NAME, +) TERMINATE_CLUSTERS = ( 'Shuts down one or more clusters, each specified by cluster ID. ' @@ -27,28 +29,33 @@ 'The command is asynchronous. Depending on the ' 'configuration of the cluster, it may take from 5 to 20 minutes for the ' 'cluster to terminate completely and release allocated resources such as ' - 'Amazon EC2 instances.') + 'Amazon EC2 instances.' +) CLUSTER_ID = ( '

<p>A unique string that identifies a cluster. The '
     'create-cluster command returns this identifier. You can '
-    'use the list-clusters command to get cluster IDs.</p>')
+    'use the list-clusters command to get cluster IDs.</p>'
+)

 HBASE_BACKUP_DIR = (
     '<p>The Amazon S3 location of the HBase backup. Example: '
     's3://mybucket/mybackup, where mybucket is the '
     'specified Amazon S3 bucket and mybackup is the specified backup '
     'location. The path argument must begin with s3://, which '
-    'refers to an Amazon S3 bucket.</p>')
+    'refers to an Amazon S3 bucket.</p>'
+)

 HBASE_BACKUP_VERSION = (
     '<p>The backup version to restore from. If not specified, the latest backup '
-    'in the specified location is used.</p>')
+    'in the specified location is used.</p>'
+)

 # create-cluster options help text
 CREATE_CLUSTER_DESCRIPTION = (
-    'Creates an Amazon EMR cluster with the specified configurations.')
+    'Creates an Amazon EMR cluster with the specified configurations.'
+)

 DESCRIBE_CLUSTER_DESCRIPTION = (
     'Provides cluster-level details including status, hardware '
@@ -58,22 +65,24 @@
     'elasticmapreduce:ListBootstrapActions, '
     'elasticmapreduce:ListInstanceFleets, '
     'elasticmapreduce:DescribeCluster, '
-    'and elasticmapreduce:ListInstanceGroups.')
+    'and elasticmapreduce:ListInstanceGroups.'
+)
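Reviewer note: for anyone cross-checking DESCRIBE_CLUSTER_DESCRIPTION above against behavior, this is roughly the service call that help text documents, driven through botocore directly rather than the CLI plumbing in emrutils. A sketch that assumes configured credentials and region; the cluster id is a placeholder:

```python
# Rough sketch of the API call behind `aws emr describe-cluster`, using
# botocore directly instead of emrutils.get_client/call shown earlier.
# Assumes credentials and region are configured; the cluster id is fake.
import botocore.session

session = botocore.session.get_session()
emr = session.create_client('emr', region_name='us-east-1')
cluster = emr.describe_cluster(ClusterId='j-XXXXXXXXXXXXX')['Cluster']
print(cluster['Status']['State'])
```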

-CLUSTER_NAME = (
-    '<p>The name of the cluster. If not provided, the default is "Development Cluster".</p>')
+CLUSTER_NAME = '<p>The name of the cluster. If not provided, the default is "Development Cluster".</p>'

 LOG_URI = (
     '<p>Specifies the location in Amazon S3 to which log files '
     'are periodically written. If a value is not provided, '
     'log files are not written to Amazon S3 from the master node '
-    'and are lost if the master node terminates.</p>')
+    'and are lost if the master node terminates.</p>'
+)

 LOG_ENCRYPTION_KMS_KEY_ID = (
     '<p>Specifies the KMS Id utilized for log encryption. If a value is '
     'not provided, log files will be encrypted by default encryption method '
     'AES-256. This attribute is only available with EMR version 5.30.0 and later, '
-    'excluding EMR 6.0.0.</p>')
+    'excluding EMR 6.0.0.</p>'
+)

 SERVICE_ROLE = (
     '<p>Specifies an IAM service role, which Amazon EMR requires to call other AWS services '
@@ -82,28 +91,32 @@
     'To specify the default service role, as well as the default instance '
     'profile, use the --use-default-roles parameter. '
     'If the role and instance profile do not already exist, use the '
-    'aws emr create-default-roles command to create them.</p>')
+    'aws emr create-default-roles command to create them.</p>'
+)

 AUTOSCALING_ROLE = (
     '<p>Specify --auto-scaling-role EMR_AutoScaling_DefaultRole'
     ' if an automatic scaling policy is specified for an instance group'
     ' using the --instance-groups parameter. This default'
     ' IAM role allows the automatic scaling feature'
-    ' to launch and terminate Amazon EC2 instances during scaling operations.</p>')
+    ' to launch and terminate Amazon EC2 instances during scaling operations.</p>'
+)

 USE_DEFAULT_ROLES = (
     '<p>Specifies that the cluster should use the default'
     ' service role (EMR_DefaultRole) and instance profile (EMR_EC2_DefaultRole)'
     ' for permissions to access other AWS services.</p> '
     '<p>Make sure that the role and instance profile exist first. To create them,'
-    ' use the create-default-roles command.</p>')
+    ' use the create-default-roles command.</p>'
+)

 AMI_VERSION = (
     '<p>Applies only to Amazon EMR release versions earlier than 4.0. Use'
     ' --release-label for 4.0 and later. Specifies'
     ' the version of Amazon Linux Amazon Machine Image (AMI)'
     ' to use when launching Amazon EC2 instances in the cluster.'
-    ' For example, --ami-version 3.1.0.')
+    ' For example, --ami-version 3.1.0.'
+)

 RELEASE_LABEL = (
     '<p>Specifies the Amazon EMR release version, which determines'
@@ -115,12 +128,14 @@
     '<p>https://docs.aws.amazon.com/emr/latest/ReleaseGuide</p>'
     '<p>Use --release-label only for Amazon EMR release version 4.0'
     ' and later. Use --ami-version for earlier versions.'
-    ' You cannot specify both a release label and AMI version.</p>')
+    ' You cannot specify both a release label and AMI version.</p>'
+)
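Reviewer note: the last paragraph of RELEASE_LABEL above states a mutual-exclusion rule between --release-label and --ami-version. A simplified, hypothetical sketch of that rule; the real validation lives in the create-cluster command, which this diff does not touch:

```python
# Hypothetical, condensed version of the rule described above: a cluster
# request carries either ReleaseLabel (EMR >= 4.0) or AmiVersion, never both.
def resolve_release(release_label=None, ami_version=None):
    if release_label and ami_version:
        raise ValueError(
            'You cannot specify both a release label and an AMI version.'
        )
    if release_label:
        return {'ReleaseLabel': release_label}
    return {'AmiVersion': ami_version}


# 'emr-5.36.0' is a placeholder release label.
assert resolve_release(release_label='emr-5.36.0') == {
    'ReleaseLabel': 'emr-5.36.0'
}
```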

 OS_RELEASE_LABEL = (
     '<p>Specifies a particular Amazon Linux release for all nodes in a cluster'
-    ' launch request. If a release is not specified, EMR uses the latest validated'
-    ' Amazon Linux release for cluster launch.</p>')
+    ' launch request. If a release is not specified, EMR uses the latest validated'
+    ' Amazon Linux release for cluster launch.</p>'
+)

 CONFIGURATIONS = (
     '

    Specifies a JSON file that contains configuration classifications,' @@ -134,7 +149,8 @@ ' file for an application, such as yarn-site for YARN. For a list of' ' available configuration classifications and example JSON, see' ' the following topic in the Amazon EMR Release Guide:

    ' - '

    https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html

    ') + '

    https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html

    ' +) INSTANCE_GROUPS = ( '

    Specifies the number and type of Amazon EC2 instances' @@ -163,7 +179,8 @@ '

  • [EbsConfiguration] - Specifies additional Amazon EBS storage volumes attached' ' to EC2 instances using an inline JSON structure.
  • ' '
  • [AutoScalingPolicy] - Specifies an automatic scaling policy for the' - ' instance group using an inline JSON structure.
  • ') + ' instance group using an inline JSON structure.' +) INSTANCE_FLEETS = ( '

    Applies only to Amazon EMR release version 5.0 and later. Specifies' @@ -195,7 +212,8 @@ '

  • InstanceTypeConfigs - Specify up to five EC2 instance types to' ' use in the instance fleet, including details such as Spot price and Amazon EBS configuration.' ' When you use an On-Demand or Spot Instance allocation strategy,' - ' you can specify up to 30 instance types per instance fleet.
  • ') + ' you can specify up to 30 instance types per instance fleet.' +) INSTANCE_TYPE = ( '

    Shortcut parameter as an alternative to --instance-groups.' @@ -204,18 +222,21 @@ ' the cluster consists of a single master node running on the EC2 instance type' ' specified. When used together with --instance-count,' ' one instance is used for the master node, and the remainder' - ' are used for the core node type.

    ') + ' are used for the core node type.

    ' +) INSTANCE_COUNT = ( '

    Shortcut parameter as an alternative to --instance-groups' ' when used together with --instance-type. Specifies the' ' number of Amazon EC2 instances to create for a cluster.' ' One instance is used for the master node, and the remainder' - ' are used for the core node type.

    ') + ' are used for the core node type.

    ' +) ADDITIONAL_INFO = ( '

    Specifies additional information during cluster creation. To set development mode when starting your EMR cluster,' - ' set this parameter to {"clusterType":"development"}.

    ') + ' set this parameter to {"clusterType":"development"}.

    ' +) EC2_ATTRIBUTES = ( '

    Configures cluster and Amazon EC2 instance configurations. Accepts' @@ -227,10 +248,10 @@ ' For example, us-west-1b. AvailabilityZone is used for uniform instance groups,' ' while AvailabilityZones (plural) is used for instance fleets.' '

  • AvailabilityZones - Applies to clusters that use the instance fleet configuration.' - ' When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances' + ' When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances' ' in the optimal Availability Zone. AvailabilityZone is used for uniform instance groups,' ' while AvailabilityZones (plural) is used for instance fleets.
  • ' - '
  • SubnetId - Applies to clusters that use the uniform instance group configuration.' + '
  • SubnetId - Applies to clusters that use the uniform instance group configuration.' ' Specify the VPC subnet in which to create the cluster. SubnetId is used for uniform instance groups,' ' while SubnetIds (plural) is used for instance fleets.
  • ' '
  • SubnetIds - Applies to clusters that use the instance fleet configuration.' @@ -249,16 +270,19 @@ '
  • AdditionalMasterSecurityGroups - A list of additional Amazon EC2' ' security group IDs for the master node.
  • ' '
  • AdditionalSlaveSecurityGroups - A list of additional Amazon EC2' - ' security group IDs for the slave nodes.
  • ') + ' security group IDs for the slave nodes.' +) AUTO_TERMINATE = ( '

    Specifies whether the cluster should terminate after' - ' completing all the steps. Auto termination is off by default.

    ') + ' completing all the steps. Auto termination is off by default.

    ' +) TERMINATION_PROTECTED = ( '

    Specifies whether to lock the cluster to prevent the' ' Amazon EC2 instances from being terminated by API call,' - ' user intervention, or an error.

    ') + ' user intervention, or an error.

    ' +) SCALE_DOWN_BEHAVIOR = ( '

    Specifies the way that individual Amazon EC2 instances terminate' @@ -276,7 +300,8 @@ ' of the AWS account associated with the cluster. If a user' ' has the proper policy permissions set, they can also manage the cluster.

    ' '

    Visibility is on by default. The --no-visible-to-all-users option' - ' is no longer supported. To restrict cluster visibility, use an IAM policy.

    ') + ' is no longer supported. To restrict cluster visibility, use an IAM policy.

    ' +) DEBUGGING = ( '

    Specifies that the debugging tool is enabled for the cluster,' @@ -284,7 +309,8 @@ ' Turning debugging on requires that you specify --log-uri' ' because log files must be stored in Amazon S3 so that' ' Amazon EMR can index them for viewing in the console.' - ' Effective January 23, 2023, Amazon EMR will discontinue the debugging tool for all versions.

    ') + ' Effective January 23, 2023, Amazon EMR will discontinue the debugging tool for all versions.

    ' +) TAGS = ( '

    A list of tags to associate with a cluster, which apply to' @@ -294,7 +320,8 @@ ' with a maximum of 256 characters.

    ' '

    You can specify tags in key=value format or you can add a' ' tag without a value using only the key name, for example key.' - ' Use a space to separate multiple tags.

    ') + ' Use a space to separate multiple tags.

    ' +) BOOTSTRAP_ACTIONS = ( '

    Specifies a list of bootstrap actions to run on each EC2 instance when' @@ -317,7 +344,8 @@ ' to pass to the bootstrap action script. Arguments can be' ' either a list of values (Args=arg1,arg2,arg3)' ' or a list of key-value pairs, as well as optional values,' - ' enclosed in square brackets (Args=[arg1,arg2=arg2value,arg3]).') + ' enclosed in square brackets (Args=[arg1,arg2=arg2value,arg3]).' +) APPLICATIONS = ( '

    Specifies the applications to install on the cluster.' @@ -329,7 +357,8 @@ ' some applications take optional arguments for configuration.' ' Arguments should either be a comma-separated list of values' ' (Args=arg1,arg2,arg3) or a bracket-enclosed list of values' - ' and key-value pairs (Args=[arg1,arg2=arg3,arg4]).

    ') + ' and key-value pairs (Args=[arg1,arg2=arg3,arg4]).

    ' +) EMR_FS = ( '

    Specifies EMRFS configuration options, such as consistent view' @@ -340,13 +369,15 @@ ' to configure EMRFS, and use security configurations' ' to configure encryption for EMRFS data in Amazon S3 instead.' ' For more information, see the following topic in the Amazon EMR Management Guide:

    ' - '

    https://docs.aws.amazon.com/emr/latest/ManagementGuide/emrfs-configure-consistent-view.html

    ') + '

    https://docs.aws.amazon.com/emr/latest/ManagementGuide/emrfs-configure-consistent-view.html

    ' +) RESTORE_FROM_HBASE = ( '

    Applies only when using Amazon EMR release versions earlier than 4.0.' ' Launches a new HBase cluster and populates it with' ' data from a previous backup of an HBase cluster. HBase' - ' must be installed using the --applications option.

    ') + ' must be installed using the --applications option.

    ' +) STEPS = ( '

    Specifies a list of steps to be executed by the cluster. Steps run' @@ -356,27 +387,32 @@ ' or by specifying an inline JSON structure. Args supplied with steps' ' should be a comma-separated list of values (Args=arg1,arg2,arg3) or' ' a bracket-enclosed list of values and key-value' - ' pairs (Args=[arg1,arg2=value,arg4).

    ') + ' pairs (Args=[arg1,arg2=value,arg4).

    ' +) INSTALL_APPLICATIONS = ( '

    The applications to be installed.' ' Takes the following parameters: ' - 'Name and Args.

    ') + 'Name and Args.

    ' +) EBS_ROOT_VOLUME_SIZE = ( '

    This option is available only with Amazon EMR version 4.x and later. Specifies the size,' ' in GiB, of the EBS root device volume of the Amazon Linux AMI' - ' that is used for each EC2 instance in the cluster.

    ') + ' that is used for each EC2 instance in the cluster.

    ' +) EBS_ROOT_VOLUME_IOPS = ( '

    This option is available only with Amazon EMR version 6.15.0 and later. Specifies the IOPS,' ' of the EBS root device volume of the Amazon Linux AMI' - ' that is used for each EC2 instance in the cluster.

    ') + ' that is used for each EC2 instance in the cluster.

    ' +) EBS_ROOT_VOLUME_THROUGHPUT = ( '

    This option is available only with Amazon EMR version 6.15.0 and later. Specifies the throughput,' ' in MiB/s, of the EBS root device volume of the Amazon Linux AMI' - ' that is used for each EC2 instance in the cluster.

    ') + ' that is used for each EC2 instance in the cluster.

    ' +) SECURITY_CONFIG = ( @@ -386,7 +422,8 @@ ' the following topic in the Amazon EMR Management Guide:

    ' '

    https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-encryption-enable-security-configuration.html

    ' '

    Use list-security-configurations to get a list of available' - ' security configurations in the active account.

    ') + ' security configurations in the active account.

    ' +) CUSTOM_AMI_ID = ( '

    Applies only to Amazon EMR release version 5.7.0 and later.' @@ -396,7 +433,8 @@ ' can also be used instead of bootstrap actions to customize' ' cluster node configurations. For more information, see' ' the following topic in the Amazon EMR Management Guide:

    ' - '

    https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html

    ') + '

    https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html

    ' +) REPO_UPGRADE_ON_BOOT = ( '

    Applies only when a --custom-ami-id is' @@ -405,24 +443,26 @@ ' before other services start. You can set this parameter' ' using --rep-upgrade-on-boot NONE to' ' disable these updates. CAUTION: This creates additional' - ' security risks.

    ') + ' security risks.

    ' +) KERBEROS_ATTRIBUTES = ( - '

    Specifies required cluster attributes for Kerberos when Kerberos authentication' - ' is enabled in the specified --security-configuration.' - ' Takes the following arguments:

    ' - '
  • Realm - Specifies the name of the Kerberos' - ' realm to which all nodes in a cluster belong. For example,' - ' Realm=EC2.INTERNAL.
  • ' - '
  • KdcAdminPassword - Specifies the password used within the cluster' - ' for the kadmin service, which maintains Kerberos principals, password' - ' policies, and keytabs for the cluster.
  • ' - '
  • CrossRealmTrustPrincipalPassword - Required when establishing a cross-realm trust' - ' with a KDC in a different realm. This is the cross-realm principal password,' - ' which must be identical across realms.
  • ' - '
  • ADDomainJoinUser - Required when establishing trust with an Active Directory' - ' domain. This is the User logon name of an AD account with sufficient privileges to join resources to the domain.
  • ' - '
  • ADDomainJoinPassword - The AD password for ADDomainJoinUser.
  • ') + '

    Specifies required cluster attributes for Kerberos when Kerberos authentication' + ' is enabled in the specified --security-configuration.' + ' Takes the following arguments:

    ' + '
  • Realm - Specifies the name of the Kerberos' + ' realm to which all nodes in a cluster belong. For example,' + ' Realm=EC2.INTERNAL.
  • ' + '
  • KdcAdminPassword - Specifies the password used within the cluster' + ' for the kadmin service, which maintains Kerberos principals, password' + ' policies, and keytabs for the cluster.
  • ' + '
  • CrossRealmTrustPrincipalPassword - Required when establishing a cross-realm trust' + ' with a KDC in a different realm. This is the cross-realm principal password,' + ' which must be identical across realms.
  • ' + '
  • ADDomainJoinUser - Required when establishing trust with an Active Directory' + ' domain. This is the User logon name of an AD account with sufficient privileges to join resources to the domain.
  • ' + '
  • ADDomainJoinPassword - The AD password for ADDomainJoinUser.
  • ' +) # end create-cluster options help descriptions @@ -437,7 +477,8 @@ '
  • WAITING
  • ' '
  • TERMINATING
  • ' '
  • TERMINATED
  • ' - '
  • TERMINATED_WITH_ERRORS
  • ') + '
  • TERMINATED_WITH_ERRORS
  • ' +) LIST_CLUSTERS_STATE_FILTERS = ( '

    Shortcut options for --cluster-states. The' @@ -446,41 +487,50 @@ ' are STARTING,BOOTSTRAPPING,' ' RUNNING, WAITING, or TERMINATING. ' '

  • --terminated - list only clusters that are TERMINATED.
  • ' - '
  • --failed - list only clusters that are TERMINATED_WITH_ERRORS.
  • ') + '
  • --failed - list only clusters that are TERMINATED_WITH_ERRORS.
  • ' +) LIST_CLUSTERS_CREATED_AFTER = ( '

    List only those clusters created after the date and time' ' specified in the format yyyy-mm-ddThh:mm:ss. For example,' - ' --created-after 2017-07-04T00:01:30.

    ') + ' --created-after 2017-07-04T00:01:30.

    ' +) LIST_CLUSTERS_CREATED_BEFORE = ( '

    List only those clusters created before the date and time' ' specified in the format yyyy-mm-ddThh:mm:ss. For example,' - ' --created-before 2017-07-04T00:01:30.

    ') + ' --created-before 2017-07-04T00:01:30.

    ' +) EMR_MANAGED_MASTER_SECURITY_GROUP = ( '

    The identifier of the Amazon EC2 security group ' - 'for the master node.

    ') + 'for the master node.

    ' +) EMR_MANAGED_SLAVE_SECURITY_GROUP = ( '

    The identifier of the Amazon EC2 security group ' - 'for the slave nodes.

    ') + 'for the slave nodes.

    ' +) SERVICE_ACCESS_SECURITY_GROUP = ( '

    The identifier of the Amazon EC2 security group ' - 'for Amazon EMR to access clusters in VPC private subnets.

    ') + 'for Amazon EMR to access clusters in VPC private subnets.

    ' +) ADDITIONAL_MASTER_SECURITY_GROUPS = ( '

    A list of additional Amazon EC2 security group IDs for ' - 'the master node

    ') + 'the master node

    ' +) ADDITIONAL_SLAVE_SECURITY_GROUPS = ( '

    A list of additional Amazon EC2 security group IDs for ' - 'the slave nodes.

    ') + 'the slave nodes.

    ' +) AVAILABLE_ONLY_FOR_AMI_VERSIONS = ( 'This command is only available when using Amazon EMR versions' - 'earlier than 4.0.') + 'earlier than 4.0.' +) STEP_CONCURRENCY_LEVEL = ( 'This command specifies the step concurrency level of the cluster.' @@ -498,10 +548,10 @@ ) PLACEMENT_GROUP_CONFIGS = ( - '

    Placement group configuration for an Amazon EMR ' - 'cluster. The configuration specifies the EC2 placement group ' - 'strategy associated with each EMR Instance Role.

    ' - '

    Currently, we support placement group only for MASTER ' + '

    Placement group configuration for an Amazon EMR ' + 'cluster. The configuration specifies the EC2 placement group ' + 'strategy associated with each EMR Instance Role.

    ' + '

    Currently, we support placement group only for MASTER ' 'role with SPREAD strategy by default. You can opt-in by ' 'passing --placement-group-configs InstanceRole=MASTER ' 'during cluster creation.

    ' diff --git a/awscli/customizations/emr/installapplications.py b/awscli/customizations/emr/installapplications.py index e73e57b9d22e..6212a6b14c99 100644 --- a/awscli/customizations/emr/installapplications.py +++ b/awscli/customizations/emr/installapplications.py @@ -12,41 +12,52 @@ # language governing permissions and limitations under the License. -from awscli.customizations.emr import applicationutils -from awscli.customizations.emr import argumentschema -from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import helptext +from awscli.customizations.emr import ( + applicationutils, + argumentschema, + constants, + emrutils, + helptext, +) from awscli.customizations.emr.command import Command from awscli.customizations.exceptions import ParamValidationError class InstallApplications(Command): NAME = 'install-applications' - DESCRIPTION = ('Installs applications on a running cluster. Currently only' - ' Hive and Pig can be installed using this command, and' - ' this command is only supported by AMI versions' - ' (3.x and 2.x).') + DESCRIPTION = ( + 'Installs applications on a running cluster. Currently only' + ' Hive and Pig can be installed using this command, and' + ' this command is only supported by AMI versions' + ' (3.x and 2.x).' + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'applications', 'required': True, - 'help_text': helptext.INSTALL_APPLICATIONS, - 'schema': argumentschema.APPLICATIONS_SCHEMA}, + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'applications', + 'required': True, + 'help_text': helptext.INSTALL_APPLICATIONS, + 'schema': argumentschema.APPLICATIONS_SCHEMA, + }, ] # Applications supported by the install-applications command. supported_apps = ['HIVE', 'PIG'] def _run_main_command(self, parsed_args, parsed_globals): - parameters = {'JobFlowId': parsed_args.cluster_id} self._check_for_supported_apps(parsed_args.applications) parameters['Steps'] = applicationutils.build_applications( - self.region, parsed_args.applications)[2] + self.region, parsed_args.applications + )[2] - emrutils.call_and_display_response(self._session, 'AddJobFlowSteps', - parameters, parsed_globals) + emrutils.call_and_display_response( + self._session, 'AddJobFlowSteps', parameters, parsed_globals + ) return 0 def _check_for_supported_apps(self, parsed_applications): @@ -58,10 +69,12 @@ def _check_for_supported_apps(self, parsed_applications): raise ParamValidationError( "aws: error: " + app_config['Name'] + " cannot be" " installed on a running cluster. 'Name' should be one" - " of the following: " + - ', '.join(self.supported_apps)) + " of the following: " + ', '.join(self.supported_apps) + ) else: raise ParamValidationError( - "aws: error: Unknown application: " + app_config['Name'] + - ". 'Name' should be one of the following: " + - ', '.join(constants.APPLICATIONS)) + "aws: error: Unknown application: " + + app_config['Name'] + + ". 'Name' should be one of the following: " + + ', '.join(constants.APPLICATIONS) + ) diff --git a/awscli/customizations/emr/instancefleetsutils.py b/awscli/customizations/emr/instancefleetsutils.py index 02d8f0b7a826..4c60599487d3 100644 --- a/awscli/customizations/emr/instancefleetsutils.py +++ b/awscli/customizations/emr/instancefleetsutils.py @@ -11,8 +11,7 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import constants -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import constants, exceptions def validate_and_build_instance_fleets(parsed_instance_fleets): @@ -31,41 +30,71 @@ def validate_and_build_instance_fleets(parsed_instance_fleets): instance_fleet_config['Name'] = instance_fleet['Name'] else: instance_fleet_config['Name'] = instance_fleet['InstanceFleetType'] - instance_fleet_config['InstanceFleetType'] = instance_fleet['InstanceFleetType'] + instance_fleet_config['InstanceFleetType'] = instance_fleet[ + 'InstanceFleetType' + ] if 'TargetOnDemandCapacity' in keys: - instance_fleet_config['TargetOnDemandCapacity'] = instance_fleet['TargetOnDemandCapacity'] + instance_fleet_config['TargetOnDemandCapacity'] = instance_fleet[ + 'TargetOnDemandCapacity' + ] if 'TargetSpotCapacity' in keys: - instance_fleet_config['TargetSpotCapacity'] = instance_fleet['TargetSpotCapacity'] + instance_fleet_config['TargetSpotCapacity'] = instance_fleet[ + 'TargetSpotCapacity' + ] if 'InstanceTypeConfigs' in keys: - instance_fleet_config['InstanceTypeConfigs'] = instance_fleet['InstanceTypeConfigs'] + instance_fleet_config['InstanceTypeConfigs'] = instance_fleet[ + 'InstanceTypeConfigs' + ] if 'LaunchSpecifications' in keys: - instanceFleetProvisioningSpecifications = instance_fleet['LaunchSpecifications'] + instanceFleetProvisioningSpecifications = instance_fleet[ + 'LaunchSpecifications' + ] instance_fleet_config['LaunchSpecifications'] = {} if 'SpotSpecification' in instanceFleetProvisioningSpecifications: - instance_fleet_config['LaunchSpecifications']['SpotSpecification'] = \ - instanceFleetProvisioningSpecifications['SpotSpecification'] + instance_fleet_config['LaunchSpecifications'][ + 'SpotSpecification' + ] = instanceFleetProvisioningSpecifications[ + 'SpotSpecification' + ] - if 'OnDemandSpecification' in instanceFleetProvisioningSpecifications: - instance_fleet_config['LaunchSpecifications']['OnDemandSpecification'] = \ - instanceFleetProvisioningSpecifications['OnDemandSpecification'] + if ( + 'OnDemandSpecification' + in instanceFleetProvisioningSpecifications + ): + instance_fleet_config['LaunchSpecifications'][ + 'OnDemandSpecification' + ] = instanceFleetProvisioningSpecifications[ + 'OnDemandSpecification' + ] if 'ResizeSpecifications' in keys: - instanceFleetResizeSpecifications = instance_fleet['ResizeSpecifications'] + instanceFleetResizeSpecifications = instance_fleet[ + 'ResizeSpecifications' + ] instance_fleet_config['ResizeSpecifications'] = {} if 'SpotResizeSpecification' in instanceFleetResizeSpecifications: - instance_fleet_config['ResizeSpecifications']['SpotResizeSpecification'] = \ - instanceFleetResizeSpecifications['SpotResizeSpecification'] + instance_fleet_config['ResizeSpecifications'][ + 'SpotResizeSpecification' + ] = instanceFleetResizeSpecifications[ + 'SpotResizeSpecification' + ] + + if ( + 'OnDemandResizeSpecification' + in instanceFleetResizeSpecifications + ): + instance_fleet_config['ResizeSpecifications'][ + 'OnDemandResizeSpecification' + ] = instanceFleetResizeSpecifications[ + 'OnDemandResizeSpecification' + ] - if 'OnDemandResizeSpecification' in instanceFleetResizeSpecifications: - instance_fleet_config['ResizeSpecifications']['OnDemandResizeSpecification'] = \ - instanceFleetResizeSpecifications['OnDemandResizeSpecification'] - if 'Context' in keys: instance_fleet_config['Context'] = 
instance_fleet['Context'] diff --git a/awscli/customizations/emr/instancegroupsutils.py b/awscli/customizations/emr/instancegroupsutils.py index 258032fef502..470b5eb50bc5 100644 --- a/awscli/customizations/emr/instancegroupsutils.py +++ b/awscli/customizations/emr/instancegroupsutils.py @@ -11,8 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import constants -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import constants, exceptions def build_instance_groups(parsed_instance_groups): @@ -44,7 +43,9 @@ def build_instance_groups(parsed_instance_groups): ig_config['EbsConfiguration'] = instance_group['EbsConfiguration'] if 'AutoScalingPolicy' in keys: - ig_config['AutoScalingPolicy'] = instance_group['AutoScalingPolicy'] + ig_config['AutoScalingPolicy'] = instance_group[ + 'AutoScalingPolicy' + ] if 'Configurations' in keys: ig_config['Configurations'] = instance_group['Configurations'] @@ -56,8 +57,7 @@ def build_instance_groups(parsed_instance_groups): return instance_groups -def _build_instance_group( - instance_type, instance_count, instance_group_type): +def _build_instance_group(instance_type, instance_count, instance_group_type): ig_config = {} ig_config['InstanceType'] = instance_type ig_config['InstanceCount'] = instance_count @@ -68,13 +68,14 @@ def _build_instance_group( def validate_and_build_instance_groups( - instance_groups, instance_type, instance_count): - if (instance_groups is None and instance_type is None): + instance_groups, instance_type, instance_count +): + if instance_groups is None and instance_type is None: raise exceptions.MissingRequiredInstanceGroupsError - if (instance_groups is not None and - (instance_type is not None or - instance_count is not None)): + if instance_groups is not None and ( + instance_type is not None or instance_count is not None + ): raise exceptions.InstanceGroupsValidationError if instance_groups is not None: @@ -84,13 +85,15 @@ def validate_and_build_instance_groups( master_ig = _build_instance_group( instance_type=instance_type, instance_count=1, - instance_group_type="MASTER") + instance_group_type="MASTER", + ) instance_groups.append(master_ig) if instance_count is not None and int(instance_count) > 1: core_ig = _build_instance_group( instance_type=instance_type, instance_count=int(instance_count) - 1, - instance_group_type="CORE") + instance_group_type="CORE", + ) instance_groups.append(core_ig) return instance_groups diff --git a/awscli/customizations/emr/listclusters.py b/awscli/customizations/emr/listclusters.py index 04b69f3f57ae..cec17e9c78e7 100644 --- a/awscli/customizations/emr/listclusters.py +++ b/awscli/customizations/emr/listclusters.py @@ -13,41 +13,46 @@ from awscli.arguments import CustomArgument -from awscli.customizations.emr import helptext -from awscli.customizations.emr import exceptions -from awscli.customizations.emr import constants +from awscli.customizations.emr import constants, exceptions, helptext def modify_list_clusters_argument(argument_table, **kwargs): - argument_table['cluster-states'] = \ - ClusterStatesArgument( - name='cluster-states', - help_text=helptext.LIST_CLUSTERS_CLUSTER_STATES, - nargs='+') - argument_table['active'] = \ - ActiveStateArgument( - name='active', help_text=helptext.LIST_CLUSTERS_STATE_FILTERS, - action='store_true', group_name='states_filter') - argument_table['terminated'] = \ - TerminatedStateArgument( - 
name='terminated', - action='store_true', group_name='states_filter') - argument_table['failed'] = \ - FailedStateArgument( - name='failed', action='store_true', group_name='states_filter') + argument_table['cluster-states'] = ClusterStatesArgument( + name='cluster-states', + help_text=helptext.LIST_CLUSTERS_CLUSTER_STATES, + nargs='+', + ) + argument_table['active'] = ActiveStateArgument( + name='active', + help_text=helptext.LIST_CLUSTERS_STATE_FILTERS, + action='store_true', + group_name='states_filter', + ) + argument_table['terminated'] = TerminatedStateArgument( + name='terminated', action='store_true', group_name='states_filter' + ) + argument_table['failed'] = FailedStateArgument( + name='failed', action='store_true', group_name='states_filter' + ) argument_table['created-before'] = CreatedBefore( - name='created-before', help_text=helptext.LIST_CLUSTERS_CREATED_BEFORE, - cli_type_name='timestamp') + name='created-before', + help_text=helptext.LIST_CLUSTERS_CREATED_BEFORE, + cli_type_name='timestamp', + ) argument_table['created-after'] = CreatedAfter( - name='created-after', help_text=helptext.LIST_CLUSTERS_CREATED_AFTER, - cli_type_name='timestamp') + name='created-after', + help_text=helptext.LIST_CLUSTERS_CREATED_AFTER, + cli_type_name='timestamp', + ) class ClusterStatesArgument(CustomArgument): def add_to_params(self, parameters, value): if value is not None: - if (parameters.get('ClusterStates') is not None and - len(parameters.get('ClusterStates')) > 0): + if ( + parameters.get('ClusterStates') is not None + and len(parameters.get('ClusterStates')) > 0 + ): raise exceptions.ClusterStatesFilterValidationError() parameters['ClusterStates'] = value @@ -55,8 +60,10 @@ def add_to_params(self, parameters, value): class ActiveStateArgument(CustomArgument): def add_to_params(self, parameters, value): if value is True: - if (parameters.get('ClusterStates') is not None and - len(parameters.get('ClusterStates')) > 0): + if ( + parameters.get('ClusterStates') is not None + and len(parameters.get('ClusterStates')) > 0 + ): raise exceptions.ClusterStatesFilterValidationError() parameters['ClusterStates'] = constants.LIST_CLUSTERS_ACTIVE_STATES @@ -64,18 +71,23 @@ def add_to_params(self, parameters, value): class TerminatedStateArgument(CustomArgument): def add_to_params(self, parameters, value): if value is True: - if (parameters.get('ClusterStates') is not None and - len(parameters.get('ClusterStates')) > 0): + if ( + parameters.get('ClusterStates') is not None + and len(parameters.get('ClusterStates')) > 0 + ): raise exceptions.ClusterStatesFilterValidationError() - parameters['ClusterStates'] = \ + parameters['ClusterStates'] = ( constants.LIST_CLUSTERS_TERMINATED_STATES + ) class FailedStateArgument(CustomArgument): def add_to_params(self, parameters, value): if value is True: - if (parameters.get('ClusterStates') is not None and - len(parameters.get('ClusterStates')) > 0): + if ( + parameters.get('ClusterStates') is not None + and len(parameters.get('ClusterStates')) > 0 + ): raise exceptions.ClusterStatesFilterValidationError() parameters['ClusterStates'] = constants.LIST_CLUSTERS_FAILED_STATES diff --git a/awscli/customizations/emr/modifyclusterattributes.py b/awscli/customizations/emr/modifyclusterattributes.py index 888dce8489d7..c5e6035c5a64 100644 --- a/awscli/customizations/emr/modifyclusterattributes.py +++ b/awscli/customizations/emr/modifyclusterattributes.py @@ -11,103 +11,176 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import exceptions -from awscli.customizations.emr import helptext +from awscli.customizations.emr import emrutils, exceptions, helptext from awscli.customizations.emr.command import Command class ModifyClusterAttr(Command): NAME = 'modify-cluster-attributes' - DESCRIPTION = ("Modifies the cluster attributes 'visible-to-all-users', " - " 'termination-protected' and 'unhealthy-node-replacement'.") + DESCRIPTION = ( + "Modifies the cluster attributes 'visible-to-all-users', " + " 'termination-protected' and 'unhealthy-node-replacement'." + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': helptext.CLUSTER_ID}, - {'name': 'visible-to-all-users', 'required': False, 'action': - 'store_true', 'group_name': 'visible', - 'help_text': helptext.VISIBILITY}, - {'name': 'no-visible-to-all-users', 'required': False, 'action': - 'store_true', 'group_name': 'visible', - 'help_text': helptext.VISIBILITY}, - {'name': 'termination-protected', 'required': False, 'action': - 'store_true', 'group_name': 'terminate', - 'help_text': 'Set termination protection on or off'}, - {'name': 'no-termination-protected', 'required': False, 'action': - 'store_true', 'group_name': 'terminate', - 'help_text': 'Set termination protection on or off'}, - {'name': 'auto-terminate', 'required': False, 'action': - 'store_true', 'group_name': 'auto_terminate', - 'help_text': 'Set cluster auto terminate after completing all the steps on or off'}, - {'name': 'no-auto-terminate', 'required': False, 'action': - 'store_true', 'group_name': 'auto_terminate', - 'help_text': 'Set cluster auto terminate after completing all the steps on or off'}, - {'name': 'unhealthy-node-replacement', 'required': False, 'action': - 'store_true', 'group_name': 'UnhealthyReplacement', - 'help_text': 'Set Unhealthy Node Replacement on or off'}, - {'name': 'no-unhealthy-node-replacement', 'required': False, 'action': - 'store_true', 'group_name': 'UnhealthyReplacement', - 'help_text': 'Set Unhealthy Node Replacement on or off'}, + { + 'name': 'cluster-id', + 'required': True, + 'help_text': helptext.CLUSTER_ID, + }, + { + 'name': 'visible-to-all-users', + 'required': False, + 'action': 'store_true', + 'group_name': 'visible', + 'help_text': helptext.VISIBILITY, + }, + { + 'name': 'no-visible-to-all-users', + 'required': False, + 'action': 'store_true', + 'group_name': 'visible', + 'help_text': helptext.VISIBILITY, + }, + { + 'name': 'termination-protected', + 'required': False, + 'action': 'store_true', + 'group_name': 'terminate', + 'help_text': 'Set termination protection on or off', + }, + { + 'name': 'no-termination-protected', + 'required': False, + 'action': 'store_true', + 'group_name': 'terminate', + 'help_text': 'Set termination protection on or off', + }, + { + 'name': 'auto-terminate', + 'required': False, + 'action': 'store_true', + 'group_name': 'auto_terminate', + 'help_text': 'Set cluster auto terminate after completing all the steps on or off', + }, + { + 'name': 'no-auto-terminate', + 'required': False, + 'action': 'store_true', + 'group_name': 'auto_terminate', + 'help_text': 'Set cluster auto terminate after completing all the steps on or off', + }, + { + 'name': 'unhealthy-node-replacement', + 'required': False, + 'action': 'store_true', + 'group_name': 'UnhealthyReplacement', + 'help_text': 'Set Unhealthy Node Replacement on or off', + }, 
+ { + 'name': 'no-unhealthy-node-replacement', + 'required': False, + 'action': 'store_true', + 'group_name': 'UnhealthyReplacement', + 'help_text': 'Set Unhealthy Node Replacement on or off', + }, ] def _run_main_command(self, args, parsed_globals): - - if (args.visible_to_all_users and args.no_visible_to_all_users): + if args.visible_to_all_users and args.no_visible_to_all_users: raise exceptions.MutualExclusiveOptionError( option1='--visible-to-all-users', - option2='--no-visible-to-all-users') - if (args.termination_protected and args.no_termination_protected): + option2='--no-visible-to-all-users', + ) + if args.termination_protected and args.no_termination_protected: raise exceptions.MutualExclusiveOptionError( option1='--termination-protected', - option2='--no-termination-protected') - if (args.auto_terminate and args.no_auto_terminate): + option2='--no-termination-protected', + ) + if args.auto_terminate and args.no_auto_terminate: raise exceptions.MutualExclusiveOptionError( - option1='--auto-terminate', - option2='--no-auto-terminate') - if (args.unhealthy_node_replacement and args.no_unhealthy_node_replacement): + option1='--auto-terminate', option2='--no-auto-terminate' + ) + if ( + args.unhealthy_node_replacement + and args.no_unhealthy_node_replacement + ): raise exceptions.MutualExclusiveOptionError( option1='--unhealthy-node-replacement', - option2='--no-unhealthy-node-replacement') - if not(args.termination_protected or args.no_termination_protected or - args.visible_to_all_users or args.no_visible_to_all_users or - args.auto_terminate or args.no_auto_terminate or - args.unhealthy_node_replacement or args.no_unhealthy_node_replacement): + option2='--no-unhealthy-node-replacement', + ) + if not ( + args.termination_protected + or args.no_termination_protected + or args.visible_to_all_users + or args.no_visible_to_all_users + or args.auto_terminate + or args.no_auto_terminate + or args.unhealthy_node_replacement + or args.no_unhealthy_node_replacement + ): raise exceptions.MissingClusterAttributesError() - if (args.visible_to_all_users or args.no_visible_to_all_users): - visible = (args.visible_to_all_users and - not args.no_visible_to_all_users) - parameters = {'JobFlowIds': [args.cluster_id], - 'VisibleToAllUsers': visible} - emrutils.call_and_display_response(self._session, - 'SetVisibleToAllUsers', - parameters, parsed_globals) + if args.visible_to_all_users or args.no_visible_to_all_users: + visible = ( + args.visible_to_all_users and not args.no_visible_to_all_users + ) + parameters = { + 'JobFlowIds': [args.cluster_id], + 'VisibleToAllUsers': visible, + } + emrutils.call_and_display_response( + self._session, + 'SetVisibleToAllUsers', + parameters, + parsed_globals, + ) + + if args.termination_protected or args.no_termination_protected: + protected = ( + args.termination_protected + and not args.no_termination_protected + ) + parameters = { + 'JobFlowIds': [args.cluster_id], + 'TerminationProtected': protected, + } + emrutils.call_and_display_response( + self._session, + 'SetTerminationProtection', + parameters, + parsed_globals, + ) - if (args.termination_protected or args.no_termination_protected): - protected = (args.termination_protected and - not args.no_termination_protected) - parameters = {'JobFlowIds': [args.cluster_id], - 'TerminationProtected': protected} - emrutils.call_and_display_response(self._session, - 'SetTerminationProtection', - parameters, parsed_globals) + if args.auto_terminate or args.no_auto_terminate: + auto_terminate = args.auto_terminate 
and not args.no_auto_terminate + parameters = { + 'JobFlowIds': [args.cluster_id], + 'KeepJobFlowAliveWhenNoSteps': not auto_terminate, + } + emrutils.call_and_display_response( + self._session, + 'SetKeepJobFlowAliveWhenNoSteps', + parameters, + parsed_globals, + ) - if (args.auto_terminate or args.no_auto_terminate): - auto_terminate = (args.auto_terminate and - not args.no_auto_terminate) - parameters = {'JobFlowIds': [args.cluster_id], - 'KeepJobFlowAliveWhenNoSteps': not auto_terminate} - emrutils.call_and_display_response(self._session, - 'SetKeepJobFlowAliveWhenNoSteps', - parameters, parsed_globals) - - if (args.unhealthy_node_replacement or args.no_unhealthy_node_replacement): - protected = (args.unhealthy_node_replacement and - not args.no_unhealthy_node_replacement) - parameters = {'JobFlowIds': [args.cluster_id], - 'UnhealthyNodeReplacement': protected} - emrutils.call_and_display_response(self._session, - 'SetUnhealthyNodeReplacement', - parameters, parsed_globals) + if ( + args.unhealthy_node_replacement + or args.no_unhealthy_node_replacement + ): + protected = ( + args.unhealthy_node_replacement + and not args.no_unhealthy_node_replacement + ) + parameters = { + 'JobFlowIds': [args.cluster_id], + 'UnhealthyNodeReplacement': protected, + } + emrutils.call_and_display_response( + self._session, + 'SetUnhealthyNodeReplacement', + parameters, + parsed_globals, + ) return 0 diff --git a/awscli/customizations/emr/ssh.py b/awscli/customizations/emr/ssh.py index ae4cb71ceb17..3f2d3edbfa30 100644 --- a/awscli/customizations/emr/ssh.py +++ b/awscli/customizations/emr/ssh.py @@ -15,25 +15,33 @@ import subprocess import tempfile -from awscli.customizations.emr import constants -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import sshutils +from awscli.customizations.emr import constants, emrutils, sshutils from awscli.customizations.emr.command import Command -KEY_PAIR_FILE_HELP_TEXT = '\nA value for the variable Key Pair File ' \ - 'can be set in the AWS CLI config file using the ' \ +KEY_PAIR_FILE_HELP_TEXT = ( + '\nA value for the variable Key Pair File ' + 'can be set in the AWS CLI config file using the ' '"aws configure set emr.key_pair_file " command.\n' +) class Socks(Command): NAME = 'socks' - DESCRIPTION = ('Create a socks tunnel on port 8157 from your machine ' - 'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT) + DESCRIPTION = ( + 'Create a socks tunnel on port 8157 from your machine ' + 'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': 'Cluster Id of cluster you want to ssh into'}, - {'name': 'key-pair-file', 'required': True, - 'help_text': 'Private key file to use for login'}, + { + 'name': 'cluster-id', + 'required': True, + 'help_text': 'Cluster Id of cluster you want to ssh into', + }, + { + 'name': 'key-pair-file', + 'required': True, + 'help_text': 'Private key file to use for login', + }, ] def _run_main_command(self, parsed_args, parsed_globals): @@ -41,20 +49,36 @@ def _run_main_command(self, parsed_args, parsed_globals): master_dns = sshutils.validate_and_find_master_dns( session=self._session, parsed_globals=parsed_globals, - cluster_id=parsed_args.cluster_id) + cluster_id=parsed_args.cluster_id, + ) key_file = parsed_args.key_pair_file sshutils.validate_ssh_with_key_file(key_file) f = tempfile.NamedTemporaryFile(delete=False) - if (emrutils.which('ssh') or emrutils.which('ssh.exe')): - command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o', - 
'ServerAliveInterval=10', '-ND', '8157', '-i', - parsed_args.key_pair_file, constants.SSH_USER + - '@' + master_dns] + if emrutils.which('ssh') or emrutils.which('ssh.exe'): + command = [ + 'ssh', + '-o', + 'StrictHostKeyChecking=no', + '-o', + 'ServerAliveInterval=10', + '-ND', + '8157', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns, + ] else: - command = ['putty', '-ssh', '-i', parsed_args.key_pair_file, - constants.SSH_USER + '@' + master_dns, '-N', '-D', - '8157'] + command = [ + 'putty', + '-ssh', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns, + '-N', + '-D', + '8157', + ] print(' '.join(command)) rc = subprocess.call(command) @@ -66,35 +90,56 @@ def _run_main_command(self, parsed_args, parsed_globals): class SSH(Command): NAME = 'ssh' - DESCRIPTION = ('SSH into master node of the cluster.\n%s' % - KEY_PAIR_FILE_HELP_TEXT) + DESCRIPTION = ( + 'SSH into master node of the cluster.\n%s' % KEY_PAIR_FILE_HELP_TEXT + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': 'Cluster Id of cluster you want to ssh into'}, - {'name': 'key-pair-file', 'required': True, - 'help_text': 'Private key file to use for login'}, - {'name': 'command', 'help_text': 'Command to execute on Master Node'} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': 'Cluster Id of cluster you want to ssh into', + }, + { + 'name': 'key-pair-file', + 'required': True, + 'help_text': 'Private key file to use for login', + }, + {'name': 'command', 'help_text': 'Command to execute on Master Node'}, ] def _run_main_command(self, parsed_args, parsed_globals): master_dns = sshutils.validate_and_find_master_dns( session=self._session, parsed_globals=parsed_globals, - cluster_id=parsed_args.cluster_id) + cluster_id=parsed_args.cluster_id, + ) key_file = parsed_args.key_pair_file sshutils.validate_ssh_with_key_file(key_file) f = tempfile.NamedTemporaryFile(delete=False) - if (emrutils.which('ssh') or emrutils.which('ssh.exe')): - command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o', - 'ServerAliveInterval=10', '-i', - parsed_args.key_pair_file, constants.SSH_USER + - '@' + master_dns, '-t'] + if emrutils.which('ssh') or emrutils.which('ssh.exe'): + command = [ + 'ssh', + '-o', + 'StrictHostKeyChecking=no', + '-o', + 'ServerAliveInterval=10', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns, + '-t', + ] if parsed_args.command: command.append(parsed_args.command) else: - command = ['putty', '-ssh', '-i', parsed_args.key_pair_file, - constants.SSH_USER + '@' + master_dns, '-t'] + command = [ + 'putty', + '-ssh', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns, + '-t', + ] if parsed_args.command: f.write(parsed_args.command) f.write('\nread -n1 -r -p "Command completed. 
Press any key."') @@ -110,33 +155,57 @@ def _run_main_command(self, parsed_args, parsed_globals): class Put(Command): NAME = 'put' - DESCRIPTION = ('Put file onto the master node.\n%s' % - KEY_PAIR_FILE_HELP_TEXT) + DESCRIPTION = ( + 'Put file onto the master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT + ) ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': 'Cluster Id of cluster you want to put file onto'}, - {'name': 'key-pair-file', 'required': True, - 'help_text': 'Private key file to use for login'}, - {'name': 'src', 'required': True, - 'help_text': 'Source file path on local machine'}, - {'name': 'dest', 'help_text': 'Destination file path on remote host'} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': 'Cluster Id of cluster you want to put file onto', + }, + { + 'name': 'key-pair-file', + 'required': True, + 'help_text': 'Private key file to use for login', + }, + { + 'name': 'src', + 'required': True, + 'help_text': 'Source file path on local machine', + }, + {'name': 'dest', 'help_text': 'Destination file path on remote host'}, ] def _run_main_command(self, parsed_args, parsed_globals): master_dns = sshutils.validate_and_find_master_dns( session=self._session, parsed_globals=parsed_globals, - cluster_id=parsed_args.cluster_id) + cluster_id=parsed_args.cluster_id, + ) key_file = parsed_args.key_pair_file sshutils.validate_scp_with_key_file(key_file) - if (emrutils.which('scp') or emrutils.which('scp.exe')): - command = ['scp', '-r', '-o StrictHostKeyChecking=no', - '-i', parsed_args.key_pair_file, parsed_args.src, - constants.SSH_USER + '@' + master_dns] + if emrutils.which('scp') or emrutils.which('scp.exe'): + command = [ + 'scp', + '-r', + '-o StrictHostKeyChecking=no', + '-i', + parsed_args.key_pair_file, + parsed_args.src, + constants.SSH_USER + '@' + master_dns, + ] else: - command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file, - parsed_args.src, constants.SSH_USER + '@' + master_dns] + command = [ + 'pscp', + '-scp', + '-r', + '-i', + parsed_args.key_pair_file, + parsed_args.src, + constants.SSH_USER + '@' + master_dns, + ] # if the instance is not terminated if parsed_args.dest: @@ -150,33 +219,53 @@ def _run_main_command(self, parsed_args, parsed_globals): class Get(Command): NAME = 'get' - DESCRIPTION = ('Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT) + DESCRIPTION = 'Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT ARG_TABLE = [ - {'name': 'cluster-id', 'required': True, - 'help_text': 'Cluster Id of cluster you want to get file from'}, - {'name': 'key-pair-file', 'required': True, - 'help_text': 'Private key file to use for login'}, - {'name': 'src', 'required': True, - 'help_text': 'Source file path on remote host'}, - {'name': 'dest', 'help_text': 'Destination file path on your machine'} + { + 'name': 'cluster-id', + 'required': True, + 'help_text': 'Cluster Id of cluster you want to get file from', + }, + { + 'name': 'key-pair-file', + 'required': True, + 'help_text': 'Private key file to use for login', + }, + { + 'name': 'src', + 'required': True, + 'help_text': 'Source file path on remote host', + }, + {'name': 'dest', 'help_text': 'Destination file path on your machine'}, ] def _run_main_command(self, parsed_args, parsed_globals): master_dns = sshutils.validate_and_find_master_dns( session=self._session, parsed_globals=parsed_globals, - cluster_id=parsed_args.cluster_id) + cluster_id=parsed_args.cluster_id, + ) key_file = parsed_args.key_pair_file sshutils.validate_scp_with_key_file(key_file) - if 
(emrutils.which('scp') or emrutils.which('scp.exe')): - command = ['scp', '-r', '-o StrictHostKeyChecking=no', '-i', - parsed_args.key_pair_file, constants.SSH_USER + '@' + - master_dns + ':' + parsed_args.src] + if emrutils.which('scp') or emrutils.which('scp.exe'): + command = [ + 'scp', + '-r', + '-o StrictHostKeyChecking=no', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns + ':' + parsed_args.src, + ] else: - command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file, - constants.SSH_USER + '@' + master_dns + ':' + - parsed_args.src] + command = [ + 'pscp', + '-scp', + '-r', + '-i', + parsed_args.key_pair_file, + constants.SSH_USER + '@' + master_dns + ':' + parsed_args.src, + ] if parsed_args.dest: command.append(parsed_args.dest) diff --git a/awscli/customizations/emr/sshutils.py b/awscli/customizations/emr/sshutils.py index 443f64b472d0..81d8b3fa9626 100644 --- a/awscli/customizations/emr/sshutils.py +++ b/awscli/customizations/emr/sshutils.py @@ -13,9 +13,7 @@ import logging -from awscli.customizations.emr import exceptions -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import constants +from awscli.customizations.emr import constants, emrutils, exceptions from botocore.exceptions import WaiterError LOG = logging.getLogger(__name__) @@ -32,7 +30,8 @@ def validate_and_find_master_dns(session, parsed_globals, cluster_id): Throw MasterDNSNotAvailableError or ClusterTerminatedError. """ cluster_state = emrutils.get_cluster_state( - session, parsed_globals, cluster_id) + session, parsed_globals, cluster_id + ) if cluster_state in constants.TERMINATED_STATES: raise exceptions.ClusterTerminatedError @@ -48,21 +47,27 @@ def validate_and_find_master_dns(session, parsed_globals, cluster_id): raise exceptions.MasterDNSNotAvailableError return emrutils.find_master_dns( - session=session, cluster_id=cluster_id, - parsed_globals=parsed_globals) + session=session, cluster_id=cluster_id, parsed_globals=parsed_globals + ) def validate_ssh_with_key_file(key_file): - if (emrutils.which('putty.exe') or emrutils.which('ssh') or - emrutils.which('ssh.exe')) is None: + if ( + emrutils.which('putty.exe') + or emrutils.which('ssh') + or emrutils.which('ssh.exe') + ) is None: raise exceptions.SSHNotFoundError else: check_ssh_key_format(key_file) def validate_scp_with_key_file(key_file): - if (emrutils.which('pscp.exe') or emrutils.which('scp') or - emrutils.which('scp.exe')) is None: + if ( + emrutils.which('pscp.exe') + or emrutils.which('scp') + or emrutils.which('scp.exe') + ) is None: raise exceptions.SCPNotFoundError else: check_scp_key_format(key_file) @@ -70,8 +75,10 @@ def validate_scp_with_key_file(key_file): def check_scp_key_format(key_file): # If only pscp is present and the file format is incorrect - if (emrutils.which('pscp.exe') is not None and - (emrutils.which('scp.exe') or emrutils.which('scp')) is None): + if ( + emrutils.which('pscp.exe') is not None + and (emrutils.which('scp.exe') or emrutils.which('scp')) is None + ): if check_command_key_format(key_file, ['ppk']) is False: raise exceptions.WrongPuttyKeyError else: @@ -80,8 +87,10 @@ def check_scp_key_format(key_file): def check_ssh_key_format(key_file): # If only putty is present and the file format is incorrect - if (emrutils.which('putty.exe') is not None and - (emrutils.which('ssh.exe') or emrutils.which('ssh')) is None): + if ( + emrutils.which('putty.exe') is not None + and (emrutils.which('ssh.exe') or emrutils.which('ssh')) is None + ): if 
check_command_key_format(key_file, ['ppk']) is False: raise exceptions.WrongPuttyKeyError else: diff --git a/awscli/customizations/emr/steputils.py b/awscli/customizations/emr/steputils.py index 3a9e6b99bfa2..e6b343b7a91a 100644 --- a/awscli/customizations/emr/steputils.py +++ b/awscli/customizations/emr/steputils.py @@ -11,9 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import constants -from awscli.customizations.emr import exceptions +from awscli.customizations.emr import constants, emrutils, exceptions def build_step_config_list(parsed_step_list, region, release_label): @@ -29,23 +27,24 @@ def build_step_config_list(parsed_step_list, region, release_label): step_config = build_custom_jar_step(parsed_step=step) elif step_type == constants.STREAMING: step_config = build_streaming_step( - parsed_step=step, release_label=release_label) + parsed_step=step, release_label=release_label + ) elif step_type == constants.HIVE: step_config = build_hive_step( - parsed_step=step, region=region, - release_label=release_label) + parsed_step=step, region=region, release_label=release_label + ) elif step_type == constants.PIG: step_config = build_pig_step( - parsed_step=step, region=region, - release_label=release_label) + parsed_step=step, region=region, release_label=release_label + ) elif step_type == constants.IMPALA: step_config = build_impala_step( - parsed_step=step, region=region, - release_label=release_label) + parsed_step=step, region=region, release_label=release_label + ) elif step_type == constants.SPARK: step_config = build_spark_step( - parsed_step=step, region=region, - release_label=release_label) + parsed_step=step, region=region, release_label=release_label + ) else: raise exceptions.UnknownStepTypeError(step_type=step_type) @@ -57,14 +56,17 @@ def build_step_config_list(parsed_step_list, region, release_label): def build_custom_jar_step(parsed_step): name = _apply_default_value( arg=parsed_step.get('Name'), - value=constants.DEFAULT_CUSTOM_JAR_STEP_NAME) + value=constants.DEFAULT_CUSTOM_JAR_STEP_NAME, + ) action_on_failure = _apply_default_value( arg=parsed_step.get('ActionOnFailure'), - value=constants.DEFAULT_FAILURE_ACTION) + value=constants.DEFAULT_FAILURE_ACTION, + ) emrutils.check_required_field( structure=constants.CUSTOM_JAR_STEP_CONFIG, name='Jar', - value=parsed_step.get('Jar')) + value=parsed_step.get('Jar'), + ) return emrutils.build_step( jar=parsed_step.get('Jar'), args=parsed_step.get('Args'), @@ -72,22 +74,25 @@ def build_custom_jar_step(parsed_step): action_on_failure=action_on_failure, main_class=parsed_step.get('MainClass'), properties=emrutils.parse_key_value_string( - parsed_step.get('Properties'))) + parsed_step.get('Properties') + ), + ) def build_streaming_step(parsed_step, release_label): name = _apply_default_value( arg=parsed_step.get('Name'), - value=constants.DEFAULT_STREAMING_STEP_NAME) + value=constants.DEFAULT_STREAMING_STEP_NAME, + ) action_on_failure = _apply_default_value( arg=parsed_step.get('ActionOnFailure'), - value=constants.DEFAULT_FAILURE_ACTION) + value=constants.DEFAULT_FAILURE_ACTION, + ) args = parsed_step.get('Args') emrutils.check_required_field( - structure=constants.STREAMING_STEP_CONFIG, - name='Args', - value=args) + structure=constants.STREAMING_STEP_CONFIG, name='Args', value=args + ) emrutils.check_empty_string_list(name='Args', value=args) 
args_list = [] @@ -100,30 +105,30 @@ def build_streaming_step(parsed_step, release_label): args_list += args return emrutils.build_step( - jar=jar, - args=args_list, - name=name, - action_on_failure=action_on_failure) + jar=jar, args=args_list, name=name, action_on_failure=action_on_failure + ) def build_hive_step(parsed_step, release_label, region=None): args = parsed_step.get('Args') emrutils.check_required_field( - structure=constants.HIVE_STEP_CONFIG, name='Args', value=args) + structure=constants.HIVE_STEP_CONFIG, name='Args', value=args + ) emrutils.check_empty_string_list(name='Args', value=args) name = _apply_default_value( - arg=parsed_step.get('Name'), - value=constants.DEFAULT_HIVE_STEP_NAME) - action_on_failure = \ - _apply_default_value( - arg=parsed_step.get('ActionOnFailure'), - value=constants.DEFAULT_FAILURE_ACTION) + arg=parsed_step.get('Name'), value=constants.DEFAULT_HIVE_STEP_NAME + ) + action_on_failure = _apply_default_value( + arg=parsed_step.get('ActionOnFailure'), + value=constants.DEFAULT_FAILURE_ACTION, + ) return emrutils.build_step( jar=_get_runner_jar(release_label, region), args=_build_hive_args(args, release_label, region), name=name, - action_on_failure=action_on_failure) + action_on_failure=action_on_failure, + ) def _build_hive_args(args, release_label, region): @@ -131,8 +136,11 @@ def _build_hive_args(args, release_label, region): if release_label: args_list.append(constants.HIVE_SCRIPT_COMMAND) else: - args_list.append(emrutils.build_s3_link( - relative_path=constants.HIVE_SCRIPT_PATH, region=region)) + args_list.append( + emrutils.build_s3_link( + relative_path=constants.HIVE_SCRIPT_PATH, region=region + ) + ) args_list.append(constants.RUN_HIVE_SCRIPT) @@ -149,20 +157,23 @@ def _build_hive_args(args, release_label, region): def build_pig_step(parsed_step, release_label, region=None): args = parsed_step.get('Args') emrutils.check_required_field( - structure=constants.PIG_STEP_CONFIG, name='Args', value=args) + structure=constants.PIG_STEP_CONFIG, name='Args', value=args + ) emrutils.check_empty_string_list(name='Args', value=args) name = _apply_default_value( - arg=parsed_step.get('Name'), - value=constants.DEFAULT_PIG_STEP_NAME) + arg=parsed_step.get('Name'), value=constants.DEFAULT_PIG_STEP_NAME + ) action_on_failure = _apply_default_value( arg=parsed_step.get('ActionOnFailure'), - value=constants.DEFAULT_FAILURE_ACTION) + value=constants.DEFAULT_FAILURE_ACTION, + ) return emrutils.build_step( jar=_get_runner_jar(release_label, region), args=_build_pig_args(args, release_label, region), name=name, - action_on_failure=action_on_failure) + action_on_failure=action_on_failure, + ) def _build_pig_args(args, release_label, region): @@ -170,8 +181,11 @@ def _build_pig_args(args, release_label, region): if release_label: args_list.append(constants.PIG_SCRIPT_COMMAND) else: - args_list.append(emrutils.build_s3_link( - relative_path=constants.PIG_SCRIPT_PATH, region=region)) + args_list.append( + emrutils.build_s3_link( + relative_path=constants.PIG_SCRIPT_PATH, region=region + ) + ) args_list.append(constants.RUN_PIG_SCRIPT) @@ -189,43 +203,51 @@ def build_impala_step(parsed_step, release_label, region=None): if release_label: raise exceptions.UnknownStepTypeError(step_type=constants.IMPALA) name = _apply_default_value( - arg=parsed_step.get('Name'), - value=constants.DEFAULT_IMPALA_STEP_NAME) + arg=parsed_step.get('Name'), value=constants.DEFAULT_IMPALA_STEP_NAME + ) action_on_failure = _apply_default_value( arg=parsed_step.get('ActionOnFailure'), - 
value=constants.DEFAULT_FAILURE_ACTION) + value=constants.DEFAULT_FAILURE_ACTION, + ) args_list = [ emrutils.build_s3_link( - relative_path=constants.IMPALA_INSTALL_PATH, region=region), - constants.RUN_IMPALA_SCRIPT] + relative_path=constants.IMPALA_INSTALL_PATH, region=region + ), + constants.RUN_IMPALA_SCRIPT, + ] args = parsed_step.get('Args') emrutils.check_required_field( - structure=constants.IMPALA_STEP_CONFIG, name='Args', value=args) + structure=constants.IMPALA_STEP_CONFIG, name='Args', value=args + ) args_list += args return emrutils.build_step( jar=emrutils.get_script_runner(region), args=args_list, name=name, - action_on_failure=action_on_failure) + action_on_failure=action_on_failure, + ) def build_spark_step(parsed_step, release_label, region=None): name = _apply_default_value( - arg=parsed_step.get('Name'), - value=constants.DEFAULT_SPARK_STEP_NAME) + arg=parsed_step.get('Name'), value=constants.DEFAULT_SPARK_STEP_NAME + ) action_on_failure = _apply_default_value( arg=parsed_step.get('ActionOnFailure'), - value=constants.DEFAULT_FAILURE_ACTION) + value=constants.DEFAULT_FAILURE_ACTION, + ) args = parsed_step.get('Args') emrutils.check_required_field( - structure=constants.SPARK_STEP_CONFIG, name='Args', value=args) + structure=constants.SPARK_STEP_CONFIG, name='Args', value=args + ) return emrutils.build_step( jar=_get_runner_jar(release_label, region), args=_build_spark_args(args, release_label, region), name=name, - action_on_failure=action_on_failure) + action_on_failure=action_on_failure, + ) def _build_spark_args(args, release_label, region): @@ -247,5 +269,8 @@ def _apply_default_value(arg, value): def _get_runner_jar(release_label, region): - return constants.COMMAND_RUNNER if release_label \ + return ( + constants.COMMAND_RUNNER + if release_label else emrutils.get_script_runner(region) + ) diff --git a/awscli/customizations/emr/terminateclusters.py b/awscli/customizations/emr/terminateclusters.py index b3d7234dc2bb..a4d28b9f14e8 100644 --- a/awscli/customizations/emr/terminateclusters.py +++ b/awscli/customizations/emr/terminateclusters.py @@ -12,23 +12,26 @@ # language governing permissions and limitations under the License. -from awscli.customizations.emr import emrutils -from awscli.customizations.emr import helptext +from awscli.customizations.emr import emrutils, helptext from awscli.customizations.emr.command import Command class TerminateClusters(Command): NAME = 'terminate-clusters' DESCRIPTION = helptext.TERMINATE_CLUSTERS - ARG_TABLE = [{ - 'name': 'cluster-ids', 'nargs': '+', 'required': True, - 'help_text': '

-    A list of clusters to terminate.
    ', - 'schema': {'type': 'array', 'items': {'type': 'string'}}, - }] + ARG_TABLE = [ + { + 'name': 'cluster-ids', + 'nargs': '+', + 'required': True, + 'help_text': '

+    A list of clusters to terminate.
    ', + 'schema': {'type': 'array', 'items': {'type': 'string'}}, + } + ] def _run_main_command(self, parsed_args, parsed_globals): parameters = {'JobFlowIds': parsed_args.cluster_ids} - emrutils.call_and_display_response(self._session, - 'TerminateJobFlows', parameters, - parsed_globals) + emrutils.call_and_display_response( + self._session, 'TerminateJobFlows', parameters, parsed_globals + ) return 0 diff --git a/awscli/customizations/emrcontainers/__init__.py b/awscli/customizations/emrcontainers/__init__.py index dc93cf5c1c3d..9fb1e96dc80b 100644 --- a/awscli/customizations/emrcontainers/__init__.py +++ b/awscli/customizations/emrcontainers/__init__.py @@ -11,8 +11,9 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.emrcontainers.update_role_trust_policy \ - import UpdateRoleTrustPolicyCommand +from awscli.customizations.emrcontainers.update_role_trust_policy import ( + UpdateRoleTrustPolicyCommand, +) def initialize(cli): @@ -28,4 +29,5 @@ def inject_commands(command_table, session, **kwargs): Used to inject new high level commands into the command list. """ command_table['update-role-trust-policy'] = UpdateRoleTrustPolicyCommand( - session) + session + ) diff --git a/awscli/customizations/emrcontainers/constants.py b/awscli/customizations/emrcontainers/constants.py index a8e23e95f941..9be41aff5ea4 100644 --- a/awscli/customizations/emrcontainers/constants.py +++ b/awscli/customizations/emrcontainers/constants.py @@ -14,24 +14,28 @@ # Declare all the constants used by Lifecycle in this file # Lifecycle role names -TRUST_POLICY_STATEMENT_FORMAT = '{ \ +TRUST_POLICY_STATEMENT_FORMAT = ( + '{ \ "Effect": "Allow", \ "Principal": { \ - "Federated": "arn:%(AWS_PARTITION)s:iam::%(AWS_ACCOUNT_ID)s:oidc-provider/' \ - '%(OIDC_PROVIDER)s" \ + "Federated": "arn:%(AWS_PARTITION)s:iam::%(AWS_ACCOUNT_ID)s:oidc-provider/' + '%(OIDC_PROVIDER)s" \ }, \ "Action": "sts:AssumeRoleWithWebIdentity", \ "Condition": { \ "StringLike": { \ - "%(OIDC_PROVIDER)s:sub": "system:serviceaccount:%(NAMESPACE)s' \ - ':emr-containers-sa-*-*-%(AWS_ACCOUNT_ID)s-' \ - '%(BASE36_ENCODED_ROLE_NAME)s" \ + "%(OIDC_PROVIDER)s:sub": "system:serviceaccount:%(NAMESPACE)s' + ':emr-containers-sa-*-*-%(AWS_ACCOUNT_ID)s-' + '%(BASE36_ENCODED_ROLE_NAME)s" \ } \ } \ }' +) -TRUST_POLICY_STATEMENT_ALREADY_EXISTS = "Trust policy statement already " \ - "exists for role %s. No changes " \ - "were made!" +TRUST_POLICY_STATEMENT_ALREADY_EXISTS = ( + "Trust policy statement already " + "exists for role %s. No changes " + "were made!" 
+) TRUST_POLICY_UPDATE_SUCCESSFUL = "Successfully updated trust policy of role %s" diff --git a/awscli/customizations/emrcontainers/eks.py b/awscli/customizations/emrcontainers/eks.py index 148785193489..d3d9d80fb28b 100644 --- a/awscli/customizations/emrcontainers/eks.py +++ b/awscli/customizations/emrcontainers/eks.py @@ -24,8 +24,13 @@ def get_oidc_issuer_id(self, cluster_name): name=cluster_name ) - oidc_issuer = self.cluster_info[cluster_name].get("cluster", {}).get( - "identity", {}).get("oidc", {}).get("issuer", "") + oidc_issuer = ( + self.cluster_info[cluster_name] + .get("cluster", {}) + .get("identity", {}) + .get("oidc", {}) + .get("issuer", "") + ) return oidc_issuer.split('https://')[1] @@ -36,7 +41,8 @@ def get_account_id(self, cluster_name): name=cluster_name ) - cluster_arn = self.cluster_info[cluster_name].get("cluster", {}).get( - "arn", "") + cluster_arn = ( + self.cluster_info[cluster_name].get("cluster", {}).get("arn", "") + ) return cluster_arn.split(':')[4] diff --git a/awscli/customizations/emrcontainers/iam.py b/awscli/customizations/emrcontainers/iam.py index 141a40536135..92cf0f14bb59 100644 --- a/awscli/customizations/emrcontainers/iam.py +++ b/awscli/customizations/emrcontainers/iam.py @@ -26,6 +26,5 @@ def get_assume_role_policy(self, role_name): def update_assume_role_policy(self, role_name, assume_role_policy): """Method to update trust policy of given role name""" return self.iam_client.update_assume_role_policy( - RoleName=role_name, - PolicyDocument=json.dumps(assume_role_policy) + RoleName=role_name, PolicyDocument=json.dumps(assume_role_policy) ) diff --git a/awscli/customizations/emrcontainers/update_role_trust_policy.py b/awscli/customizations/emrcontainers/update_role_trust_policy.py index 036382c9ec16..191c5b59259e 100644 --- a/awscli/customizations/emrcontainers/update_role_trust_policy.py +++ b/awscli/customizations/emrcontainers/update_role_trust_policy.py @@ -15,14 +15,15 @@ import logging from awscli.customizations.commands import BasicCommand -from awscli.customizations.emrcontainers.constants \ - import TRUST_POLICY_STATEMENT_FORMAT, \ - TRUST_POLICY_STATEMENT_ALREADY_EXISTS, \ - TRUST_POLICY_UPDATE_SUCCESSFUL from awscli.customizations.emrcontainers.base36 import Base36 +from awscli.customizations.emrcontainers.constants import ( + TRUST_POLICY_STATEMENT_ALREADY_EXISTS, + TRUST_POLICY_STATEMENT_FORMAT, + TRUST_POLICY_UPDATE_SUCCESSFUL, +) from awscli.customizations.emrcontainers.eks import EKS from awscli.customizations.emrcontainers.iam import IAM -from awscli.customizations.utils import uni_print, get_policy_arn_suffix +from awscli.customizations.utils import get_policy_arn_suffix, uni_print LOG = logging.getLogger(__name__) @@ -71,48 +72,56 @@ class UpdateRoleTrustPolicyCommand(BasicCommand): NAME = 'update-role-trust-policy' DESCRIPTION = BasicCommand.FROM_FILE( - 'emr-containers', - 'update-role-trust-policy', - '_description.rst' + 'emr-containers', 'update-role-trust-policy', '_description.rst' ) ARG_TABLE = [ { 'name': 'cluster-name', - 'help_text': ("Specify the name of the Amazon EKS cluster with " - "which the IAM Role would be used."), - 'required': True + 'help_text': ( + "Specify the name of the Amazon EKS cluster with " + "which the IAM Role would be used." 
+ ), + 'required': True, }, { 'name': 'namespace', - 'help_text': ("Specify the namespace from the Amazon EKS cluster " - "with which the IAM Role would be used."), - 'required': True + 'help_text': ( + "Specify the namespace from the Amazon EKS cluster " + "with which the IAM Role would be used." + ), + 'required': True, }, { 'name': 'role-name', - 'help_text': ("Specify the IAM Role name that you want to use" - "with Amazon EMR on EKS."), - 'required': True + 'help_text': ( + "Specify the IAM Role name that you want to use" + "with Amazon EMR on EKS." + ), + 'required': True, }, { 'name': 'iam-endpoint', 'no_paramfile': True, - 'help_text': ("The IAM endpoint to call for updating the role " - "trust policy. This is optional and should only be" - "specified when a custom endpoint should be called" - "for IAM operations."), - 'required': False + 'help_text': ( + "The IAM endpoint to call for updating the role " + "trust policy. This is optional and should only be" + "specified when a custom endpoint should be called" + "for IAM operations." + ), + 'required': False, }, { 'name': 'dry-run', 'action': 'store_true', 'default': False, - 'help_text': ("Print the merged trust policy document to" - "stdout instead of updating the role trust" - "policy directly."), - 'required': False - } + 'help_text': ( + "Print the merged trust policy document to" + "stdout instead of updating the role trust" + "policy directly." + ), + 'required': False, + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -136,42 +145,55 @@ def _update_role_trust_policy(self, parsed_globals): base36 = Base36() - eks_client = EKS(self._session.create_client( - 'eks', - region_name=self._region, - verify=parsed_globals.verify_ssl - )) + eks_client = EKS( + self._session.create_client( + 'eks', + region_name=self._region, + verify=parsed_globals.verify_ssl, + ) + ) account_id = eks_client.get_account_id(self._cluster_name) oidc_provider = eks_client.get_oidc_issuer_id(self._cluster_name) base36_encoded_role_name = base36.encode(self._role_name) LOG.debug('Base36 encoded role name: %s', base36_encoded_role_name) - trust_policy_statement = json.loads(TRUST_POLICY_STATEMENT_FORMAT % { - "AWS_ACCOUNT_ID": account_id, - "OIDC_PROVIDER": oidc_provider, - "NAMESPACE": self._namespace, - "BASE36_ENCODED_ROLE_NAME": base36_encoded_role_name, - "AWS_PARTITION": get_policy_arn_suffix(self._region) - }) - - LOG.debug('Computed Trust Policy Statement:\n%s', json.dumps( - trust_policy_statement, indent=2)) - iam_client = IAM(self._session.create_client( - 'iam', - region_name=self._region, - endpoint_url=self._endpoint_url, - verify=parsed_globals.verify_ssl - )) + trust_policy_statement = json.loads( + TRUST_POLICY_STATEMENT_FORMAT + % { + "AWS_ACCOUNT_ID": account_id, + "OIDC_PROVIDER": oidc_provider, + "NAMESPACE": self._namespace, + "BASE36_ENCODED_ROLE_NAME": base36_encoded_role_name, + "AWS_PARTITION": get_policy_arn_suffix(self._region), + } + ) + + LOG.debug( + 'Computed Trust Policy Statement:\n%s', + json.dumps(trust_policy_statement, indent=2), + ) + iam_client = IAM( + self._session.create_client( + 'iam', + region_name=self._region, + endpoint_url=self._endpoint_url, + verify=parsed_globals.verify_ssl, + ) + ) assume_role_document = iam_client.get_assume_role_policy( - self._role_name) - matches = check_if_statement_exists(trust_policy_statement, - assume_role_document) + self._role_name + ) + matches = check_if_statement_exists( + trust_policy_statement, assume_role_document + ) if not matches: - LOG.debug('Role %s does 
not have the required trust policy ', - self._role_name) + LOG.debug( + 'Role %s does not have the required trust policy ', + self._role_name, + ) existing_statements = assume_role_document.get("Statement") if existing_statements is None: @@ -183,8 +205,9 @@ def _update_role_trust_policy(self, parsed_globals): return json.dumps(assume_role_document, indent=2) else: LOG.debug('Updating trust policy of role %s', self._role_name) - iam_client.update_assume_role_policy(self._role_name, - assume_role_document) + iam_client.update_assume_role_policy( + self._role_name, assume_role_document + ) return TRUST_POLICY_UPDATE_SUCCESSFUL % self._role_name else: return TRUST_POLICY_STATEMENT_ALREADY_EXISTS % self._role_name diff --git a/awscli/customizations/flatten.py b/awscli/customizations/flatten.py index a7b893fa077c..5b1348c8311b 100644 --- a/awscli/customizations/flatten.py +++ b/awscli/customizations/flatten.py @@ -30,15 +30,26 @@ class FlattenedArgument(CustomArgument): Supports both an object and a list of objects, in which case the flattened parameters will hydrate a list with a single object in it. """ - def __init__(self, name, container, prop, help_text='', required=None, - type=None, hydrate=None, hydrate_value=None): + + def __init__( + self, + name, + container, + prop, + help_text='', + required=None, + type=None, + hydrate=None, + hydrate_value=None, + ): self.type = type self._container = container self._property = prop self._hydrate = hydrate self._hydrate_value = hydrate_value - super(FlattenedArgument, self).__init__(name=name, help_text=help_text, - required=required) + super(FlattenedArgument, self).__init__( + name=name, help_text=help_text, required=required + ) @property def cli_type_name(self): @@ -151,6 +162,7 @@ def my_hydrate(params, container, cli_type, key, value): ensure that a list of one or more objects is hydrated rather than a single object. """ + def __init__(self, service_name, configs): self.configs = configs self.service_name = service_name @@ -163,9 +175,10 @@ def register(self, cli): # Flatten each configured operation when they are built service = self.service_name for operation in self.configs: - cli.register('building-argument-table.{0}.{1}'.format(service, - operation), - self.flatten_args) + cli.register( + 'building-argument-table.{0}.{1}'.format(service, operation), + self.flatten_args, + ) def flatten_args(self, command, argument_table, **kwargs): # For each argument with a bag of parameters @@ -173,10 +186,15 @@ def flatten_args(self, command, argument_table, **kwargs): argument_from_table = argument_table[name] overwritten = False - LOG.debug('Flattening {0} argument {1} into {2}'.format( - command.name, name, - ', '.join([v['name'] for k, v in argument['flatten'].items()]) - )) + LOG.debug( + 'Flattening {0} argument {1} into {2}'.format( + command.name, + name, + ', '.join( + [v['name'] for k, v in argument['flatten'].items()] + ), + ) + ) # For each parameter to flatten out for sub_argument, new_config in argument['flatten'].items(): @@ -200,8 +218,9 @@ def flatten_args(self, command, argument_table, **kwargs): overwritten = True # Delete the original argument? 
- if not overwritten and ('keep' not in argument or - not argument['keep']): + if not overwritten and ( + 'keep' not in argument or not argument['keep'] + ): del argument_table[name] def _find_nested_arg(self, argument, name): @@ -239,7 +258,9 @@ def _merge_member_config(self, argument, name, config): config['help_text'] = member.documentation if 'required' not in config: - config['required'] = member_name in argument.required_members + config['required'] = ( + member_name in argument.required_members + ) if 'type' not in config: config['type'] = member.type_name diff --git a/awscli/customizations/gamelift/__init__.py b/awscli/customizations/gamelift/__init__.py index 6a4857a9e69b..e7a3022e7adc 100644 --- a/awscli/customizations/gamelift/__init__.py +++ b/awscli/customizations/gamelift/__init__.py @@ -10,8 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.gamelift.uploadbuild import UploadBuildCommand from awscli.customizations.gamelift.getlog import GetGameSessionLogCommand +from awscli.customizations.gamelift.uploadbuild import UploadBuildCommand def register_gamelift_commands(event_emitter): diff --git a/awscli/customizations/gamelift/getlog.py b/awscli/customizations/gamelift/getlog.py index 3bded0e9dc13..15339089d588 100644 --- a/awscli/customizations/gamelift/getlog.py +++ b/awscli/customizations/gamelift/getlog.py @@ -21,30 +21,38 @@ class GetGameSessionLogCommand(BasicCommand): NAME = 'get-game-session-log' DESCRIPTION = 'Download a compressed log file for a game session.' ARG_TABLE = [ - {'name': 'game-session-id', 'required': True, - 'help_text': 'The game session ID'}, - {'name': 'save-as', 'required': True, - 'help_text': 'The filename to which the file should be saved (.zip)'} + { + 'name': 'game-session-id', + 'required': True, + 'help_text': 'The game session ID', + }, + { + 'name': 'save-as', + 'required': True, + 'help_text': 'The filename to which the file should be saved (.zip)', + }, ] def _run_main(self, args, parsed_globals): client = self._session.create_client( - 'gamelift', region_name=parsed_globals.region, + 'gamelift', + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) # Retrieve a signed url. response = client.get_game_session_log_url( - GameSessionId=args.game_session_id) + GameSessionId=args.game_session_id + ) url = response['PreSignedUrl'] # Retrieve the content from the presigned url and save it locally. 
contents = urlopen(url) sys.stdout.write( - 'Downloading log archive for game session %s...\r' % - args.game_session_id + 'Downloading log archive for game session %s...\r' + % args.game_session_id ) with open(args.save_as, 'wb') as f: @@ -53,6 +61,7 @@ def _run_main(self, args, parsed_globals): sys.stdout.write( 'Successfully downloaded log archive for game ' - 'session %s to %s\n' % (args.game_session_id, args.save_as)) + 'session %s to %s\n' % (args.game_session_id, args.save_as) + ) return 0 diff --git a/awscli/customizations/gamelift/uploadbuild.py b/awscli/customizations/gamelift/uploadbuild.py index 369317c5ffd9..b71ec860b4f1 100644 --- a/awscli/customizations/gamelift/uploadbuild.py +++ b/awscli/customizations/gamelift/uploadbuild.py @@ -10,43 +10,56 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import threading import contextlib import os -import tempfile import sys +import tempfile +import threading import zipfile -from s3transfer import S3Transfer - from awscli.customizations.commands import BasicCommand from awscli.customizations.s3.utils import human_readable_size +from s3transfer import S3Transfer class UploadBuildCommand(BasicCommand): NAME = 'upload-build' DESCRIPTION = 'Upload a new build to AWS GameLift.' ARG_TABLE = [ - {'name': 'name', 'required': True, - 'help_text': 'The name of the build'}, - {'name': 'build-version', 'required': True, - 'help_text': 'The version of the build'}, - {'name': 'build-root', 'required': True, - 'help_text': - 'The path to the directory containing the build to upload'}, - {'name': 'server-sdk-version', 'required': False, - 'help_text': - 'The version of the GameLift server SDK used to ' - 'create the game server'}, - {'name': 'operating-system', 'required': False, - 'help_text': 'The operating system the build runs on'} + { + 'name': 'name', + 'required': True, + 'help_text': 'The name of the build', + }, + { + 'name': 'build-version', + 'required': True, + 'help_text': 'The version of the build', + }, + { + 'name': 'build-root', + 'required': True, + 'help_text': 'The path to the directory containing the build to upload', + }, + { + 'name': 'server-sdk-version', + 'required': False, + 'help_text': 'The version of the GameLift server SDK used to ' + 'create the game server', + }, + { + 'name': 'operating-system', + 'required': False, + 'help_text': 'The operating system the build runs on', + }, ] def _run_main(self, args, parsed_globals): gamelift_client = self._session.create_client( - 'gamelift', region_name=parsed_globals.region, + 'gamelift', + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) # Validate a build directory if not validate_directory(args.build_root): @@ -60,7 +73,7 @@ def _run_main(self, args, parsed_globals): # Create a build based on the operating system given. create_build_kwargs = { 'Name': args.name, - 'Version': args.build_version + 'Version': args.build_version, } if args.operating_system: create_build_kwargs['OperatingSystem'] = args.operating_system @@ -70,8 +83,7 @@ def _run_main(self, args, parsed_globals): build_id = response['Build']['BuildId'] # Retrieve a set of credentials and the s3 bucket and key. 
- response = gamelift_client.request_upload_credentials( - BuildId=build_id) + response = gamelift_client.request_upload_credentials(BuildId=build_id) upload_credentials = response['UploadCredentials'] bucket = response['StorageLocation']['Bucket'] key = response['StorageLocation']['Key'] @@ -82,11 +94,12 @@ def _run_main(self, args, parsed_globals): secret_key = upload_credentials['SecretAccessKey'] session_token = upload_credentials['SessionToken'] s3_client = self._session.create_client( - 's3', aws_access_key_id=access_key, + 's3', + aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=session_token, region_name=parsed_globals.region, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) s3_transfer_mgr = S3Transfer(s3_client) @@ -95,11 +108,13 @@ def _run_main(self, args, parsed_globals): fd, temporary_zipfile = tempfile.mkstemp('%s.zip' % build_id) zip_directory(temporary_zipfile, args.build_root) s3_transfer_mgr.upload_file( - temporary_zipfile, bucket, key, + temporary_zipfile, + bucket, + key, callback=ProgressPercentage( temporary_zipfile, - label='Uploading ' + args.build_root + ':' - ) + label='Uploading ' + args.build_root + ':', + ), ) finally: os.close(fd) @@ -107,7 +122,8 @@ def _run_main(self, args, parsed_globals): sys.stdout.write( 'Successfully uploaded %s to AWS GameLift\n' - 'Build ID: %s\n' % (args.build_root, build_id)) + 'Build ID: %s\n' % (args.build_root, build_id) + ) return 0 @@ -120,8 +136,7 @@ def zip_directory(zipfile_name, source_root): for root, dirs, files in os.walk(source_root): for filename in files: full_path = os.path.join(root, filename) - relative_path = os.path.relpath( - full_path, source_root) + relative_path = os.path.relpath(full_path, source_root) zf.write(full_path, relative_path) @@ -156,9 +171,12 @@ def __call__(self, bytes_amount): if self._size > 0: percentage = (self._seen_so_far / self._size) * 100 sys.stdout.write( - "\r%s %s / %s (%.2f%%)" % ( - self._label, human_readable_size(self._seen_so_far), - human_readable_size(self._size), percentage + "\r%s %s / %s (%.2f%%)" + % ( + self._label, + human_readable_size(self._seen_so_far), + human_readable_size(self._size), + percentage, ) ) sys.stdout.flush() diff --git a/awscli/customizations/generatecliskeleton.py b/awscli/customizations/generatecliskeleton.py index 3f0f12dd6588..552badf3dba7 100644 --- a/awscli/customizations/generatecliskeleton.py +++ b/awscli/customizations/generatecliskeleton.py @@ -13,15 +13,15 @@ import json import sys -from botocore import xform_name -from botocore.stub import Stubber -from botocore.utils import ArgumentGenerator from ruamel.yaml import YAML from awscli.clidriver import CLIOperationCaller from awscli.customizations.arguments import OverrideRequiredArgsArgument from awscli.customizations.utils import get_shape_doc_overview from awscli.utils import json_encoder +from botocore import xform_name +from botocore.stub import Stubber +from botocore.utils import ArgumentGenerator def register_generate_cli_skeleton(cli): @@ -33,7 +33,8 @@ def add_generate_skeleton(session, operation_model, argument_table, **kwargs): # is designated by the argument name `outfile`. if 'outfile' not in argument_table: generate_cli_skeleton_argument = GenerateCliSkeletonArgument( - session, operation_model) + session, operation_model + ) generate_cli_skeleton_argument.add_to_arg_table(argument_table) @@ -44,6 +45,7 @@ class GenerateCliSkeletonArgument(OverrideRequiredArgsArgument): command from taking place. 
Instead, it will generate a JSON skeleton and print it to standard output. """ + ARG_DATA = { 'name': 'generate-cli-skeleton', 'help_text': ( @@ -86,17 +88,18 @@ def override_required_args(self, argument_table, args, **kwargs): except IndexError: pass super(GenerateCliSkeletonArgument, self).override_required_args( - argument_table, args, **kwargs) + argument_table, args, **kwargs + ) - def generate_skeleton(self, call_parameters, parsed_args, - parsed_globals, **kwargs): + def generate_skeleton( + self, call_parameters, parsed_args, parsed_globals, **kwargs + ): if not getattr(parsed_args, 'generate_cli_skeleton', None): return arg_value = parsed_args.generate_cli_skeleton return getattr( - self, '_generate_%s_skeleton' % arg_value.replace('-', '_'))( - call_parameters=call_parameters, parsed_globals=parsed_globals - ) + self, '_generate_%s_skeleton' % arg_value.replace('-', '_') + )(call_parameters=call_parameters, parsed_globals=parsed_globals) def _generate_yaml_input_skeleton(self, **kwargs): input_shape = self._operation_model.input_shape @@ -120,13 +123,14 @@ def _generate_input_skeleton(self, **kwargs): outfile.write('\n') return 0 - def _generate_output_skeleton(self, call_parameters, parsed_globals, - **kwargs): + def _generate_output_skeleton( + self, call_parameters, parsed_globals, **kwargs + ): service_name = self._operation_model.service_model.service_name operation_name = self._operation_model.name return StubbedCLIOperationCaller(self._session).invoke( - service_name, operation_name, call_parameters, - parsed_globals) + service_name, operation_name, call_parameters, parsed_globals + ) class StubbedCLIOperationCaller(CLIOperationCaller): @@ -135,16 +139,20 @@ class StubbedCLIOperationCaller(CLIOperationCaller): It generates a fake response and uses the response and provided parameters to make a stubbed client call for an operation command. 
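The stubbed caller described here leans on botocore's `Stubber`: queue a canned (or `ArgumentGenerator`-produced) response on a real client, and the call resolves in-process without any network traffic. The mechanism in isolation, with a hand-written response and fake static credentials so the request signer has something to work with:

```python
import boto3
from botocore.stub import Stubber

client = boto3.client(
    'sts', region_name='us-east-1',
    aws_access_key_id='testing', aws_secret_access_key='testing',
)
stubber = Stubber(client)
# Queue one canned response; the call below never leaves the process.
stubber.add_response(
    'get_caller_identity',
    {
        'UserId': 'AIDAEXAMPLE',
        'Account': '123456789012',
        'Arn': 'arn:aws:iam::123456789012:user/example',
    },
)
with stubber:
    print(client.get_caller_identity()['Account'])
```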
""" - def _make_client_call(self, client, operation_name, parameters, - parsed_globals): + + def _make_client_call( + self, client, operation_name, parameters, parsed_globals + ): method_name = xform_name(operation_name) operation_model = client.meta.service_model.operation_model( - operation_name) + operation_name + ) fake_response = {} if operation_model.output_shape: argument_generator = ArgumentGenerator(use_member_names=True) fake_response = argument_generator.generate_skeleton( - operation_model.output_shape) + operation_model.output_shape + ) with Stubber(client) as stubber: stubber.add_response(method_name, fake_response) return getattr(client, method_name)(**parameters) @@ -153,13 +161,14 @@ def _make_client_call(self, client, operation_name, parameters, class _Bytes(object): @classmethod def represent(cls, dumper, data): - return dumper.represent_scalar(u'tag:yaml.org,2002:binary', '') + return dumper.represent_scalar('tag:yaml.org,2002:binary', '') class YAMLArgumentGenerator(ArgumentGenerator): def __init__(self, use_member_names=False, yaml=None): super(YAMLArgumentGenerator, self).__init__( - use_member_names=use_member_names) + use_member_names=use_member_names + ) self._yaml = yaml if self._yaml is None: self._yaml = YAML() @@ -181,14 +190,17 @@ def _generate_type_structure(self, shape, stack): skeleton = self._yaml.map() for member_name, member_shape in shape.members.items(): skeleton[member_name] = self._generate_skeleton( - member_shape, stack, name=member_name) + member_shape, stack, name=member_name + ) is_required = member_name in shape.required_members self._add_member_comments( - skeleton, member_name, member_shape, is_required) + skeleton, member_name, member_shape, is_required + ) return skeleton - def _add_member_comments(self, skeleton, member_name, member_shape, - is_required): + def _add_member_comments( + self, skeleton, member_name, member_shape, is_required + ): comment_components = [] if is_required: comment_components.append('[REQUIRED]') @@ -208,6 +220,6 @@ def _generate_type_map(self, shape, stack): # YAML has support for ordered maps, so don't use ordereddicts # because that isn't necessary and it makes the output harder to # understand and read. - return dict(super(YAMLArgumentGenerator, self)._generate_type_map( - shape, stack - )) + return dict( + super(YAMLArgumentGenerator, self)._generate_type_map(shape, stack) + ) diff --git a/awscli/customizations/globalargs.py b/awscli/customizations/globalargs.py index 3a84223f93ce..056b09a5fe33 100644 --- a/awscli/customizations/globalargs.py +++ b/awscli/customizations/globalargs.py @@ -10,29 +10,38 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-import sys import os +import sys -from botocore.client import Config -from botocore.endpoint import DEFAULT_TIMEOUT -from botocore.handlers import disable_signing import jmespath from awscli.compat import urlparse from awscli.customizations.exceptions import ParamValidationError +from botocore.client import Config +from botocore.endpoint import DEFAULT_TIMEOUT +from botocore.handlers import disable_signing def register_parse_global_args(cli): - cli.register('top-level-args-parsed', resolve_types, - unique_id='resolve-types') - cli.register('top-level-args-parsed', no_sign_request, - unique_id='no-sign') - cli.register('top-level-args-parsed', resolve_verify_ssl, - unique_id='resolve-verify-ssl') - cli.register('top-level-args-parsed', resolve_cli_read_timeout, - unique_id='resolve-cli-read-timeout') - cli.register('top-level-args-parsed', resolve_cli_connect_timeout, - unique_id='resolve-cli-connect-timeout') + cli.register( + 'top-level-args-parsed', resolve_types, unique_id='resolve-types' + ) + cli.register('top-level-args-parsed', no_sign_request, unique_id='no-sign') + cli.register( + 'top-level-args-parsed', + resolve_verify_ssl, + unique_id='resolve-verify-ssl', + ) + cli.register( + 'top-level-args-parsed', + resolve_cli_read_timeout, + unique_id='resolve-cli-read-timeout', + ) + cli.register( + 'top-level-args-parsed', + resolve_cli_connect_timeout, + unique_id='resolve-cli-connect-timeout', + ) def resolve_types(parsed_args, **kwargs): @@ -94,7 +103,9 @@ def no_sign_request(parsed_args, session, **kwargs): # Register this first to override other handlers. emitter = session.get_component('event_emitter') emitter.register_first( - 'choose-signer', disable_signing, unique_id='disable-signing', + 'choose-signer', + disable_signing, + unique_id='disable-signing', ) diff --git a/awscli/customizations/history/__init__.py b/awscli/customizations/history/__init__.py index 68da5710c323..3a21c48d82e3 100644 --- a/awscli/customizations/history/__init__.py +++ b/awscli/customizations/history/__init__.py @@ -10,37 +10,39 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
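The history modules below persist botocore history events into a small sqlite database. The two connection flags in `DatabaseConnection` are the load-bearing part: `isolation_level=None` puts sqlite3 into autocommit mode, and `check_same_thread=False` lets the background writer thread share the connection. A sketch of the same setup, assuming the six-column `records` table this module uses:

```python
import sqlite3

conn = sqlite3.connect(
    'history.db', check_same_thread=False, isolation_level=None
)
conn.execute(
    'CREATE TABLE IF NOT EXISTS records '
    '(id TEXT, request_id TEXT, source TEXT, '
    'event_type TEXT, timestamp INTEGER, payload TEXT)'
)
# One row per history event: command id, optional request id, source,
# event type, epoch-millisecond timestamp, JSON-serialized payload.
conn.execute(
    'INSERT INTO records VALUES (?, ?, ?, ?, ?, ?)',
    ('cmd-1', None, 'CLI', 'CLI_VERSION', 1700000000000, '"aws-cli/2.22"'),
)
```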
+import logging import os import sys -import logging - -from botocore.history import get_global_history_recorder -from botocore.exceptions import ProfileNotFound from awscli.compat import sqlite3 from awscli.customizations.commands import BasicCommand -from awscli.customizations.history.constants import HISTORY_FILENAME_ENV_VAR -from awscli.customizations.history.constants import DEFAULT_HISTORY_FILENAME -from awscli.customizations.history.db import DatabaseConnection -from awscli.customizations.history.db import DatabaseRecordWriter -from awscli.customizations.history.db import RecordBuilder -from awscli.customizations.history.db import DatabaseHistoryHandler -from awscli.customizations.history.show import ShowCommand +from awscli.customizations.history.constants import ( + DEFAULT_HISTORY_FILENAME, + HISTORY_FILENAME_ENV_VAR, +) +from awscli.customizations.history.db import ( + DatabaseConnection, + DatabaseHistoryHandler, + DatabaseRecordWriter, + RecordBuilder, +) from awscli.customizations.history.list import ListCommand - +from awscli.customizations.history.show import ShowCommand +from botocore.exceptions import ProfileNotFound +from botocore.history import get_global_history_recorder LOG = logging.getLogger(__name__) HISTORY_RECORDER = get_global_history_recorder() def register_history_mode(event_handlers): - event_handlers.register( - 'session-initialized', attach_history_handler) + event_handlers.register('session-initialized', attach_history_handler) def register_history_commands(event_handlers): event_handlers.register( - "building-command-table.main", add_history_commands) + "building-command-table.main", add_history_commands + ) def attach_history_handler(session, parsed_args, **kwargs): @@ -48,7 +50,8 @@ def attach_history_handler(session, parsed_args, **kwargs): LOG.debug('Enabling CLI history') history_filename = os.environ.get( - HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME) + HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME + ) if not os.path.isdir(os.path.dirname(history_filename)): os.makedirs(os.path.dirname(history_filename)) @@ -98,7 +101,7 @@ class HistoryCommand(BasicCommand): ) SUBCOMMANDS = [ {'name': 'show', 'command_class': ShowCommand}, - {'name': 'list', 'command_class': ListCommand} + {'name': 'list', 'command_class': ListCommand}, ] def _run_main(self, parsed_args, parsed_globals): diff --git a/awscli/customizations/history/commands.py b/awscli/customizations/history/commands.py index 42c8de1af8c8..41973aa67940 100644 --- a/awscli/customizations/history/commands.py +++ b/awscli/customizations/history/commands.py @@ -13,14 +13,16 @@ import os from awscli.compat import is_windows -from awscli.utils import is_a_tty -from awscli.utils import OutputStreamFactory - from awscli.customizations.commands import BasicCommand -from awscli.customizations.history.db import DatabaseConnection -from awscli.customizations.history.constants import HISTORY_FILENAME_ENV_VAR -from awscli.customizations.history.constants import DEFAULT_HISTORY_FILENAME -from awscli.customizations.history.db import DatabaseRecordReader +from awscli.customizations.history.constants import ( + DEFAULT_HISTORY_FILENAME, + HISTORY_FILENAME_ENV_VAR, +) +from awscli.customizations.history.db import ( + DatabaseConnection, + DatabaseRecordReader, +) +from awscli.utils import OutputStreamFactory, is_a_tty class HistorySubcommand(BasicCommand): @@ -29,8 +31,9 @@ def __init__(self, session, db_reader=None, output_stream_factory=None): self._db_reader = db_reader self._output_stream_factory = 
output_stream_factory if output_stream_factory is None: - self._output_stream_factory = \ + self._output_stream_factory = ( self._get_default_output_stream_factory() + ) def _get_default_output_stream_factory(self): return OutputStreamFactory(self._session) @@ -45,7 +48,8 @@ def _close_history_db(self): def _get_history_db_filename(self): filename = os.environ.get( - HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME) + HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME + ) if not os.path.exists(filename): raise RuntimeError( 'Could not locate history. Make sure cli_history is set to ' diff --git a/awscli/customizations/history/constants.py b/awscli/customizations/history/constants.py index 486e274f612b..48558bb05b1c 100644 --- a/awscli/customizations/history/constants.py +++ b/awscli/customizations/history/constants.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. import os - HISTORY_FILENAME_ENV_VAR = 'AWS_CLI_HISTORY_FILE' DEFAULT_HISTORY_FILENAME = os.path.expanduser( - os.path.join('~', '.aws', 'cli', 'history', 'history.db')) + os.path.join('~', '.aws', 'cli', 'history', 'history.db') +) diff --git a/awscli/customizations/history/db.py b/awscli/customizations/history/db.py index bdb96d1dc4bd..2b1e270779c0 100644 --- a/awscli/customizations/history/db.py +++ b/awscli/customizations/history/db.py @@ -10,20 +10,16 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import uuid -import time -import json import datetime -import threading +import json import logging -from awscli.compat import collections_abc +import threading +import time +import uuid +from awscli.compat import binary_type, collections_abc, sqlite3 from botocore.history import BaseHistoryHandler -from awscli.compat import sqlite3 -from awscli.compat import binary_type - - LOG = logging.getLogger(__name__) @@ -41,7 +37,8 @@ class DatabaseConnection(object): def __init__(self, db_filename): self._connection = sqlite3.connect( - db_filename, check_same_thread=False, isolation_level=None) + db_filename, check_same_thread=False, isolation_level=None + ) self._ensure_database_setup() def close(self): @@ -92,8 +89,9 @@ def _remove_non_unicode_stings(self, obj): if isinstance(obj, str): obj = self._try_decode_bytes(obj) elif isinstance(obj, dict): - obj = dict((k, self._remove_non_unicode_stings(v)) for k, v - in obj.items()) + obj = dict( + (k, self._remove_non_unicode_stings(v)) for k, v in obj.items() + ) elif isinstance(obj, (list, tuple)): obj = [self._remove_non_unicode_stings(o) for o in obj] return obj @@ -152,26 +150,30 @@ def write_record(self, record): def _create_db_record(self, record): event_type = record['event_type'] - json_serialized_payload = json.dumps(record['payload'], - cls=PayloadSerializer) + json_serialized_payload = json.dumps( + record['payload'], cls=PayloadSerializer + ) db_record = ( record['command_id'], record.get('request_id'), record['source'], event_type, record['timestamp'], - json_serialized_payload + json_serialized_payload, ) return db_record class DatabaseRecordReader(object): _ORDERING = 'ORDER BY timestamp' - _GET_LAST_ID_RECORDS = """ + _GET_LAST_ID_RECORDS = ( + """ SELECT * FROM records WHERE id = (SELECT id FROM records WHERE timestamp = - (SELECT max(timestamp) FROM records)) %s;""" % _ORDERING + (SELECT max(timestamp) FROM records)) %s;""" + % _ORDERING + ) _GET_RECORDS_BY_ID = 
'SELECT * from records where id = ? %s' % _ORDERING _GET_ALL_RECORDS = ( 'SELECT a.id AS id_a, ' @@ -220,7 +222,8 @@ def iter_all_records(self): class RecordBuilder(object): _REQUEST_LIFECYCLE_EVENTS = set( - ['API_CALL', 'HTTP_REQUEST', 'HTTP_RESPONSE', 'PARSED_RESPONSE']) + ['API_CALL', 'HTTP_REQUEST', 'HTTP_RESPONSE', 'PARSED_RESPONSE'] + ) _START_OF_REQUEST_LIFECYCLE_EVENT = 'API_CALL' def __init__(self): @@ -254,7 +257,7 @@ def build_record(self, event_type, payload, source): 'event_type': event_type, 'payload': payload, 'source': source, - 'timestamp': int(time.time() * 1000) + 'timestamp': int(time.time() * 1000), } request_id = self._get_request_id(event_type) if request_id: diff --git a/awscli/customizations/history/list.py b/awscli/customizations/history/list.py index 81ebbf208652..67601133b30d 100644 --- a/awscli/customizations/history/list.py +++ b/awscli/customizations/history/list.py @@ -10,11 +10,11 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import json import datetime +import json -from awscli.utils import OutputStreamFactory from awscli.customizations.history.commands import HistorySubcommand +from awscli.utils import OutputStreamFactory class ListCommand(HistorySubcommand): @@ -26,12 +26,7 @@ class ListCommand(HistorySubcommand): '``history show`` with the command_id to see more details about ' 'a particular entry.' ) - _COL_WIDTHS = { - 'id_a': 38, - 'timestamp': 24, - 'args': 50, - 'rc': 0 - } + _COL_WIDTHS = {'id_a': 38, 'timestamp': 24, 'args': 50, 'rc': 0} def _get_default_output_stream_factory(self): return OutputStreamFactory(self._session, default_less_flags='SR') @@ -45,7 +40,8 @@ def _run_main(self, parsed_args, parsed_globals): raise RuntimeError( 'No commands were found in your history. Make sure you have ' 'enabled history mode by adding "cli_history = enabled" ' - 'to the config file.') + 'to the config file.' + ) with self._output_stream_factory.get_output_stream() as stream: formatter = TextFormatter(self._COL_WIDTHS, stream) @@ -60,6 +56,7 @@ class RecordAdapter(object): If there are no records we can just exit early. """ + def __init__(self, records): self._records = records self._next = None @@ -88,27 +85,28 @@ def __init__(self, col_widths, output_stream): def _format_time(self, timestamp): command_time = datetime.datetime.fromtimestamp(timestamp / 1000) formatted = datetime.datetime.strftime( - command_time, '%Y-%m-%d %I:%M:%S %p') + command_time, '%Y-%m-%d %I:%M:%S %p' + ) return formatted def _format_args(self, args, arg_width): json_value = json.loads(args) formatted = ' '.join(json_value[:2]) if len(formatted) >= arg_width: - formatted = '%s...' % formatted[:arg_width-4] + formatted = '%s...' 
% formatted[: arg_width - 4] return formatted def _format_record(self, record): fmt_string = "{0:<%s}{1:<%s}{2:<%s}{3}\n" % ( self._col_widths['id_a'], self._col_widths['timestamp'], - self._col_widths['args'] + self._col_widths['args'], ) record_line = fmt_string.format( record['id_a'], self._format_time(record['timestamp']), self._format_args(record['args'], self._col_widths['args']), - record['rc'] + record['rc'], ) return record_line diff --git a/awscli/customizations/history/show.py b/awscli/customizations/history/show.py index 93e49aed568b..b9dda2577508 100644 --- a/awscli/customizations/history/show.py +++ b/awscli/customizations/history/show.py @@ -13,15 +13,15 @@ import datetime import json import sys -import xml.parsers.expat import xml.dom.minidom +import xml.parsers.expat import colorama -from awscli.table import COLORAMA_KWARGS +from awscli.customizations.exceptions import ParamValidationError from awscli.customizations.history.commands import HistorySubcommand from awscli.customizations.history.filters import RegexFilter -from awscli.customizations.exceptions import ParamValidationError +from awscli.table import COLORAMA_KWARGS class Formatter(object): @@ -46,7 +46,8 @@ def __init__(self, output=None, include=None, exclude=None): self._output = sys.stdout if include and exclude: raise ParamValidationError( - 'Either input or exclude can be provided but not both') + 'Either input or exclude can be provided but not both' + ) self._include = include self._exclude = exclude @@ -80,97 +81,73 @@ class DetailedFormatter(Formatter): _SECTIONS = { 'CLI_VERSION': { 'title': 'AWS CLI command entered', - 'values': [ - {'description': 'with AWS CLI version'} - ] - }, - 'CLI_ARGUMENTS': { - 'values': [ - {'description': 'with arguments'} - ] + 'values': [{'description': 'with AWS CLI version'}], }, + 'CLI_ARGUMENTS': {'values': [{'description': 'with arguments'}]}, 'API_CALL': { 'title': 'API call made', 'values': [ - { - 'description': 'to service', - 'payload_key': 'service' - }, - { - 'description': 'using operation', - 'payload_key': 'operation' - }, + {'description': 'to service', 'payload_key': 'service'}, + {'description': 'using operation', 'payload_key': 'operation'}, { 'description': 'with parameters', 'payload_key': 'params', - 'value_format': 'dictionary' - } - ] + 'value_format': 'dictionary', + }, + ], }, 'HTTP_REQUEST': { 'title': 'HTTP request sent', 'values': [ - { - 'description': 'to URL', - 'payload_key': 'url' - }, - { - 'description': 'with method', - 'payload_key': 'method' - }, + {'description': 'to URL', 'payload_key': 'url'}, + {'description': 'with method', 'payload_key': 'method'}, { 'description': 'with headers', 'payload_key': 'headers', 'value_format': 'dictionary', - 'filters': [_SIG_FILTER] + 'filters': [_SIG_FILTER], }, { 'description': 'with body', 'payload_key': 'body', - 'value_format': 'http_body' - } - - ] + 'value_format': 'http_body', + }, + ], }, 'HTTP_RESPONSE': { 'title': 'HTTP response received', 'values': [ { 'description': 'with status code', - 'payload_key': 'status_code' + 'payload_key': 'status_code', }, { 'description': 'with headers', 'payload_key': 'headers', - 'value_format': 'dictionary' + 'value_format': 'dictionary', }, { 'description': 'with body', 'payload_key': 'body', - 'value_format': 'http_body' - } - ] + 'value_format': 'http_body', + }, + ], }, 'PARSED_RESPONSE': { 'title': 'HTTP response parsed', 'values': [ - { - 'description': 'parsed to', - 'value_format': 'dictionary' - } - ] + {'description': 'parsed to', 
'value_format': 'dictionary'} + ], }, 'CLI_RC': { 'title': 'AWS CLI command exited', - 'values': [ - {'description': 'with return code'} - ] + 'values': [{'description': 'with return code'}], }, } _COMPONENT_COLORS = { 'title': colorama.Style.BRIGHT, - 'description': colorama.Fore.CYAN + 'description': colorama.Fore.CYAN, } def __init__(self, output=None, include=None, exclude=None, colorize=True): @@ -225,7 +202,8 @@ def _format_section_title(self, title, event_record): formatted_timestamp = self._format_description('at time') formatted_timestamp += self._format_value( - event_record['timestamp'], event_record, value_format='timestamp') + event_record['timestamp'], event_record, value_format='timestamp' + ) return '\n' + formatted_title + formatted_timestamp @@ -233,19 +211,20 @@ def _get_api_num(self, event_record): request_id = event_record['request_id'] if request_id: if request_id not in self._request_id_to_api_num: - self._request_id_to_api_num[ - request_id] = self._num_api_calls + self._request_id_to_api_num[request_id] = self._num_api_calls self._num_api_calls += 1 return self._request_id_to_api_num[request_id] def _format_description(self, value_description): return self._color_if_configured( - value_description + ': ', 'description') + value_description + ': ', 'description' + ) def _format_value(self, value, event_record, value_format=None): if value_format: formatted_value = self._value_pformatter.pformat( - value, value_format, event_record) + value, value_format, event_record + ) else: formatted_value = str(value) return formatted_value + '\n' @@ -263,7 +242,8 @@ def pformat(self, value, value_format, event_record): def _pformat_timestamp(self, event_timestamp, event_record=None): return datetime.datetime.fromtimestamp( - event_timestamp/1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + event_timestamp / 1000.0 + ).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] def _pformat_dictionary(self, obj, event_record=None): return json.dumps(obj=obj, sort_keys=True, indent=4) @@ -295,7 +275,7 @@ def _get_pretty_xml(self, body): # is called. stripped_body = self._strip_whitespace(body) xml_dom = xml.dom.minidom.parseString(stripped_body) - return xml_dom.toprettyxml(indent=' '*4, newl='\n') + return xml_dom.toprettyxml(indent=' ' * 4, newl='\n') def _get_pretty_json(self, body): # The json body is loaded so it can be dumped in a format that @@ -312,9 +292,7 @@ def _is_xml(self, body): def _strip_whitespace(self, xml_string): xml_dom = xml.dom.minidom.parseString(xml_string) - return ''.join( - [line.strip() for line in xml_dom.toxml().splitlines()] - ) + return ''.join([line.strip() for line in xml_dom.toxml().splitlines()]) def _is_json_structure(self, body): if body.startswith('{'): @@ -333,43 +311,57 @@ class ShowCommand(HistorySubcommand): 'If this command is ran without any positional arguments, it will ' 'display the events for the last CLI command ran.' ) - FORMATTERS = { - 'detailed': DetailedFormatter - } + FORMATTERS = {'detailed': DetailedFormatter} ARG_TABLE = [ - {'name': 'command_id', 'nargs': '?', 'default': 'latest', - 'positional_arg': True, - 'help_text': ( - 'The ID of the CLI command to show. If this positional argument ' - 'is omitted, it will show the last the CLI command ran.')}, - {'name': 'include', 'nargs': '+', - 'help_text': ( - 'Specifies which events to **only** include when showing the ' - 'CLI command. 
This argument is mutually exclusive with ' - '``--exclude``.')}, - {'name': 'exclude', 'nargs': '+', - 'help_text': ( - 'Specifies which events to exclude when showing the ' - 'CLI command. This argument is mutually exclusive with ' - '``--include``.')}, - {'name': 'format', 'choices': FORMATTERS.keys(), - 'default': 'detailed', 'help_text': ( - 'Specifies which format to use in showing the events for ' - 'the specified CLI command. The following formats are ' - 'supported:\n\n' - '
<ul>' - '<li> detailed - This the default format. It prints out a ' - 'detailed overview of the CLI command ran. It displays all ' - 'of the key events in the command lifecycle where each ' - 'important event has a title and its important values ' - 'underneath. The events are ordered by timestamp and events of ' - 'the same API call are associated together with the ' - '[``api_id``] notation where events that share the same ' - '``api_id`` belong to the lifecycle of the same API call.' - '</li>' - '</ul>
' - ) - } + { + 'name': 'command_id', + 'nargs': '?', + 'default': 'latest', + 'positional_arg': True, + 'help_text': ( + 'The ID of the CLI command to show. If this positional argument ' + 'is omitted, it will show the last CLI command ran.' + ), + }, + { + 'name': 'include', + 'nargs': '+', + 'help_text': ( + 'Specifies which events to **only** include when showing the ' + 'CLI command. This argument is mutually exclusive with ' + '``--exclude``.' + ), + }, + { + 'name': 'exclude', + 'nargs': '+', + 'help_text': ( + 'Specifies which events to exclude when showing the ' + 'CLI command. This argument is mutually exclusive with ' + '``--include``.' + ), + }, + { + 'name': 'format', + 'choices': FORMATTERS.keys(), + 'default': 'detailed', + 'help_text': ( + 'Specifies which format to use in showing the events for ' + 'the specified CLI command. The following formats are ' + 'supported:\n\n' + '
<ul>' + '<li> detailed - This is the default format. It prints out a ' + 'detailed overview of the CLI command ran. It displays all ' + 'of the key events in the command lifecycle where each ' + 'important event has a title and its important values ' + 'underneath. The events are ordered by timestamp and events of ' + 'the same API call are associated together with the ' + '[``api_id``] notation where events that share the same ' + '``api_id`` belong to the lifecycle of the same API call.' + '</li>' + '</ul>
    ' + ), + }, ] def _run_main(self, parsed_args, parsed_globals): @@ -378,7 +370,8 @@ def _run_main(self, parsed_args, parsed_globals): self._validate_args(parsed_args) with self._output_stream_factory.get_output_stream() as stream: formatter = self._get_formatter( - parsed_args, parsed_globals, stream) + parsed_args, parsed_globals, stream + ) for record in self._get_record_iterator(parsed_args): formatter.display(record) finally: @@ -388,18 +381,20 @@ def _run_main(self, parsed_args, parsed_globals): def _validate_args(self, parsed_args): if parsed_args.exclude and parsed_args.include: raise ParamValidationError( - 'Either --exclude or --include can be provided but not both') + 'Either --exclude or --include can be provided but not both' + ) def _get_formatter(self, parsed_args, parsed_globals, output_stream): format_type = parsed_args.format formatter_kwargs = { 'include': parsed_args.include, 'exclude': parsed_args.exclude, - 'output': output_stream + 'output': output_stream, } if format_type == 'detailed': formatter_kwargs['colorize'] = self._should_use_color( - parsed_globals) + parsed_globals + ) return self.FORMATTERS[format_type](**formatter_kwargs) def _get_record_iterator(self, parsed_args): diff --git a/awscli/customizations/iamvirtmfa.py b/awscli/customizations/iamvirtmfa.py index c0ee3582d6b4..ce40c41c3003 100644 --- a/awscli/customizations/iamvirtmfa.py +++ b/awscli/customizations/iamvirtmfa.py @@ -22,22 +22,27 @@ to the specified file. It will also remove the two bootstrap data fields from the response. """ -import base64 -from awscli.customizations.arguments import StatefulArgument -from awscli.customizations.arguments import resolve_given_outfile_path -from awscli.customizations.arguments import is_parsed_result_successful +import base64 +from awscli.customizations.arguments import ( + StatefulArgument, + is_parsed_result_successful, + resolve_given_outfile_path, +) CHOICES = ('QRCodePNG', 'Base32StringSeed') -OUTPUT_HELP = ('The output path and file name where the bootstrap ' - 'information will be stored.') -BOOTSTRAP_HELP = ('Method to use to seed the virtual MFA. ' - 'Valid values are: %s | %s' % CHOICES) +OUTPUT_HELP = ( + 'The output path and file name where the bootstrap ' + 'information will be stored.' +) +BOOTSTRAP_HELP = ( + 'Method to use to seed the virtual MFA. ' + 'Valid values are: %s | %s' % CHOICES +) class FileArgument(StatefulArgument): - def add_to_params(self, parameters, value): # Validate the file here so we can raise an error prior # calling the service. 
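iamvirtmfa follows the same after-call outfile pattern as the iot.py customization further down: register on `after-call.iam.CreateVirtualMFADevice`, pull the bootstrap field out of the parsed response, and write it to the user-supplied path. A simplified, hypothetical handler (the names and argument shape here are illustrative, not the module's actual API):

```python
def save_bootstrap(parsed_response, bootstrap_method, outfile):
    # bootstrap_method is 'QRCodePNG' or 'Base32StringSeed'; both come
    # back inside the CreateVirtualMFADevice response.
    data = parsed_response['VirtualMFADevice'][bootstrap_method]
    if isinstance(data, str):
        data = data.encode('utf-8')
    with open(outfile, 'wb') as f:
        f.write(data)
```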
@@ -46,19 +51,24 @@ def add_to_params(self, parameters, value): class IAMVMFAWrapper(object): - def __init__(self, event_handler): self._event_handler = event_handler self._outfile = FileArgument( - 'outfile', help_text=OUTPUT_HELP, required=True) + 'outfile', help_text=OUTPUT_HELP, required=True + ) self._method = StatefulArgument( - 'bootstrap-method', help_text=BOOTSTRAP_HELP, - choices=CHOICES, required=True) + 'bootstrap-method', + help_text=BOOTSTRAP_HELP, + choices=CHOICES, + required=True, + ) self._event_handler.register( 'building-argument-table.iam.create-virtual-mfa-device', - self._add_options) + self._add_options, + ) self._event_handler.register( - 'after-call.iam.CreateVirtualMFADevice', self._save_file) + 'after-call.iam.CreateVirtualMFADevice', self._save_file + ) def _add_options(self, argument_table, **kwargs): argument_table['outfile'] = self._outfile diff --git a/awscli/customizations/iot.py b/awscli/customizations/iot.py index 7703014335b1..f4e4b9770513 100644 --- a/awscli/customizations/iot.py +++ b/awscli/customizations/iot.py @@ -22,6 +22,7 @@ - ``--public-key-outfile``: keyPair.PublicKey - ``--private-key-outfile``: keyPair.PrivateKey """ + from awscli.customizations.arguments import QueryOutFileArgument @@ -34,19 +35,34 @@ def register_create_keys_and_cert_arguments(session, argument_table, **kwargs): """ after_event = 'after-call.iot.CreateKeysAndCertificate' argument_table['certificate-pem-outfile'] = QueryOutFileArgument( - session=session, name='certificate-pem-outfile', - query='certificatePem', after_call_event=after_event, perm=0o600) + session=session, + name='certificate-pem-outfile', + query='certificatePem', + after_call_event=after_event, + perm=0o600, + ) argument_table['public-key-outfile'] = QueryOutFileArgument( - session=session, name='public-key-outfile', query='keyPair.PublicKey', - after_call_event=after_event, perm=0o600) + session=session, + name='public-key-outfile', + query='keyPair.PublicKey', + after_call_event=after_event, + perm=0o600, + ) argument_table['private-key-outfile'] = QueryOutFileArgument( - session=session, name='private-key-outfile', - query='keyPair.PrivateKey', after_call_event=after_event, perm=0o600) + session=session, + name='private-key-outfile', + query='keyPair.PrivateKey', + after_call_event=after_event, + perm=0o600, + ) def register_create_keys_from_csr_arguments(session, argument_table, **kwargs): """Add certificate-pem-outfile to create-certificate-from-csr""" argument_table['certificate-pem-outfile'] = QueryOutFileArgument( - session=session, name='certificate-pem-outfile', + session=session, + name='certificate-pem-outfile', query='certificatePem', - after_call_event='after-call.iot.CreateCertificateFromCsr', perm=0o600) + after_call_event='after-call.iot.CreateCertificateFromCsr', + perm=0o600, + ) diff --git a/awscli/customizations/iot_data.py b/awscli/customizations/iot_data.py index 62c02ee126dd..2b29c94a9579 100644 --- a/awscli/customizations/iot_data.py +++ b/awscli/customizations/iot_data.py @@ -14,7 +14,8 @@ def register_custom_endpoint_note(event_emitter): event_emitter.register_last( - 'doc-description.iot-data', add_custom_endpoint_url_note) + 'doc-description.iot-data', add_custom_endpoint_url_note + ) def add_custom_endpoint_url_note(help_command, **kwargs): diff --git a/awscli/customizations/lightsail/__init__.py b/awscli/customizations/lightsail/__init__.py index aa9f33389398..19c1aaad5b81 100644 --- a/awscli/customizations/lightsail/__init__.py +++ 
b/awscli/customizations/lightsail/__init__.py @@ -11,8 +11,9 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.lightsail.push_container_image \ - import PushContainerImage +from awscli.customizations.lightsail.push_container_image import ( + PushContainerImage, +) def initialize(cli): diff --git a/awscli/customizations/lightsail/push_container_image.py b/awscli/customizations/lightsail/push_container_image.py index 9a1640945b53..4c605489f8ce 100644 --- a/awscli/customizations/lightsail/push_container_image.py +++ b/awscli/customizations/lightsail/push_container_image.py @@ -14,6 +14,7 @@ import json import logging import subprocess + import awscli from awscli.compat import ignore_user_entered_signals from awscli.customizations.commands import BasicCommand @@ -24,11 +25,12 @@ ERROR_MESSAGE = ( 'The Lightsail Control (lightsailctl) plugin was not found. ', 'To download and install it, see ', - 'https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software' + 'https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software', ) INPUT_VERSION = '1' + class PushContainerImage(BasicCommand): NAME = 'push-container-image' @@ -38,18 +40,11 @@ class PushContainerImage(BasicCommand): { 'name': 'service-name', 'help_text': helptext.SERVICENAME, - 'required': True + 'required': True, }, - { - 'name': 'image', - 'help_text': helptext.IMAGE, - 'required': True - }, - { - 'name': 'label', - 'help_text': helptext.LABEL, - 'required': True - }] + {'name': 'image', 'help_text': helptext.IMAGE, 'required': True}, + {'name': 'label', 'help_text': helptext.LABEL, 'required': True}, + ] def _run_main(self, parsed_args, parsed_globals): payload = self._get_input_request(parsed_args, parsed_globals) @@ -65,24 +60,24 @@ def _run_main(self, parsed_args, parsed_globals): subprocess.run( ['lightsailctl', '--plugin', '--input-stdin'], input=json.dumps(payload).encode('utf-8'), - check=True) + check=True, + ) return 0 except OSError as ex: if ex.errno == errno.ENOENT: - logger.debug('lightsailctl not found', - exc_info=True) + logger.debug('lightsailctl not found', exc_info=True) raise ValueError(''.join(ERROR_MESSAGE)) def _get_input_request(self, parsed_args, parsed_globals): input_request = { - 'inputVersion' : INPUT_VERSION, + 'inputVersion': INPUT_VERSION, 'operation': 'PushContainerImage', } payload = dict( service=parsed_args.service_name, image=parsed_args.image, - label=parsed_args.label + label=parsed_args.label, ) configuration = {} @@ -105,22 +100,25 @@ def _get_input_request(self, parsed_args, parsed_globals): if parsed_globals.profile: configuration['profile'] = parsed_globals.profile elif self._session.get_config_variable('profile'): - configuration['profile'] = \ - self._session.get_config_variable('profile') + configuration['profile'] = self._session.get_config_variable( + 'profile' + ) if parsed_globals.region: configuration['region'] = parsed_globals.region elif self._session.get_config_variable('region'): - configuration['region'] = \ - self._session.get_config_variable('region') + configuration['region'] = self._session.get_config_variable( + 'region' + ) configuration['doNotSignRequest'] = not parsed_globals.sign_request if parsed_globals.ca_bundle: configuration['caBundle'] = parsed_globals.ca_bundle elif self._session.get_config_variable('ca_bundle'): - configuration['caBundle'] = \ - 
self._session.get_config_variable('ca_bundle') + configuration['caBundle'] = self._session.get_config_variable( + 'ca_bundle' + ) if parsed_globals.read_timeout is not None: configuration['readTimeout'] = parsed_globals.read_timeout diff --git a/awscli/customizations/logs/__init__.py b/awscli/customizations/logs/__init__.py index c59af9c8492c..6cdb6292c1c5 100644 --- a/awscli/customizations/logs/__init__.py +++ b/awscli/customizations/logs/__init__.py @@ -10,13 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.logs.tail import TailCommand from awscli.customizations.logs.startlivetail import StartLiveTailCommand +from awscli.customizations.logs.tail import TailCommand def register_logs_commands(event_emitter): event_emitter.register('building-command-table.logs', inject_tail_command) - event_emitter.register('building-command-table.logs', inject_start_live_tail_command) + event_emitter.register( + 'building-command-table.logs', inject_start_live_tail_command + ) def inject_tail_command(command_table, session, **kwargs): diff --git a/awscli/customizations/logs/startlivetail.py b/awscli/customizations/logs/startlivetail.py index 907e752996d1..eb79f87cf188 100644 --- a/awscli/customizations/logs/startlivetail.py +++ b/awscli/customizations/logs/startlivetail.py @@ -10,30 +10,30 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from enum import Enum -from functools import partial -from threading import Thread import asyncio -import colorama import contextlib import json import re import signal import sys import time +from enum import Enum +from functools import partial +from threading import Thread +import colorama from prompt_toolkit.application import Application, get_app from prompt_toolkit.buffer import Buffer from prompt_toolkit.filters import Condition from prompt_toolkit.formatted_text import ( ANSI, - to_formatted_text, fragment_list_to_text, + to_formatted_text, ) from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent from prompt_toolkit.layout import Layout, Window, WindowAlign -from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl from prompt_toolkit.layout.containers import HSplit, VSplit +from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl from prompt_toolkit.layout.dimension import Dimension from prompt_toolkit.layout.processors import Processor, Transformation @@ -42,7 +42,6 @@ from awscli.customizations.exceptions import ParamValidationError from awscli.utils import is_a_tty - DESCRIPTION = ( "Starts a Live Tail streaming session for one or more log groups. 
" "A Live Tail session provides a near real-time streaming of log events " @@ -824,7 +823,9 @@ def exit(self): self._application.exit() async def _run_ui(self): - self._application.create_background_task(self._log_events_printer.run()) + self._application.create_background_task( + self._log_events_printer.run() + ) self._application.create_background_task(self._render_metadata()) self._application.create_background_task(self._trim_buffers()) diff --git a/awscli/customizations/logs/tail.py b/awscli/customizations/logs/tail.py index cb3151003270..de806db4bd88 100644 --- a/awscli/customizations/logs/tail.py +++ b/awscli/customizations/logs/tail.py @@ -10,23 +10,22 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from collections import defaultdict -from datetime import datetime, timedelta import json import re import time +from collections import defaultdict +from datetime import datetime, timedelta -from botocore.utils import parse_timestamp, datetime2timestamp -from dateutil import tz import colorama +from dateutil import tz from awscli.compat import get_stdout_text_writer -from awscli.utils import is_a_tty from awscli.customizations.commands import BasicCommand +from awscli.utils import is_a_tty +from botocore.utils import datetime2timestamp, parse_timestamp class BaseLogEventsFormatter(object): - _TIMESTAMP_COLOR = colorama.Fore.GREEN _STREAM_NAME_COLOR = colorama.Fore.CYAN @@ -56,13 +55,14 @@ class ShortLogEventsFormatter(BaseLogEventsFormatter): def display_log_event(self, log_event): log_event = '%s %s' % ( self._format_timestamp(log_event['timestamp']), - log_event['message'] + log_event['message'], ) self._write_log_event(log_event) def _format_timestamp(self, timestamp): return self._color_if_configured( - timestamp.strftime("%Y-%m-%dT%H:%M:%S"), self._TIMESTAMP_COLOR) + timestamp.strftime("%Y-%m-%dT%H:%M:%S"), self._TIMESTAMP_COLOR + ) class DetailedLogEventsFormatter(BaseLogEventsFormatter): @@ -70,15 +70,15 @@ def display_log_event(self, log_event): log_event = '%s %s %s' % ( self._format_timestamp(log_event['timestamp']), self._color_if_configured( - log_event['logStreamName'], self._STREAM_NAME_COLOR), - log_event['message'] + log_event['logStreamName'], self._STREAM_NAME_COLOR + ), + log_event['message'], ) self._write_log_event(log_event) def _format_timestamp(self, timestamp): return self._color_if_configured( - timestamp.isoformat(timespec='microseconds'), - self._TIMESTAMP_COLOR + timestamp.isoformat(timespec='microseconds'), self._TIMESTAMP_COLOR ) @@ -87,8 +87,9 @@ def display_log_event(self, log_event): log_event = '%s %s %s' % ( self._format_timestamp(log_event['timestamp']), self._color_if_configured( - log_event['logStreamName'], self._STREAM_NAME_COLOR), - self._format_pretty_json(log_event['message']) + log_event['logStreamName'], self._STREAM_NAME_COLOR + ), + self._format_pretty_json(log_event['message']), ) self._write_log_event(log_event) @@ -102,7 +103,8 @@ def _format_pretty_json(self, log_message): def _format_timestamp(self, timestamp): return self._color_if_configured( - timestamp.isoformat(), self._TIMESTAMP_COLOR) + timestamp.isoformat(), self._TIMESTAMP_COLOR + ) class TailCommand(BasicCommand): @@ -139,7 +141,7 @@ class TailCommand(BasicCommand): 'display logs starting five minutes in the past. ' 'Note that multiple units are **not** supported ' '(i.e. 
``5h30m``)' - ) + ), }, { 'name': 'follow', @@ -149,7 +151,7 @@ class TailCommand(BasicCommand): 'Whether to continuously poll for new logs. By default, the ' 'command will exit once there are no more logs to display. ' 'To exit from this mode, use Control-C.' - ) + ), }, { 'name': 'format', @@ -169,7 +171,7 @@ class TailCommand(BasicCommand): '
<li> json - Pretty print any messages that are entirely JSON.' '</li>' '</ul>
' - ) + ), }, { 'name': 'filter-pattern', @@ -179,7 +181,7 @@ class TailCommand(BasicCommand): 'latest/logs/FilterAndPatternSyntax.html">Filter and ' 'Pattern Syntax for details. If not provided, all ' 'the events are matched' - ) + ), }, { 'name': 'log-stream-names', @@ -188,7 +190,7 @@ class TailCommand(BasicCommand): 'The list of stream names to filter logs by. This parameter ' 'cannot be specified when ``--log-stream-name-prefix`` is ' 'also specified.' - ) + ), }, { 'name': 'log-stream-name-prefix', @@ -197,10 +199,8 @@ class TailCommand(BasicCommand): 'with names beginning with this prefix will be returned. This ' 'parameter cannot be specified when ``log-stream-names`` is ' 'also specified.' - ) + ), }, - - ] _FORMAT_TO_FORMATTER_CLS = { 'detailed': DetailedLogEventsFormatter, @@ -210,17 +210,21 @@ class TailCommand(BasicCommand): def _run_main(self, parsed_args, parsed_globals): logs_client = self._session.create_client( - 'logs', region_name=parsed_globals.region, + 'logs', + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) logs_generator = self._get_log_events_generator( - logs_client, parsed_args.follow) + logs_client, parsed_args.follow + ) log_events = logs_generator.iter_log_events( - parsed_args.group_name, start=parsed_args.since, + parsed_args.group_name, + start=parsed_args.since, filter_pattern=parsed_args.filter_pattern, log_stream_names=parsed_args.log_stream_names, - log_stream_name_prefix=parsed_args.log_stream_name_prefix) + log_stream_name_prefix=parsed_args.log_stream_name_prefix, + ) self._output_log_events(parsed_args, parsed_globals, log_events) return 0 @@ -234,7 +238,8 @@ def _get_log_events_generator(self, logs_client, follow): def _output_log_events(self, parsed_args, parsed_globals, log_events): output = get_stdout_text_writer() logs_formatter = self._FORMAT_TO_FORMATTER_CLS[parsed_args.format]( - output, colorize=self._should_use_color(parsed_globals)) + output, colorize=self._should_use_color(parsed_globals) + ) for event in log_events: logs_formatter.display_log_event(event) @@ -286,11 +291,21 @@ def __init__(self, client, timestamp_utils): self._client = client self._timestamp_utils = timestamp_utils - def iter_log_events(self, group_name, start=None, filter_pattern=None, - log_stream_names=None, log_stream_name_prefix=None): + def iter_log_events( + self, + group_name, + start=None, + filter_pattern=None, + log_stream_names=None, + log_stream_name_prefix=None, + ): filter_logs_events_kwargs = self._get_filter_logs_events_kwargs( - group_name, start, filter_pattern, log_stream_names, - log_stream_name_prefix) + group_name, + start, + filter_pattern, + log_stream_names, + log_stream_name_prefix, + ) log_events = self._filter_log_events(filter_logs_events_kwargs) for log_event in log_events: self._convert_event_timestamps(log_event) @@ -299,14 +314,15 @@ def iter_log_events(self, group_name, start=None, filter_pattern=None, def _filter_log_events(self, filter_logs_events_kwargs): raise NotImplementedError('_filter_log_events()') - def _get_filter_logs_events_kwargs(self, group_name, start, - filter_pattern, - log_stream_names, - log_stream_name_prefix): - kwargs = { - 'logGroupName': group_name, - 'interleaved': True - } + def _get_filter_logs_events_kwargs( + self, + group_name, + start, + filter_pattern, + log_stream_names, + log_stream_name_prefix, + ): + kwargs = {'logGroupName': group_name, 'interleaved': True} if start is not None: 
kwargs['startTime'] = self._timestamp_utils.to_epoch_millis(start) if filter_pattern is not None: @@ -319,9 +335,11 @@ def _get_filter_logs_events_kwargs(self, group_name, start, def _convert_event_timestamps(self, event): event['ingestionTime'] = self._timestamp_utils.to_datetime( - event['ingestionTime']) + event['ingestionTime'] + ) event['timestamp'] = self._timestamp_utils.to_datetime( - event['timestamp']) + event['timestamp'] + ) class NoFollowLogEventsGenerator(BaseLogEventsGenerator): @@ -356,32 +374,39 @@ def _get_latest_events_and_timestamp(self, event_ids_per_timestamp): # Keep only ids of the events with the newest timestamp newest_timestamp = max(event_ids_per_timestamp.keys()) event_ids_per_timestamp = defaultdict( - set, {newest_timestamp: event_ids_per_timestamp[newest_timestamp]} + set, + {newest_timestamp: event_ids_per_timestamp[newest_timestamp]}, ) return event_ids_per_timestamp - def _reset_filter_log_events_params(self, fle_kwargs, event_ids_per_timestamp): + def _reset_filter_log_events_params( + self, fle_kwargs, event_ids_per_timestamp + ): # Remove nextToken and update startTime for the next request # with the timestamp of the newest event if event_ids_per_timestamp: - fle_kwargs['startTime'] = max( - event_ids_per_timestamp.keys() - ) + fle_kwargs['startTime'] = max(event_ids_per_timestamp.keys()) fle_kwargs.pop('nextToken', None) def _do_filter_log_events(self, filter_logs_events_kwargs): event_ids_per_timestamp = defaultdict(set) while True: response = self._client.filter_log_events( - **filter_logs_events_kwargs) + **filter_logs_events_kwargs + ) for event in response['events']: # For the case where we've hit the last page, we will be # reusing the newest timestamp of the received events to keep polling. # This means it is possible that duplicate log events with same timestamp # are returned back which we do not want to yield again. # We only want to yield log events that we have not seen. 
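Condensed, the follow-mode loop in this hunk does the following: page through `filter_log_events`, remember the event ids seen at the newest timestamp, and reuse that timestamp as `startTime` on the next poll so redelivered events can be skipped. A sketch of the same algorithm outside the class (`client` is a CloudWatch Logs client; error handling omitted):

```python
import time
from collections import defaultdict

def follow_log_events(client, group_name, poll_seconds=5):
    kwargs = {'logGroupName': group_name, 'interleaved': True}
    seen = defaultdict(set)  # timestamp -> event ids already yielded
    while True:
        response = client.filter_log_events(**kwargs)
        for event in response['events']:
            if event['eventId'] not in seen[event['timestamp']]:
                seen[event['timestamp']].add(event['eventId'])
                yield event
        if seen:
            # Only events at the newest timestamp can reappear next poll.
            newest = max(seen)
            seen = defaultdict(set, {newest: seen[newest]})
        if 'nextToken' in response:
            kwargs['nextToken'] = response['nextToken']
        else:
            if seen:
                kwargs['startTime'] = max(seen)
            kwargs.pop('nextToken', None)
            time.sleep(poll_seconds)
```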
- if event['eventId'] not in event_ids_per_timestamp[event['timestamp']]: - event_ids_per_timestamp[event['timestamp']].add(event['eventId']) + if ( + event['eventId'] + not in event_ids_per_timestamp[event['timestamp']] + ): + event_ids_per_timestamp[event['timestamp']].add( + event['eventId'] + ) yield event event_ids_per_timestamp = self._get_latest_events_and_timestamp( event_ids_per_timestamp @@ -390,7 +415,6 @@ def _do_filter_log_events(self, filter_logs_events_kwargs): filter_logs_events_kwargs['nextToken'] = response['nextToken'] else: self._reset_filter_log_events_params( - filter_logs_events_kwargs, - event_ids_per_timestamp + filter_logs_events_kwargs, event_ids_per_timestamp ) self._sleep(self._TIME_TO_SLEEP) diff --git a/awscli/customizations/opsworks.py b/awscli/customizations/opsworks.py index e91d47896fd9..1b02f3120094 100644 --- a/awscli/customizations/opsworks.py +++ b/awscli/customizations/opsworks.py @@ -22,13 +22,11 @@ import tempfile import textwrap -from botocore.exceptions import ClientError - -from awscli.compat import urlopen, ensure_text_type +from awscli.compat import ensure_text_type, urlopen from awscli.customizations.commands import BasicCommand -from awscli.customizations.utils import create_client_from_parsed_globals from awscli.customizations.exceptions import ParamValidationError - +from awscli.customizations.utils import create_client_from_parsed_globals +from botocore.exceptions import ClientError LOG = logging.getLogger(__name__) @@ -41,8 +39,9 @@ INSTANCE_ID_RE = re.compile(r"^i-[0-9a-f]+$") IP_ADDRESS_RE = re.compile(r"^\d+\.\d+\.\d+\.\d+$") -IDENTITY_URL = \ +IDENTITY_URL = ( "http://169.254.169.254/latest/dynamic/instance-identity/document" +) REMOTE_SCRIPT = """ set -e @@ -78,49 +77,83 @@ class OpsWorksRegister(BasicCommand): """).strip() ARG_TABLE = [ - {'name': 'stack-id', 'required': True, - 'help_text': """A stack ID. The instance will be registered with the - given stack."""}, - {'name': 'infrastructure-class', 'required': True, - 'choices': ['ec2', 'on-premises'], - 'help_text': """Specifies whether to register an EC2 instance (`ec2`) - or an on-premises instance (`on-premises`)."""}, - {'name': 'override-hostname', 'dest': 'hostname', - 'help_text': """The instance hostname. If not provided, the current - hostname of the machine will be used."""}, - {'name': 'override-private-ip', 'dest': 'private_ip', - 'help_text': """An IP address. If you set this parameter, the given IP + { + 'name': 'stack-id', + 'required': True, + 'help_text': """A stack ID. The instance will be registered with the + given stack.""", + }, + { + 'name': 'infrastructure-class', + 'required': True, + 'choices': ['ec2', 'on-premises'], + 'help_text': """Specifies whether to register an EC2 instance (`ec2`) + or an on-premises instance (`on-premises`).""", + }, + { + 'name': 'override-hostname', + 'dest': 'hostname', + 'help_text': """The instance hostname. If not provided, the current + hostname of the machine will be used.""", + }, + { + 'name': 'override-private-ip', + 'dest': 'private_ip', + 'help_text': """An IP address. If you set this parameter, the given IP address will be used as the private IP address within OpsWorks. Otherwise the private IP address will be determined automatically. Not to be used with EC2 - instances."""}, - {'name': 'override-public-ip', 'dest': 'public_ip', - 'help_text': """An IP address. If you set this parameter, the given IP + instances.""", + }, + { + 'name': 'override-public-ip', + 'dest': 'public_ip', + 'help_text': """An IP address. 
If you set this parameter, the given IP address will be used as the public IP address within OpsWorks. Otherwise the public IP address will be determined automatically. Not to be used with EC2 - instances."""}, - {'name': 'override-ssh', 'dest': 'ssh', - 'help_text': """If you set this parameter, the given command will be - used to connect to the machine."""}, - {'name': 'ssh-username', 'dest': 'username', - 'help_text': """If provided, this username will be used to connect to - the host."""}, - {'name': 'ssh-private-key', 'dest': 'private_key', - 'help_text': """If provided, the given private key file will be used - to connect to the machine."""}, - {'name': 'local', 'action': 'store_true', - 'help_text': """If given, instead of a remote machine, the local + instances.""", + }, + { + 'name': 'override-ssh', + 'dest': 'ssh', + 'help_text': """If you set this parameter, the given command will be + used to connect to the machine.""", + }, + { + 'name': 'ssh-username', + 'dest': 'username', + 'help_text': """If provided, this username will be used to connect to + the host.""", + }, + { + 'name': 'ssh-private-key', + 'dest': 'private_key', + 'help_text': """If provided, the given private key file will be used + to connect to the machine.""", + }, + { + 'name': 'local', + 'action': 'store_true', + 'help_text': """If given, instead of a remote machine, the local machine will be imported. Cannot be used together - with `target`."""}, - {'name': 'use-instance-profile', 'action': 'store_true', - 'help_text': """Use the instance profile instead of creating an IAM - user."""}, - {'name': 'target', 'positional_arg': True, 'nargs': '?', - 'synopsis': '[]', - 'help_text': """Either the EC2 instance ID or the hostname of the + with `target`.""", + }, + { + 'name': 'use-instance-profile', + 'action': 'store_true', + 'help_text': """Use the instance profile instead of creating an IAM + user.""", + }, + { + 'name': 'target', + 'positional_arg': True, + 'nargs': '?', + 'synopsis': '[]', + 'help_text': """Either the EC2 instance ID or the hostname of the instance or machine to be registered with OpsWorks. - Cannot be used together with `--local`."""}, + Cannot be used together with `--local`.""", + }, ] def __init__(self, session): @@ -136,7 +169,8 @@ def __init__(self, session): def _create_clients(self, args, parsed_globals): self.iam = self._session.create_client('iam') self.opsworks = create_client_from_parsed_globals( - self._session, 'opsworks', parsed_globals) + self._session, 'opsworks', parsed_globals + ) def _run_main(self, args, parsed_globals): self._create_clients(args, parsed_globals) @@ -157,36 +191,45 @@ def prevalidate_arguments(self, args): raise ParamValidationError("One of target or --local is required.") elif args.target and args.local: raise ParamValidationError( - "Arguments target and --local are mutually exclusive.") + "Arguments target and --local are mutually exclusive." + ) if args.local and platform.system() != 'Linux': raise ParamValidationError( - "Non-Linux instances are not supported by AWS OpsWorks.") + "Non-Linux instances are not supported by AWS OpsWorks." + ) if args.ssh and (args.username or args.private_key): raise ParamValidationError( "Argument --override-ssh cannot be used together with " - "--ssh-username or --ssh-private-key.") + "--ssh-username or --ssh-private-key." + ) if args.infrastructure_class == 'ec2': if args.private_ip: raise ParamValidationError( - "--override-private-ip is not supported for EC2.") + "--override-private-ip is not supported for EC2." 
+ ) if args.public_ip: raise ParamValidationError( - "--override-public-ip is not supported for EC2.") + "--override-public-ip is not supported for EC2." + ) - if args.infrastructure_class == 'on-premises' and \ - args.use_instance_profile: + if ( + args.infrastructure_class == 'on-premises' + and args.use_instance_profile + ): raise ParamValidationError( - "--use-instance-profile is only supported for EC2.") + "--use-instance-profile is only supported for EC2." + ) if args.hostname: if not HOSTNAME_RE.match(args.hostname): raise ParamValidationError( "Invalid hostname: '%s'. Hostnames must consist of " "letters, digits and dashes only and must not start or " - "end with a dash." % args.hostname) + "end with a dash." % args.hostname + ) def retrieve_stack(self, args): """ @@ -197,18 +240,20 @@ def retrieve_stack(self, args): """ LOG.debug("Retrieving stack and provisioning parameters") - self._stack = self.opsworks.describe_stacks( - StackIds=[args.stack_id] - )['Stacks'][0] - self._prov_params = \ + self._stack = self.opsworks.describe_stacks(StackIds=[args.stack_id])[ + 'Stacks' + ][0] + self._prov_params = ( self.opsworks.describe_stack_provisioning_parameters( StackId=self._stack['StackId'] ) + ) if args.infrastructure_class == 'ec2' and not args.local: LOG.debug("Retrieving EC2 instance information") ec2 = self._session.create_client( - 'ec2', region_name=self._stack['Region']) + 'ec2', region_name=self._stack['Region'] + ) # `desc_args` are arguments for the describe_instances call, # whereas `conditions` is a list of lambdas for further filtering @@ -234,9 +279,10 @@ def retrieve_stack(self, args): # Cannot search for either private or public IP at the same # time, thus filter afterwards conditions.append( - lambda instance: - instance.get('PrivateIpAddress') == args.target or - instance.get('PublicIpAddress') == args.target) + lambda instance: instance.get('PrivateIpAddress') + == args.target + or instance.get('PublicIpAddress') == args.target + ) # also use the given address to connect self._use_address = args.target else: @@ -255,12 +301,16 @@ def retrieve_stack(self, args): if not instances: raise ValueError( - "Did not find any instance matching %s." % args.target) + "Did not find any instance matching %s." % args.target + ) elif len(instances) > 1: raise ValueError( - "Found multiple instances matching %s: %s." % ( + "Found multiple instances matching %s: %s." + % ( args.target, - ", ".join(i['InstanceId'] for i in instances))) + ", ".join(i['InstanceId'] for i in instances), + ) + ) self._ec2_instance = instances[0] @@ -273,19 +323,24 @@ def validate_arguments(self, args): instances = self.opsworks.describe_instances( StackId=self._stack['StackId'] )['Instances'] - if any(args.hostname.lower() == instance['Hostname'] - for instance in instances): + if any( + args.hostname.lower() == instance['Hostname'] + for instance in instances + ): raise ValueError( "Invalid hostname: '%s'. Hostnames must be unique within " - "a stack." % args.hostname) + "a stack." % args.hostname + ) if args.infrastructure_class == 'ec2' and args.local: # make sure the regions match region = json.loads( - ensure_text_type(urlopen(IDENTITY_URL).read()))['region'] + ensure_text_type(urlopen(IDENTITY_URL).read()) + )['region'] if region != self._stack['Region']: raise ValueError( - "The stack's and the instance's region must match.") + "The stack's and the instance's region must match." 
+ ) def determine_details(self, args): """ @@ -306,12 +361,14 @@ def determine_details(self, args): elif 'PrivateIpAddress' in self._ec2_instance: LOG.warning( "Instance does not have a public IP address. Trying " - "to use the private address to connect.") + "to use the private address to connect." + ) self._use_address = self._ec2_instance['PrivateIpAddress'] else: # Should never happen raise ValueError( - "The instance does not seem to have an IP address.") + "The instance does not seem to have an IP address." + ) elif args.infrastructure_class == 'on-premises': self._use_address = args.target @@ -344,7 +401,10 @@ def create_iam_entities(self, args): self.iam.create_group(GroupName=group_name, Path=IAM_PATH) LOG.debug("Created IAM group %s", group_name) except ClientError as e: - if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists': + if ( + e.response.get('Error', {}).get('Code') + == 'EntityAlreadyExists' + ): LOG.debug("IAM group %s exists, continuing", group_name) # group already exists, good pass @@ -355,17 +415,20 @@ def create_iam_entities(self, args): LOG.debug("Creating an IAM user") base_username = "OpsWorks-%s-%s" % ( shorten_name(clean_for_iam(self._stack['Name']), 25), - shorten_name(clean_for_iam(self._name_for_iam), 25) + shorten_name(clean_for_iam(self._name_for_iam), 25), ) for try_ in range(20): username = base_username + ("+%s" % try_ if try_ else "") try: self.iam.create_user(UserName=username, Path=IAM_PATH) except ClientError as e: - if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists': + if ( + e.response.get('Error', {}).get('Code') + == 'EntityAlreadyExists' + ): LOG.debug( "IAM user %s already exists, trying another name", - username + username, ) # user already exists, try the next one pass @@ -382,8 +445,7 @@ def create_iam_entities(self, args): try: self.iam.attach_user_policy( - PolicyArn=IAM_POLICY_ARN, - UserName=username + PolicyArn=IAM_POLICY_ARN, UserName=username ) except ClientError as e: if e.response.get('Error', {}).get('Code') == 'AccessDenied': @@ -391,32 +453,29 @@ def create_iam_entities(self, args): "Unauthorized to attach policy %s to user %s. 
Trying " "to put user policy", IAM_POLICY_ARN, - username + username, ) self.iam.put_user_policy( PolicyName=IAM_USER_POLICY_NAME, PolicyDocument=self._iam_policy_document( - self._stack['Arn'], IAM_USER_POLICY_TIMEOUT), - UserName=username + self._stack['Arn'], IAM_USER_POLICY_TIMEOUT + ), + UserName=username, ) LOG.debug( - "Put policy %s to user %s", - IAM_USER_POLICY_NAME, - username + "Put policy %s to user %s", IAM_USER_POLICY_NAME, username ) else: raise else: LOG.debug( - "Attached policy %s to user %s", - IAM_POLICY_ARN, - username + "Attached policy %s to user %s", IAM_POLICY_ARN, username ) LOG.debug("Creating an access key") - self.access_key = self.iam.create_access_key( - UserName=username - )['AccessKey'] + self.access_key = self.iam.create_access_key(UserName=username)[ + 'AccessKey' + ] def setup_target_machine(self, args): """ @@ -425,12 +484,11 @@ def setup_target_machine(self, args): """ remote_script = REMOTE_SCRIPT % { - 'agent_installer_url': - self._prov_params['AgentInstallerUrl'], - 'preconfig': - self._to_ruby_yaml(self._pre_config_document(args)), - 'assets_download_bucket': - self._prov_params['Parameters']['assets_download_bucket'] + 'agent_installer_url': self._prov_params['AgentInstallerUrl'], + 'preconfig': self._to_ruby_yaml(self._pre_config_document(args)), + 'assets_download_bucket': self._prov_params['Parameters'][ + 'assets_download_bucket' + ], } if args.local: @@ -482,13 +540,13 @@ def ssh(self, args, remote_script): def _pre_config_document(self, args): parameters = dict( - stack_id=self._stack['StackId'], - **self._prov_params["Parameters"] + stack_id=self._stack['StackId'], **self._prov_params["Parameters"] ) if self.access_key: parameters['access_key_id'] = self.access_key['AccessKeyId'] - parameters['secret_access_key'] = \ - self.access_key['SecretAccessKey'] + parameters['secret_access_key'] = self.access_key[ + 'SecretAccessKey' + ] if self._use_hostname: parameters['hostname'] = self._use_hostname if args.private_ip: @@ -510,20 +568,20 @@ def _iam_policy_document(arn, timeout=None): valid_until = datetime.datetime.utcnow() + timeout statement["Condition"] = { "DateLessThan": { - "aws:CurrentTime": - valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") + "aws:CurrentTime": valid_until.strftime( + "%Y-%m-%dT%H:%M:%SZ" + ) } } - policy_document = { - "Statement": [statement], - "Version": "2012-10-17" - } + policy_document = {"Statement": [statement], "Version": "2012-10-17"} return json.dumps(policy_document) @staticmethod def _to_ruby_yaml(parameters): - return "\n".join(":%s: %s" % (k, json.dumps(v)) - for k, v in sorted(parameters.items())) + return "\n".join( + ":%s: %s" % (k, json.dumps(v)) + for k, v in sorted(parameters.items()) + ) def clean_for_iam(name): @@ -542,4 +600,4 @@ def shorten_name(name, max_length): if len(name) <= max_length: return name q, r = divmod(max_length - 3, 2) - return name[:q + r] + "..." + name[-q:] + return name[: q + r] + "..." + name[-q:] diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py index 77b0f3362ec2..074a59875beb 100644 --- a/awscli/customizations/paginate.py +++ b/awscli/customizations/paginate.py @@ -23,17 +23,16 @@ * Add a ``--starting-token`` and a ``--max-items`` argument. 
""" + import logging import sys from functools import partial -from botocore import xform_name -from botocore.exceptions import DataNotFoundError -from botocore import model - from awscli.arguments import BaseCLIArgument from awscli.customizations.exceptions import ParamValidationError from awscli.customizations.utils import uni_print +from botocore import model, xform_name +from botocore.exceptions import DataNotFoundError logger = logging.getLogger(__name__) @@ -87,7 +86,8 @@ def get_paginator_config(session, service_name, operation_name): return None try: operation_paginator_config = paginator_model.get_paginator( - operation_name) + operation_name + ) except ValueError: return None return operation_paginator_config @@ -100,15 +100,19 @@ def add_paging_description(help_command, **kwargs): return service_name = help_command.obj.service_model.service_name paginator_config = get_paginator_config( - help_command.session, service_name, help_command.obj.name) + help_command.session, service_name, help_command.obj.name + ) if not paginator_config: return help_command.doc.style.new_paragraph() help_command.doc.writeln( - ('``%s`` is a paginated operation. Multiple API calls may be issued ' - 'in order to retrieve the entire data set of results. You can ' - 'disable pagination by providing the ``--no-paginate`` argument.') - % help_command.name) + ( + '``%s`` is a paginated operation. Multiple API calls may be issued ' + 'in order to retrieve the entire data set of results. You can ' + 'disable pagination by providing the ``--no-paginate`` argument.' + ) + % help_command.name + ) # Only include result key information if it is present. if paginator_config.get('result_key'): queries = paginator_config['result_key'] @@ -116,33 +120,48 @@ def add_paging_description(help_command, **kwargs): queries = [queries] queries = ", ".join([('``%s``' % s) for s in queries]) help_command.doc.writeln( - ('When using ``--output text`` and the ``--query`` argument on a ' - 'paginated response, the ``--query`` argument must extract data ' - 'from the results of the following query expressions: %s') - % queries) + ( + 'When using ``--output text`` and the ``--query`` argument on a ' + 'paginated response, the ``--query`` argument must extract data ' + 'from the results of the following query expressions: %s' + ) + % queries + ) -def unify_paging_params(argument_table, operation_model, event_name, - session, **kwargs): +def unify_paging_params( + argument_table, operation_model, event_name, session, **kwargs +): paginator_config = get_paginator_config( - session, operation_model.service_model.service_name, - operation_model.name) + session, + operation_model.service_model.service_name, + operation_model.name, + ) if paginator_config is None: # We only apply these customizations to paginated responses. return - logger.debug("Modifying paging parameters for operation: %s", - operation_model.name) + logger.debug( + "Modifying paging parameters for operation: %s", operation_model.name + ) _remove_existing_paging_arguments(argument_table, paginator_config) - parsed_args_event = event_name.replace('building-argument-table.', - 'operation-args-parsed.') - call_parameters_event = event_name.replace('building-argument-table', - 'calling-command') + parsed_args_event = event_name.replace( + 'building-argument-table.', 'operation-args-parsed.' 
+ ) + call_parameters_event = event_name.replace( + 'building-argument-table', 'calling-command' + ) shadowed_args = {} - add_paging_argument(argument_table, 'starting-token', - PageArgument('starting-token', STARTING_TOKEN_HELP, - parse_type='string', - serialized_name='StartingToken'), - shadowed_args) + add_paging_argument( + argument_table, + 'starting-token', + PageArgument( + 'starting-token', + STARTING_TOKEN_HELP, + parse_type='string', + serialized_name='StartingToken', + ), + shadowed_args, + ) input_members = operation_model.input_shape.members type_name = 'integer' if 'limit_key' in paginator_config: @@ -150,21 +169,38 @@ def unify_paging_params(argument_table, operation_model, event_name, type_name = limit_key_shape.type_name if type_name not in PageArgument.type_map: raise TypeError( - ('Unsupported pagination type {0} for operation {1}' - ' and parameter {2}').format( - type_name, operation_model.name, - paginator_config['limit_key'])) - add_paging_argument(argument_table, 'page-size', - PageArgument('page-size', PAGE_SIZE_HELP, - parse_type=type_name, - serialized_name='PageSize'), - shadowed_args) - - add_paging_argument(argument_table, 'max-items', - PageArgument('max-items', MAX_ITEMS_HELP, - parse_type=type_name, - serialized_name='MaxItems'), - shadowed_args) + ( + 'Unsupported pagination type {0} for operation {1}' + ' and parameter {2}' + ).format( + type_name, + operation_model.name, + paginator_config['limit_key'], + ) + ) + add_paging_argument( + argument_table, + 'page-size', + PageArgument( + 'page-size', + PAGE_SIZE_HELP, + parse_type=type_name, + serialized_name='PageSize', + ), + shadowed_args, + ) + + add_paging_argument( + argument_table, + 'max-items', + PageArgument( + 'max-items', + MAX_ITEMS_HELP, + parse_type=type_name, + serialized_name='MaxItems', + ), + shadowed_args, + ) # We will register two pagination handlers. # # The first is focused on analyzing the CLI arguments passed to see @@ -179,13 +215,20 @@ def unify_paging_params(argument_table, operation_model, event_name, # directly and this bypasses all of the CLI args processing. session.register( parsed_args_event, - partial(check_should_enable_pagination, - list(_get_all_cli_input_tokens(paginator_config)), - shadowed_args, argument_table)) + partial( + check_should_enable_pagination, + list(_get_all_cli_input_tokens(paginator_config)), + shadowed_args, + argument_table, + ), + ) session.register( call_parameters_event, - partial(check_should_enable_pagination_call_parameters, - list(_get_all_input_tokens(paginator_config)))) + partial( + check_should_enable_pagination_call_parameters, + list(_get_all_input_tokens(paginator_config)), + ), + ) def add_paging_argument(argument_table, arg_name, argument, shadowed_args): @@ -199,17 +242,27 @@ def add_paging_argument(argument_table, arg_name, argument, shadowed_args): argument_table[arg_name] = argument -def check_should_enable_pagination(input_tokens, shadowed_args, argument_table, - parsed_args, parsed_globals, **kwargs): +def check_should_enable_pagination( + input_tokens, + shadowed_args, + argument_table, + parsed_args, + parsed_globals, + **kwargs, +): normalized_paging_args = ['start_token', 'max_items'] for token in input_tokens: py_name = token.replace('-', '_') - if getattr(parsed_args, py_name) is not None and \ - py_name not in normalized_paging_args: + if ( + getattr(parsed_args, py_name) is not None + and py_name not in normalized_paging_args + ): # The user has specified a manual (undocumented) pagination arg. 
# We need to automatically turn pagination off. - logger.debug("User has specified a manual pagination arg. " - "Automatically setting --no-paginate.") + logger.debug( + "User has specified a manual pagination arg. " + "Automatically setting --no-paginate." + ) parsed_globals.paginate = False if not parsed_globals.paginate: @@ -229,12 +282,16 @@ def check_should_enable_pagination(input_tokens, shadowed_args, argument_table, def ensure_paging_params_not_set(parsed_args, shadowed_args): paging_params = ['starting_token', 'page_size', 'max_items'] shadowed_params = [p.replace('-', '_') for p in shadowed_args.keys()] - params_used = [p for p in paging_params if - p not in shadowed_params and getattr(parsed_args, p, None)] + params_used = [ + p + for p in paging_params + if p not in shadowed_params and getattr(parsed_args, p, None) + ] if len(params_used) > 0: converted_params = ', '.join( - ["--" + p.replace('_', '-') for p in params_used]) + ["--" + p.replace('_', '-') for p in params_used] + ) raise ParamValidationError( "Cannot specify --no-paginate along with pagination " "arguments: %s" % converted_params @@ -291,11 +348,14 @@ def _get_cli_name(param_objects, token_name): # and would be missed by the processing above. This function gets # called on the calling-command event. def check_should_enable_pagination_call_parameters( - input_tokens, call_parameters, parsed_args, parsed_globals, **kwargs): + input_tokens, call_parameters, parsed_args, parsed_globals, **kwargs +): for param in call_parameters: if param in input_tokens: - logger.debug("User has specified a manual pagination arg. " - "Automatically setting --no-paginate.") + logger.debug( + "User has specified a manual pagination arg. " + "Automatically setting --no-paginate." + ) parsed_globals.paginate = False @@ -317,7 +377,8 @@ def __init__(self, name, documentation, parse_type, serialized_name): def _emit_non_positive_max_items_warning(self): uni_print( "warning: Non-positive values for --max-items may result in undefined behavior.\n", - sys.stderr) + sys.stderr, + ) @property def cli_name(self): @@ -340,8 +401,11 @@ def documentation(self): return self._documentation def add_to_parser(self, parser): - parser.add_argument(self.cli_name, dest=self.py_name, - type=self.type_map[self._parse_type]) + parser.add_argument( + self.cli_name, + dest=self.py_name, + type=self.type_map[self._parse_type], + ) def add_to_params(self, parameters, value): if value is not None: diff --git a/awscli/customizations/putmetricdata.py b/awscli/customizations/putmetricdata.py index 10ef322b2323..da63967bdd8b 100644 --- a/awscli/customizations/putmetricdata.py +++ b/awscli/customizations/putmetricdata.py @@ -23,21 +23,32 @@ * --storage-resolution """ + import decimal from awscli.arguments import CustomArgument -from awscli.utils import split_on_commas from awscli.customizations.utils import validate_mutually_exclusive_handler +from awscli.utils import split_on_commas def register_put_metric_data(event_handler): event_handler.register( - 'building-argument-table.cloudwatch.put-metric-data', _promote_args) + 'building-argument-table.cloudwatch.put-metric-data', _promote_args + ) event_handler.register( 'operation-args-parsed.cloudwatch.put-metric-data', validate_mutually_exclusive_handler( - ['metric_data'], ['metric_name', 'timestamp', 'unit', 'value', - 'dimensions', 'statistic_values'])) + ['metric_data'], + [ + 'metric_name', + 'timestamp', + 'unit', + 'value', + 'dimensions', + 'statistic_values', + ], + ), + ) def _promote_args(argument_table, 
operation_model, **kwargs): @@ -48,25 +59,32 @@ def _promote_args(argument_table, operation_model, **kwargs): argument_table['metric-data'].required = False argument_table['metric-name'] = PutMetricArgument( - 'metric-name', help_text='The name of the metric.') + 'metric-name', help_text='The name of the metric.' + ) argument_table['timestamp'] = PutMetricArgument( - 'timestamp', help_text='The time stamp used for the metric. ' - 'If not specified, the default value is ' - 'set to the time the metric data was ' - 'received.') + 'timestamp', + help_text='The time stamp used for the metric. ' + 'If not specified, the default value is ' + 'set to the time the metric data was ' + 'received.', + ) argument_table['unit'] = PutMetricArgument( - 'unit', help_text='The unit of metric.') + 'unit', help_text='The unit of metric.' + ) argument_table['value'] = PutMetricArgument( - 'value', help_text='The value for the metric. Although the --value ' - 'parameter accepts numbers of type Double, ' - 'Amazon CloudWatch truncates values with very ' - 'large exponents. Values with base-10 exponents ' - 'greater than 126 (1 x 10^126) are truncated. ' - 'Likewise, values with base-10 exponents less ' - 'than -130 (1 x 10^-130) are also truncated.') + 'value', + help_text='The value for the metric. Although the --value ' + 'parameter accepts numbers of type Double, ' + 'Amazon CloudWatch truncates values with very ' + 'large exponents. Values with base-10 exponents ' + 'greater than 126 (1 x 10^126) are truncated. ' + 'Likewise, values with base-10 exponents less ' + 'than -130 (1 x 10^-130) are also truncated.', + ) argument_table['dimensions'] = PutMetricArgument( - 'dimensions', help_text=( + 'dimensions', + help_text=( 'The --dimensions argument further expands ' 'on the identity of a metric using a Name=Value ' 'pair, separated by commas, for example: ' @@ -76,11 +94,12 @@ def _promote_args(argument_table, operation_model, **kwargs): 'where for the same example you would use the format ' '--dimensions Name=InstanceID,Value=i-aaba32d4 ' 'Name=InstanceType,value=m1.small .' - ) + ), ) argument_table['statistic-values'] = PutMetricArgument( - 'statistic-values', help_text='A set of statistical values describing ' - 'the metric.') + 'statistic-values', + help_text='A set of statistical values describing ' 'the metric.', + ) metric_data = operation_model.input_shape.members['MetricData'].member storage_resolution = metric_data.members['StorageResolution'] @@ -103,7 +122,9 @@ def _add_to_params(self, parameters, value): parameters[name] = [{}] first_element = parameters[name][0] return func(self, first_element, value) + return _add_to_params + return _wrap_add_to_params diff --git a/awscli/customizations/quicksight.py b/awscli/customizations/quicksight.py index 3cc048452573..6750e0b5b0f2 100644 --- a/awscli/customizations/quicksight.py +++ b/awscli/customizations/quicksight.py @@ -16,11 +16,13 @@ _ASSET_BUNDLE_FILE_DOCSTRING = ( '
<p>The content of the asset bundle to be uploaded. ' 'To specify the content of a local file use the ' - 'fileb:// prefix. Example: fileb://asset-bundle.zip</p>') + 'fileb:// prefix. Example: fileb://asset-bundle.zip</p>' +) _ASSET_BUNDLE_DOCSTRING_ADDENDUM = ( '<p>To specify a local file use ' - '--asset-bundle-import-source-bytes instead.</p>') + '--asset-bundle-import-source-bytes instead.</p>
' +) def register_quicksight_asset_bundle_customizations(cli): @@ -31,4 +33,6 @@ def register_quicksight_asset_bundle_customizations(cli): source_arg_blob_member='Body', new_arg='asset-bundle-import-source-bytes', new_arg_doc_string=_ASSET_BUNDLE_FILE_DOCSTRING, - doc_string_addendum=_ASSET_BUNDLE_DOCSTRING_ADDENDUM)) + doc_string_addendum=_ASSET_BUNDLE_DOCSTRING_ADDENDUM, + ), + ) diff --git a/awscli/customizations/rds.py b/awscli/customizations/rds.py index cac3173f3f76..48fd7c3b042f 100644 --- a/awscli/customizations/rds.py +++ b/awscli/customizations/rds.py @@ -24,8 +24,7 @@ """ -from awscli.clidriver import ServiceOperation -from awscli.clidriver import CLIOperationCaller +from awscli.clidriver import CLIOperationCaller, ServiceOperation from awscli.customizations import utils from awscli.customizations.commands import BasicCommand from awscli.customizations.utils import uni_print @@ -33,10 +32,14 @@ def register_rds_modify_split(cli): cli.register('building-command-table.rds', _building_command_table) - cli.register('building-argument-table.rds.add-option-to-option-group', - _rename_add_option) - cli.register('building-argument-table.rds.remove-option-from-option-group', - _rename_remove_option) + cli.register( + 'building-argument-table.rds.add-option-to-option-group', + _rename_add_option, + ) + cli.register( + 'building-argument-table.rds.remove-option-from-option-group', + _rename_remove_option, + ) def register_add_generate_db_auth_token(cli): @@ -49,14 +52,16 @@ def _add_generate_db_auth_token(command_table, session, **kwargs): def _rename_add_option(argument_table, **kwargs): - utils.rename_argument(argument_table, 'options-to-include', - new_name='options') + utils.rename_argument( + argument_table, 'options-to-include', new_name='options' + ) del argument_table['options-to-remove'] def _rename_remove_option(argument_table, **kwargs): - utils.rename_argument(argument_table, 'options-to-remove', - new_name='options') + utils.rename_argument( + argument_table, 'options-to-remove', new_name='options' + ) del argument_table['options-to-include'] @@ -69,15 +74,19 @@ def _building_command_table(command_table, session, **kwargs): rds_model = session.get_service_model('rds') modify_operation_model = rds_model.operation_model('ModifyOptionGroup') command_table['add-option-to-option-group'] = ServiceOperation( - parent_name='rds', name='add-option-to-option-group', + parent_name='rds', + name='add-option-to-option-group', operation_caller=CLIOperationCaller(session), session=session, - operation_model=modify_operation_model) + operation_model=modify_operation_model, + ) command_table['remove-option-from-option-group'] = ServiceOperation( - parent_name='rds', name='remove-option-from-option-group', + parent_name='rds', + name='remove-option-from-option-group', session=session, operation_model=modify_operation_model, - operation_caller=CLIOperationCaller(session)) + operation_caller=CLIOperationCaller(session), + ) class GenerateDBAuthTokenCommand(BasicCommand): @@ -86,23 +95,35 @@ class GenerateDBAuthTokenCommand(BasicCommand): 'Generates an auth token used to connect to a db with IAM credentials.' 
) ARG_TABLE = [ - {'name': 'hostname', 'required': True, - 'help_text': 'The hostname of the database to connect to.'}, - {'name': 'port', 'cli_type_name': 'integer', 'required': True, - 'help_text': 'The port number the database is listening on.'}, - {'name': 'username', 'required': True, - 'help_text': 'The username to log in as.'} + { + 'name': 'hostname', + 'required': True, + 'help_text': 'The hostname of the database to connect to.', + }, + { + 'name': 'port', + 'cli_type_name': 'integer', + 'required': True, + 'help_text': 'The port number the database is listening on.', + }, + { + 'name': 'username', + 'required': True, + 'help_text': 'The username to log in as.', + }, ] def _run_main(self, parsed_args, parsed_globals): rds = self._session.create_client( - 'rds', parsed_globals.region, parsed_globals.endpoint_url, - parsed_globals.verify_ssl + 'rds', + parsed_globals.region, + parsed_globals.endpoint_url, + parsed_globals.verify_ssl, ) token = rds.generate_db_auth_token( DBHostname=parsed_args.hostname, Port=parsed_args.port, - DBUsername=parsed_args.username + DBUsername=parsed_args.username, ) uni_print(token) uni_print('\n') diff --git a/awscli/customizations/rekognition.py b/awscli/customizations/rekognition.py index ba03ef1d7e9f..267376015b36 100644 --- a/awscli/customizations/rekognition.py +++ b/awscli/customizations/rekognition.py @@ -13,12 +13,15 @@ from awscli.customizations.arguments import NestedBlobArgumentHoister -IMAGE_FILE_DOCSTRING = ('
<p>The content of the image to be uploaded. ' - 'To specify the content of a local file use the ' - 'fileb:// prefix. ' - 'Example: fileb://image.png</p>') -IMAGE_DOCSTRING_ADDENDUM = ('<p>To specify a local file use --%s ' - 'instead.</p>') +IMAGE_FILE_DOCSTRING = ( + '<p>The content of the image to be uploaded. ' + 'To specify the content of a local file use the ' + 'fileb:// prefix. ' + 'Example: fileb://image.png</p>' +) +IMAGE_DOCSTRING_ADDENDUM = ( + '<p>To specify a local file use --%s ' 'instead.</p>
' +) FILE_PARAMETER_UPDATES = { @@ -32,10 +35,13 @@ def register_rekognition_detect_labels(cli): for target, new_param in FILE_PARAMETER_UPDATES.items(): operation, old_param = target.rsplit('.', 1) doc_string_addendum = IMAGE_DOCSTRING_ADDENDUM % new_param - cli.register('building-argument-table.rekognition.%s' % operation, - NestedBlobArgumentHoister( - source_arg=old_param, - source_arg_blob_member='Bytes', - new_arg=new_param, - new_arg_doc_string=IMAGE_FILE_DOCSTRING, - doc_string_addendum=doc_string_addendum)) + cli.register( + 'building-argument-table.rekognition.%s' % operation, + NestedBlobArgumentHoister( + source_arg=old_param, + source_arg_blob_member='Bytes', + new_arg=new_param, + new_arg_doc_string=IMAGE_FILE_DOCSTRING, + doc_string_addendum=doc_string_addendum, + ), + ) diff --git a/awscli/customizations/removals.py b/awscli/customizations/removals.py index 5add46dc4f81..a7d99862f42e 100644 --- a/awscli/customizations/removals.py +++ b/awscli/customizations/removals.py @@ -18,6 +18,7 @@ yet fully supported. """ + import logging from functools import partial @@ -26,43 +27,73 @@ def register_removals(event_handler): cmd_remover = CommandRemover(event_handler) - cmd_remover.remove(on_event='building-command-table.ses', - remove_commands=['delete-verified-email-address', - 'list-verified-email-addresses', - 'verify-email-address']) - cmd_remover.remove(on_event='building-command-table.ec2', - remove_commands=['import-instance', 'import-volume']) - cmd_remover.remove(on_event='building-command-table.emr', - remove_commands=['run-job-flow', 'describe-job-flows', - 'add-job-flow-steps', - 'terminate-job-flows', - 'list-bootstrap-actions', - 'list-instance-groups', - 'set-termination-protection', - 'set-keep-job-flow-alive-when-no-steps', - 'set-visible-to-all-users', - 'set-unhealthy-node-replacement']) - cmd_remover.remove(on_event='building-command-table.kinesis', - remove_commands=['subscribe-to-shard']) - cmd_remover.remove(on_event='building-command-table.lexv2-runtime', - remove_commands=['start-conversation']) - cmd_remover.remove(on_event='building-command-table.lambda', - remove_commands=['invoke-with-response-stream']) - cmd_remover.remove(on_event='building-command-table.sagemaker-runtime', - remove_commands=['invoke-endpoint-with-response-stream']) - cmd_remover.remove(on_event='building-command-table.bedrock-runtime', - remove_commands=['invoke-model-with-response-stream', - 'converse-stream']) - cmd_remover.remove(on_event='building-command-table.bedrock-agent-runtime', - remove_commands=['invoke-agent', - 'invoke-flow', - 'invoke-inline-agent', - 'optimize-prompt', - 'retrieve-and-generate-stream']) - cmd_remover.remove(on_event='building-command-table.qbusiness', - remove_commands=['chat']) - cmd_remover.remove(on_event='building-command-table.iotsitewise', - remove_commands=['invoke-assistant']) + cmd_remover.remove( + on_event='building-command-table.ses', + remove_commands=[ + 'delete-verified-email-address', + 'list-verified-email-addresses', + 'verify-email-address', + ], + ) + cmd_remover.remove( + on_event='building-command-table.ec2', + remove_commands=['import-instance', 'import-volume'], + ) + cmd_remover.remove( + on_event='building-command-table.emr', + remove_commands=[ + 'run-job-flow', + 'describe-job-flows', + 'add-job-flow-steps', + 'terminate-job-flows', + 'list-bootstrap-actions', + 'list-instance-groups', + 'set-termination-protection', + 'set-keep-job-flow-alive-when-no-steps', + 'set-visible-to-all-users', + 'set-unhealthy-node-replacement', + 
], + ) + cmd_remover.remove( + on_event='building-command-table.kinesis', + remove_commands=['subscribe-to-shard'], + ) + cmd_remover.remove( + on_event='building-command-table.lexv2-runtime', + remove_commands=['start-conversation'], + ) + cmd_remover.remove( + on_event='building-command-table.lambda', + remove_commands=['invoke-with-response-stream'], + ) + cmd_remover.remove( + on_event='building-command-table.sagemaker-runtime', + remove_commands=['invoke-endpoint-with-response-stream'], + ) + cmd_remover.remove( + on_event='building-command-table.bedrock-runtime', + remove_commands=[ + 'invoke-model-with-response-stream', + 'converse-stream', + ], + ) + cmd_remover.remove( + on_event='building-command-table.bedrock-agent-runtime', + remove_commands=[ + 'invoke-agent', + 'invoke-flow', + 'invoke-inline-agent', + 'optimize-prompt', + 'retrieve-and-generate-stream', + ], + ) + cmd_remover.remove( + on_event='building-command-table.qbusiness', remove_commands=['chat'] + ) + cmd_remover.remove( + on_event='building-command-table.iotsitewise', + remove_commands=['invoke-assistant'], + ) class CommandRemover(object): @@ -70,8 +101,7 @@ def __init__(self, events): self._events = events def remove(self, on_event, remove_commands): - self._events.register(on_event, - self._create_remover(remove_commands)) + self._events.register(on_event, self._create_remover(remove_commands)) def _create_remover(self, commands_to_remove): return partial(_remove_commands, commands_to_remove=commands_to_remove) @@ -84,5 +114,6 @@ def _remove_commands(command_table, commands_to_remove, **kwargs): LOG.debug("Removing operation: %s", command) del command_table[command] except KeyError: - LOG.warning("Attempting to delete command that does not exist: %s", - command) + LOG.warning( + "Attempting to delete command that does not exist: %s", command + ) diff --git a/awscli/customizations/route53.py b/awscli/customizations/route53.py index 686abc40c914..f482ff605827 100644 --- a/awscli/customizations/route53.py +++ b/awscli/customizations/route53.py @@ -18,7 +18,8 @@ def register_create_hosted_zone_doc_fix(cli): # has the necessary documentation. cli.register( 'doc-option.route53.create-hosted-zone.hosted-zone-config', - add_private_zone_note) + add_private_zone_note, + ) def add_private_zone_note(help_command, **kwargs): diff --git a/awscli/customizations/s3/comparator.py b/awscli/customizations/s3/comparator.py index efe49c63a120..06ab58c76688 100644 --- a/awscli/customizations/s3/comparator.py +++ b/awscli/customizations/s3/comparator.py @@ -11,8 +11,8 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import logging -from awscli.compat import advance_iterator +from awscli.compat import advance_iterator LOG = logging.getLogger(__name__) @@ -21,10 +21,13 @@ class Comparator(object): """ This class performs all of the comparisons behind the sync operation """ - def __init__(self, file_at_src_and_dest_sync_strategy, - file_not_at_dest_sync_strategy, - file_not_at_src_sync_strategy): + def __init__( + self, + file_at_src_and_dest_sync_strategy, + file_not_at_dest_sync_strategy, + file_not_at_src_sync_strategy, + ): self._sync_strategy = file_at_src_and_dest_sync_strategy self._not_at_dest_sync_strategy = file_not_at_dest_sync_strategy self._not_at_src_sync_strategy = file_not_at_src_sync_strategy @@ -102,26 +105,42 @@ def call(self, src_files, dest_files): elif compare_keys == 'less_than': src_take = True dest_take = False - should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None) + should_sync = ( + self._not_at_dest_sync_strategy.determine_should_sync( + src_file, None + ) + ) if should_sync: yield src_file elif compare_keys == 'greater_than': src_take = False dest_take = True - should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file) + should_sync = ( + self._not_at_src_sync_strategy.determine_should_sync( + None, dest_file + ) + ) if should_sync: yield dest_file elif (not src_done) and dest_done: src_take = True - should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None) + should_sync = ( + self._not_at_dest_sync_strategy.determine_should_sync( + src_file, None + ) + ) if should_sync: yield src_file elif src_done and (not dest_done): dest_take = True - should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file) + should_sync = ( + self._not_at_src_sync_strategy.determine_should_sync( + None, dest_file + ) + ) if should_sync: yield dest_file else: @@ -135,10 +154,10 @@ def compare_comp_key(self, src_file, dest_file): src_comp_key = src_file.compare_key dest_comp_key = dest_file.compare_key - if (src_comp_key == dest_comp_key): + if src_comp_key == dest_comp_key: return 'equal' - elif (src_comp_key < dest_comp_key): + elif src_comp_key < dest_comp_key: return 'less_than' else: diff --git a/awscli/customizations/s3/factory.py b/awscli/customizations/s3/factory.py index 1692fd1cef86..afc98abe0f48 100644 --- a/awscli/customizations/s3/factory.py +++ b/awscli/customizations/s3/factory.py @@ -13,20 +13,22 @@ import logging import awscrt.s3 + +from awscli.compat import urlparse +from awscli.customizations.s3 import constants +from awscli.customizations.s3.transferconfig import ( + create_transfer_config_from_runtime_config, +) from botocore.client import Config from botocore.httpsession import DEFAULT_CA_BUNDLE -from s3transfer.manager import TransferManager from s3transfer.crt import ( - acquire_crt_s3_process_lock, create_s3_crt_client, - BotocoreCRTRequestSerializer, CRTTransferManager, - BotocoreCRTCredentialsWrapper + BotocoreCRTCredentialsWrapper, + BotocoreCRTRequestSerializer, + CRTTransferManager, + acquire_crt_s3_process_lock, + create_s3_crt_client, ) - -from awscli.compat import urlparse -from awscli.customizations.s3 import constants -from awscli.customizations.s3.transferconfig import \ - create_transfer_config_from_runtime_config - +from s3transfer.manager import TransferManager LOGGER = logging.getLogger(__name__) @@ -36,9 +38,7 @@ def __init__(self, session): self._session = session def create_client(self, params, is_source_client=False): - create_client_kwargs = { - 'verify': 
params['verify_ssl'] - } + create_client_kwargs = {'verify': params['verify_ssl']} if params.get('sse') == 'aws:kms': create_client_kwargs['config'] = Config(signature_version='s3v4') region = params['region'] @@ -61,22 +61,24 @@ def __init__(self, session): self._session = session self._botocore_client_factory = ClientFactory(self._session) - def create_transfer_manager(self, params, runtime_config, - botocore_client=None): + def create_transfer_manager( + self, params, runtime_config, botocore_client=None + ): client_type = self._compute_transfer_client_type( - params, runtime_config) + params, runtime_config + ) if client_type == constants.CRT_TRANSFER_CLIENT: return self._create_crt_transfer_manager(params, runtime_config) else: return self._create_classic_transfer_manager( - params, runtime_config, botocore_client) + params, runtime_config, botocore_client + ) def _compute_transfer_client_type(self, params, runtime_config): if params.get('paths_type') == 's3s3': return constants.CLASSIC_TRANSFER_CLIENT preferred_transfer_client = runtime_config.get( - 'preferred_transfer_client', - constants.AUTO_RESOLVE_TRANSFER_CLIENT + 'preferred_transfer_client', constants.AUTO_RESOLVE_TRANSFER_CLIENT ) if preferred_transfer_client == constants.AUTO_RESOLVE_TRANSFER_CLIENT: return self._resolve_transfer_client_type_for_system() @@ -92,7 +94,7 @@ def _resolve_transfer_client_type_for_system(self): is_running = self._is_crt_client_running_in_other_aws_cli_process() LOGGER.debug( 'S3 CRT client running in different AWS CLI process: %s', - is_running + is_running, ) if not is_running: transfer_client_type = constants.CRT_TRANSFER_CLIENT @@ -114,7 +116,7 @@ def _create_crt_transfer_manager(self, params, runtime_config): self._acquire_crt_s3_process_lock() return CRTTransferManager( self._create_crt_client(params, runtime_config), - self._create_crt_request_serializer(params) + self._create_crt_request_serializer(params), ) def _create_crt_client(self, params, runtime_config): @@ -133,8 +135,9 @@ def _create_crt_client(self, params, runtime_config): create_crt_client_kwargs['part_size'] = multipart_chunksize if params.get('sign_request', True): crt_credentials_provider = self._get_crt_credentials_provider() - create_crt_client_kwargs[ - 'crt_credentials_provider'] = crt_credentials_provider + create_crt_client_kwargs['crt_credentials_provider'] = ( + crt_credentials_provider + ) return create_s3_crt_client(**create_crt_client_kwargs) @@ -144,23 +147,27 @@ def _create_crt_request_serializer(self, params): { 'region_name': self._resolve_region(params), 'endpoint_url': params.get('endpoint_url'), - } + }, ) - def _create_classic_transfer_manager(self, params, runtime_config, - client=None): + def _create_classic_transfer_manager( + self, params, runtime_config, client=None + ): if client is None: client = self._botocore_client_factory.create_client(params) transfer_config = create_transfer_config_from_runtime_config( - runtime_config) - transfer_config.max_in_memory_upload_chunks = \ + runtime_config + ) + transfer_config.max_in_memory_upload_chunks = ( self._MAX_IN_MEMORY_CHUNKS - transfer_config.max_in_memory_download_chunks = \ + ) + transfer_config.max_in_memory_download_chunks = ( self._MAX_IN_MEMORY_CHUNKS + ) LOGGER.debug( "Using a multipart threshold of %s and a part size of %s", transfer_config.multipart_threshold, - transfer_config.multipart_chunksize + transfer_config.multipart_chunksize, ) return TransferManager(client, transfer_config) diff --git a/awscli/customizations/s3/fileformat.py 
b/awscli/customizations/s3/fileformat.py index ef15fd6785af..4f9d40448510 100644 --- a/awscli/customizations/s3/fileformat.py +++ b/awscli/customizations/s3/fileformat.py @@ -53,9 +53,12 @@ def format(self, src, dest, parameters): # will take on the name the user specified in the # command line. dest_path, use_src_name = format_table[dest_type](dest_path, dir_op) - files = {'src': {'path': src_path, 'type': src_type}, - 'dest': {'path': dest_path, 'type': dest_type}, - 'dir_op': dir_op, 'use_src_name': use_src_name} + files = { + 'src': {'path': src_path, 'type': src_type}, + 'dest': {'path': dest_path, 'type': dest_type}, + 'dir_op': dir_op, + 'use_src_name': use_src_name, + } return files def local_format(self, path, dir_op): diff --git a/awscli/customizations/s3/filegenerator.py b/awscli/customizations/s3/filegenerator.py index e98d78c78edb..d68f26b7b527 100644 --- a/awscli/customizations/s3/filegenerator.py +++ b/awscli/customizations/s3/filegenerator.py @@ -11,17 +11,22 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os -import sys import stat +import sys from dateutil.parser import parse from dateutil.tz import tzlocal -from botocore.exceptions import ClientError -from awscli.customizations.s3.utils import find_bucket_key, get_file_stat -from awscli.customizations.s3.utils import BucketLister, create_warning, \ - find_dest_path_comp_key, EPOCH_TIME from awscli.compat import queue +from awscli.customizations.s3.utils import ( + EPOCH_TIME, + BucketLister, + create_warning, + find_bucket_key, + find_dest_path_comp_key, + get_file_stat, +) +from botocore.exceptions import ClientError _open = open @@ -70,6 +75,7 @@ def is_readable(path): # This class is provided primarily to provide a detailed error message. + class FileDecodingError(Exception): """Raised when there was an issue decoding the file.""" @@ -84,17 +90,25 @@ def __init__(self, directory, filename): self.file_name = filename self.error_message = ( 'There was an error trying to decode the the file %s in ' - 'directory "%s". \n%s' % (repr(self.file_name), - self.directory, - self.ADVICE) + 'directory "%s". \n%s' + % (repr(self.file_name), self.directory, self.ADVICE) ) super(FileDecodingError, self).__init__(self.error_message) class FileStat(object): - def __init__(self, src, dest=None, compare_key=None, size=None, - last_update=None, src_type=None, dest_type=None, - operation_name=None, response_data=None): + def __init__( + self, + src, + dest=None, + compare_key=None, + size=None, + last_update=None, + src_type=None, + dest_type=None, + operation_name=None, + response_data=None, + ): self.src = src self.dest = dest self.compare_key = compare_key @@ -114,8 +128,16 @@ class FileGenerator(object): under the same common prefix. The generator yields corresponding ``FileInfo`` objects to send to a ``Comparator`` or ``S3Handler``. 
""" - def __init__(self, client, operation_name, follow_symlinks=True, - page_size=None, result_queue=None, request_parameters=None): + + def __init__( + self, + client, + operation_name, + follow_symlinks=True, + page_size=None, + result_queue=None, + request_parameters=None, + ): self._client = client self.operation_name = operation_name self.follow_symlinks = follow_symlinks @@ -141,9 +163,12 @@ def call(self, files): for src_path, extra_information in file_iterator: dest_path, compare_key = find_dest_path_comp_key(files, src_path) file_stat_kwargs = { - 'src': src_path, 'dest': dest_path, 'compare_key': compare_key, - 'src_type': src_type, 'dest_type': dest_type, - 'operation_name': self.operation_name + 'src': src_path, + 'dest': dest_path, + 'compare_key': compare_key, + 'src_type': src_type, + 'dest_type': dest_type, + 'operation_name': self.operation_name, } self._inject_extra_information(file_stat_kwargs, extra_information) yield FileStat(**file_stat_kwargs) @@ -188,7 +213,8 @@ def list_files(self, path, dir_op): names = [] for name in listdir_names: if not self.should_ignore_file_with_decoding_warnings( - path, name): + path, name + ): file_path = join(path, name) if isdir(file_path): name = name + os.path.sep @@ -225,8 +251,9 @@ def _validate_update_time(self, update_time, path): warning = create_warning( path=path, error_message="File has an invalid timestamp. Passing epoch " - "time as timestamp.", - skip_file=False) + "time as timestamp.", + skip_file=False, + ) self.result_queue.put(warning) return EPOCH_TIME return update_time @@ -251,8 +278,9 @@ def should_ignore_file_with_decoding_warnings(self, dirname, filename): """ if not isinstance(filename, str): decoding_error = FileDecodingError(dirname, filename) - warning = create_warning(repr(filename), - decoding_error.error_message) + warning = create_warning( + repr(filename), decoding_error.error_message + ) self.result_queue.put(warning) return True path = os.path.join(dirname, filename) @@ -290,10 +318,14 @@ def triggers_warning(self, path): self.result_queue.put(warning) return True if is_special_file(path): - warning = create_warning(path, - ("File is character special device, " - "block special device, FIFO, or " - "socket.")) + warning = create_warning( + path, + ( + "File is character special device, " + "block special device, FIFO, or " + "socket." + ), + ) self.result_queue.put(warning) return True if not is_readable(path): @@ -318,9 +350,12 @@ def list_objects(self, s3_path, dir_op): else: lister = BucketLister(self._client) extra_args = self.request_parameters.get('ListObjectsV2', {}) - for key in lister.list_objects(bucket=bucket, prefix=prefix, - page_size=self.page_size, - extra_args=extra_args): + for key in lister.list_objects( + bucket=bucket, + prefix=prefix, + page_size=self.page_size, + extra_args=extra_args, + ): source_path, response_data = key if response_data['Size'] == 0 and source_path.endswith('/'): if self.operation_name == 'delete': diff --git a/awscli/customizations/s3/fileinfo.py b/awscli/customizations/s3/fileinfo.py index 615be15e81f6..a1b05e97f547 100644 --- a/awscli/customizations/s3/fileinfo.py +++ b/awscli/customizations/s3/fileinfo.py @@ -38,11 +38,23 @@ class FileInfo(object): from the list of a ListObjects or the response from a HeadObject. It will only be filled if the task was generated from an S3 bucket. 
""" - def __init__(self, src, dest=None, compare_key=None, size=None, - last_update=None, src_type=None, dest_type=None, - operation_name=None, client=None, parameters=None, - source_client=None, is_stream=False, - associated_response_data=None): + + def __init__( + self, + src, + dest=None, + compare_key=None, + size=None, + last_update=None, + src_type=None, + dest_type=None, + operation_name=None, + client=None, + parameters=None, + source_client=None, + is_stream=False, + associated_response_data=None, + ): self.src = src self.src_type = src_type self.operation_name = operation_name @@ -82,8 +94,11 @@ def is_glacier_compatible(self): def _is_glacier_object(self, response_data): glacier_storage_classes = ['GLACIER', 'DEEP_ARCHIVE'] if response_data: - if response_data.get('StorageClass') in glacier_storage_classes \ - and not self._is_restored(response_data): + if response_data.get( + 'StorageClass' + ) in glacier_storage_classes and not self._is_restored( + response_data + ): return True return False diff --git a/awscli/customizations/s3/fileinfobuilder.py b/awscli/customizations/s3/fileinfobuilder.py index d539bbb051ef..d9f8bce9ae14 100644 --- a/awscli/customizations/s3/fileinfobuilder.py +++ b/awscli/customizations/s3/fileinfobuilder.py @@ -18,8 +18,10 @@ class FileInfoBuilder(object): This class takes a ``FileBase`` object's attributes and generates a ``FileInfo`` object so that the operation can be performed. """ - def __init__(self, client, source_client=None, - parameters = None, is_stream=False): + + def __init__( + self, client, source_client=None, parameters=None, is_stream=False + ): self._client = client self._source_client = client if source_client is not None: @@ -57,8 +59,9 @@ def _inject_info(self, file_base): # issue by swapping clients only in the case of a sync delete since # swapping which client is used in the delete function would then break # moving under s3v4. - if (file_base.operation_name == 'delete' and - self._parameters.get('delete')): + if file_base.operation_name == 'delete' and self._parameters.get( + 'delete' + ): file_info_attr['client'] = self._source_client file_info_attr['source_client'] = self._client else: diff --git a/awscli/customizations/s3/filters.py b/awscli/customizations/s3/filters.py index f41820ac09de..04f926d73332 100644 --- a/awscli/customizations/s3/filters.py +++ b/awscli/customizations/s3/filters.py @@ -10,13 +10,12 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import logging import fnmatch +import logging import os from awscli.customizations.s3.utils import split_s3_bucket_key - LOG = logging.getLogger(__name__) @@ -28,24 +27,26 @@ def create_filter(parameters): cli_filters = parameters['filters'] real_filters = [] for filter_type, filter_pattern in cli_filters: - real_filters.append((filter_type.lstrip('-'), - filter_pattern)) + real_filters.append((filter_type.lstrip('-'), filter_pattern)) source_location = parameters['src'] if source_location.startswith('s3://'): # This gives us (bucket, keyname) and we want # the bucket to be the root dir. 
- src_rootdir = _get_s3_root(source_location, - parameters['dir_op']) + src_rootdir = _get_s3_root(source_location, parameters['dir_op']) else: - src_rootdir = _get_local_root(parameters['src'], parameters['dir_op']) + src_rootdir = _get_local_root( + parameters['src'], parameters['dir_op'] + ) destination_location = parameters['dest'] if destination_location.startswith('s3://'): - dst_rootdir = _get_s3_root(parameters['dest'], - parameters['dir_op']) + dst_rootdir = _get_s3_root( + parameters['dest'], parameters['dir_op'] + ) else: - dst_rootdir = _get_local_root(parameters['dest'], - parameters['dir_op']) + dst_rootdir = _get_local_root( + parameters['dest'], parameters['dir_op'] + ) return Filter(real_filters, src_rootdir, dst_rootdir) else: @@ -77,6 +78,7 @@ class Filter(object): """ This is a universal exclude/include filter. """ + def __init__(self, patterns, rootdir, dst_rootdir): """ :var patterns: A list of patterns. A pattern consists of a list @@ -100,7 +102,8 @@ def _full_path_patterns(self, original_patterns, rootdir): full_patterns = [] for pattern in original_patterns: full_patterns.append( - (pattern[0], os.path.join(rootdir, pattern[1]))) + (pattern[0], os.path.join(rootdir, pattern[1])) + ) return full_patterns def call(self, file_infos): @@ -122,11 +125,16 @@ def call(self, file_infos): current_file_status = self._match_pattern(pattern, file_info) if current_file_status is not None: file_status = current_file_status - dst_current_file_status = self._match_pattern(dst_pattern, file_info) + dst_current_file_status = self._match_pattern( + dst_pattern, file_info + ) if dst_current_file_status is not None: file_status = dst_current_file_status - LOG.debug("=%s final filtered status, should_include: %s", - file_path, file_status[1]) + LOG.debug( + "=%s final filtered status, should_include: %s", + file_path, + file_status[1], + ) if file_status[1]: yield file_info @@ -141,13 +149,15 @@ def _match_pattern(self, pattern, file_info): is_match = fnmatch.fnmatch(file_path, path_pattern) if is_match and pattern_type == 'include': file_status = (file_info, True) - LOG.debug("%s matched include filter: %s", - file_path, path_pattern) + LOG.debug("%s matched include filter: %s", file_path, path_pattern) elif is_match and pattern_type == 'exclude': file_status = (file_info, False) - LOG.debug("%s matched exclude filter: %s", - file_path, path_pattern) + LOG.debug("%s matched exclude filter: %s", file_path, path_pattern) else: - LOG.debug("%s did not match %s filter: %s", - file_path, pattern_type, path_pattern) + LOG.debug( + "%s did not match %s filter: %s", + file_path, + pattern_type, + path_pattern, + ) return file_status diff --git a/awscli/customizations/s3/results.py b/awscli/customizations/s3/results.py index 3a4ea5df44c3..11c118b0d72c 100644 --- a/awscli/customizations/s3/results.py +++ b/awscli/customizations/s3/results.py @@ -11,23 +11,19 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
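The ``filters.py`` changes above are formatting-only, but the rule the ``Filter`` class implements is easy to miss in the diff: every file starts out included, each ``--include``/``--exclude`` pattern is joined to the appropriate root directory and matched with ``fnmatch``, and the last pattern that matches wins. A condensed sketch of that rule, assuming a single root directory and ignoring the separate source/destination roots the real code tracks:

    import fnmatch
    import os


    def should_include(path, patterns, rootdir):
        # Files are included by default; each matching pattern flips the
        # status, so the last matching --include/--exclude decides.
        included = True
        for pattern_type, pattern in patterns:
            full_pattern = os.path.join(rootdir, pattern)
            if fnmatch.fnmatch(path, full_pattern):
                included = pattern_type == 'include'
        return included


    patterns = [('exclude', '*'), ('include', '*.txt')]
    print(should_include('/data/a.txt', patterns, '/data'))  # True
    print(should_include('/data/a.jpg', patterns, '/data'))  # False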
from __future__ import division + import logging import sys import threading import time -from collections import namedtuple -from collections import defaultdict - -from s3transfer.exceptions import CancelledError -from s3transfer.exceptions import FatalError -from s3transfer.subscribers import BaseSubscriber +from collections import defaultdict, namedtuple -from awscli.compat import queue, ensure_text_type -from awscli.customizations.s3.utils import human_readable_size -from awscli.customizations.utils import uni_print -from awscli.customizations.s3.utils import WarningResult +from awscli.compat import ensure_text_type, queue from awscli.customizations.s3.subscribers import OnDoneFilteredSubscriber - +from awscli.customizations.s3.utils import WarningResult, human_readable_size +from awscli.customizations.utils import uni_print +from s3transfer.exceptions import CancelledError, FatalError +from s3transfer.subscribers import BaseSubscriber LOGGER = logging.getLogger(__name__) @@ -51,8 +47,8 @@ def _create_new_result_cls(name, extra_fields=None, base_cls=BaseResult): QueuedResult = _create_new_result_cls('QueuedResult', ['total_transfer_size']) ProgressResult = _create_new_result_cls( - 'ProgressResult', ['bytes_transferred', 'total_transfer_size', - 'timestamp']) + 'ProgressResult', ['bytes_transferred', 'total_transfer_size', 'timestamp'] +) SuccessResult = _create_new_result_cls('SuccessResult') @@ -65,10 +61,12 @@ def _create_new_result_cls(name, extra_fields=None, base_cls=BaseResult): CtrlCResult = _create_new_result_cls('CtrlCResult', base_cls=ErrorResult) CommandResult = namedtuple( - 'CommandResult', ['num_tasks_failed', 'num_tasks_warned']) + 'CommandResult', ['num_tasks_failed', 'num_tasks_warned'] +) FinalTotalSubmissionsResult = namedtuple( - 'FinalTotalSubmissionsResult', ['total_submissions']) + 'FinalTotalSubmissionsResult', ['total_submissions'] +) class ShutdownThreadRequest(object): @@ -91,7 +89,7 @@ def on_queued(self, future, **kwargs): transfer_type=self._transfer_type, src=self._src, dest=self._dest, - total_transfer_size=self._size + total_transfer_size=self._size, ) ) @@ -105,7 +103,7 @@ def on_progress(self, future, bytes_transferred, **kwargs): dest=self._dest, bytes_transferred=bytes_transferred, timestamp=time.time(), - total_transfer_size=self._size + total_transfer_size=self._size, ) ) @@ -139,12 +137,14 @@ def _on_failure(self, future, e): class BaseResultHandler(object): """Base handler class to be called in the ResultProcessor""" + def __call__(self, result): raise NotImplementedError('__call__()') class ResultRecorder(BaseResultHandler): """Records and track transfer statistics based on results received""" + def __init__(self): self.bytes_transferred = 0 self.bytes_failed_to_transfer = 0 @@ -175,14 +175,15 @@ def __init__(self): def expected_totals_are_final(self): return ( - self.final_expected_files_transferred == - self.expected_files_transferred + self.final_expected_files_transferred + == self.expected_files_transferred ) def __call__(self, result): """Record the result of an individual Result object""" self._result_handler_map.get(type(result), self._record_noop)( - result=result) + result=result + ) def _get_ongoing_dict_key(self, result): if not isinstance(result, BaseResult): @@ -194,7 +195,7 @@ def _get_ongoing_dict_key(self, result): for result_property in [result.transfer_type, result.src, result.dest]: if result_property is not None: key_parts.append(ensure_text_type(result_property)) - return u':'.join(key_parts) + return ':'.join(key_parts) 
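One behavioral detail hiding in the ``ResultRecorder`` hunks: in-flight transfers are tracked in dictionaries keyed by a string built from whichever of ``transfer_type``, ``src``, and ``dest`` are set, joined with ``':'`` (dropping the ``u''`` prefix above is a no-op on Python 3). A standalone sketch of that key scheme, with ``str`` standing in for ``ensure_text_type``:

    def ongoing_dict_key(transfer_type, src, dest):
        # Include only the properties that are present so uploads,
        # downloads, and deletes all get stable, unique identifiers
        # for the ongoing-progress dictionaries.
        key_parts = []
        for result_property in (transfer_type, src, dest):
            if result_property is not None:
                key_parts.append(str(result_property))
        return ':'.join(key_parts)


    print(ongoing_dict_key('upload', 'local/file.txt', 's3://bucket/key'))
    # -> upload:local/file.txt:s3://bucket/key
    print(ongoing_dict_key('delete', 's3://bucket/key', None))
    # -> delete:s3://bucket/key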
    def _pop_result_from_ongoing_dicts(self, result):
        ongoing_key = self._get_ongoing_dict_key(result)
@@ -210,8 +211,9 @@ def _record_queued_result(self, result, **kwargs):
        if self.start_time is None:
            self.start_time = time.time()
        total_transfer_size = result.total_transfer_size
-        self._ongoing_total_sizes[
-            self._get_ongoing_dict_key(result)] = total_transfer_size
+        self._ongoing_total_sizes[self._get_ongoing_dict_key(result)] = (
+            total_transfer_size
+        )
        # The total transfer size can be None if we do not know the size
        # immediately so do not add to the total right away.
        if total_transfer_size:
@@ -221,8 +223,9 @@
    def _record_progress_result(self, result, **kwargs):
        bytes_transferred = result.bytes_transferred
        self._update_ongoing_transfer_size_if_unknown(result)
-        self._ongoing_progress[
-            self._get_ongoing_dict_key(result)] += bytes_transferred
+        self._ongoing_progress[self._get_ongoing_dict_key(result)] += (
+            bytes_transferred
+        )
        self.bytes_transferred += bytes_transferred
        # Since the start time is captured in the result recorder and
        # timestamps are captured in the subscriber, there is a chance that if
@@ -233,7 +236,8 @@
        # negative progress being displayed or zero division occurring.
        if result.timestamp > self.start_time:
            self.bytes_transfer_speed = self.bytes_transferred / (
-                result.timestamp - self.start_time)
+                result.timestamp - self.start_time
+            )

    def _update_ongoing_transfer_size_if_unknown(self, result):
        # This is a special case when the transfer size was previously not
@@ -270,7 +274,8 @@ def _record_failure_result(self, result, **kwargs):
        # the count for bytes transferred by just adding on the remaining bytes
        # that did not get transferred.
        total_progress, total_file_size = self._pop_result_from_ongoing_dicts(
-            result)
+            result
+        )
        if total_file_size is not None:
            progress_left = total_file_size - total_progress
            self.bytes_failed_to_transfer += progress_left
@@ -299,25 +304,17 @@ class ResultPrinter(BaseResultHandler):
    FILE_PROGRESS_FORMAT = (
        'Completed {files_completed} file(s) with ' + _FILES_REMAINING
    )
-    SUCCESS_FORMAT = (
-        u'{transfer_type}: {transfer_location}'
-    )
-    DRY_RUN_FORMAT = u'(dryrun) ' + SUCCESS_FORMAT
-    FAILURE_FORMAT = (
-        u'{transfer_type} failed: {transfer_location} {exception}'
-    )
+    SUCCESS_FORMAT = '{transfer_type}: {transfer_location}'
+    DRY_RUN_FORMAT = '(dryrun) ' + SUCCESS_FORMAT
+    FAILURE_FORMAT = '{transfer_type} failed: {transfer_location} {exception}'
    # TODO: Add "warning: " prefix once all commands are converted to using
    # result printer and remove "warning: " prefix from ``create_warning``.
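Both `ResultRecorder.__call__` above and `ResultPrinter.__call__` below dispatch on the concrete result type with a dict lookup that falls back to a no-op handler. A self-contained sketch of that dispatch pattern (the class and counter names here are illustrative, not the real recorder):

from collections import namedtuple

SuccessResult = namedtuple('SuccessResult', ['src', 'dest'])
FailureResult = namedtuple('FailureResult', ['src', 'dest'])


class RecorderSketch:
    def __init__(self):
        self.files_transferred = 0
        self.files_failed = 0
        # Map concrete result classes to bound handlers; anything
        # unrecognized falls through to the no-op.
        self._result_handler_map = {
            SuccessResult: self._record_success,
            FailureResult: self._record_failure,
        }

    def __call__(self, result):
        self._result_handler_map.get(type(result), self._record_noop)(
            result=result
        )

    def _record_noop(self, **kwargs):
        pass

    def _record_success(self, result, **kwargs):
        self.files_transferred += 1

    def _record_failure(self, result, **kwargs):
        self.files_failed += 1


recorder = RecorderSketch()
recorder(SuccessResult(src='a.txt', dest='s3://bucket/a.txt'))
recorder(FailureResult(src='b.txt', dest='s3://bucket/b.txt'))
print(recorder.files_transferred, recorder.files_failed)  # 1 1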
- WARNING_FORMAT = ( - u'{message}' - ) - ERROR_FORMAT = ( - u'fatal error: {exception}' - ) + WARNING_FORMAT = '{message}' + ERROR_FORMAT = 'fatal error: {exception}' CTRL_C_MSG = 'cancelled: ctrl-c received' - SRC_DEST_TRANSFER_LOCATION_FORMAT = u'{src} to {dest}' - SRC_TRANSFER_LOCATION_FORMAT = u'{src}' + SRC_DEST_TRANSFER_LOCATION_FORMAT = '{src} to {dest}' + SRC_TRANSFER_LOCATION_FORMAT = '{src}' def __init__(self, result_recorder, out_file=None, error_file=None): """Prints status of ongoing transfer @@ -349,14 +346,14 @@ def __init__(self, result_recorder, out_file=None, error_file=None): ErrorResult: self._print_error, CtrlCResult: self._print_ctrl_c, DryRunResult: self._print_dry_run, - FinalTotalSubmissionsResult: - self._clear_progress_if_no_more_expected_transfers, + FinalTotalSubmissionsResult: self._clear_progress_if_no_more_expected_transfers, } def __call__(self, result): """Print the progress of the ongoing transfer based on a result""" self._result_handler_map.get(type(result), self._print_noop)( - result=result) + result=result + ) def _print_noop(self, **kwargs): # If the result does not have a handler, then do nothing with it. @@ -365,7 +362,7 @@ def _print_noop(self, **kwargs): def _print_dry_run(self, result, **kwargs): statement = self.DRY_RUN_FORMAT.format( transfer_type=result.transfer_type, - transfer_location=self._get_transfer_location(result) + transfer_location=self._get_transfer_location(result), ) statement = self._adjust_statement_padding(statement) self._print_to_out_file(statement) @@ -373,7 +370,7 @@ def _print_dry_run(self, result, **kwargs): def _print_success(self, result, **kwargs): success_statement = self.SUCCESS_FORMAT.format( transfer_type=result.transfer_type, - transfer_location=self._get_transfer_location(result) + transfer_location=self._get_transfer_location(result), ) success_statement = self._adjust_statement_padding(success_statement) self._print_to_out_file(success_statement) @@ -383,7 +380,7 @@ def _print_failure(self, result, **kwargs): failure_statement = self.FAILURE_FORMAT.format( transfer_type=result.transfer_type, transfer_location=self._get_transfer_location(result), - exception=result.exception + exception=result.exception, ) failure_statement = self._adjust_statement_padding(failure_statement) self._print_to_error_file(failure_statement) @@ -397,7 +394,8 @@ def _print_warning(self, result, **kwargs): def _print_error(self, result, **kwargs): self._flush_error_statement( - self.ERROR_FORMAT.format(exception=result.exception)) + self.ERROR_FORMAT.format(exception=result.exception) + ) def _print_ctrl_c(self, result, **kwargs): self._flush_error_statement(self.CTRL_C_MSG) @@ -410,7 +408,8 @@ def _get_transfer_location(self, result): if result.dest is None: return self.SRC_TRANSFER_LOCATION_FORMAT.format(src=result.src) return self.SRC_DEST_TRANSFER_LOCATION_FORMAT.format( - src=result.src, dest=result.dest) + src=result.src, dest=result.dest + ) def _redisplay_progress(self): # Reset to zero because done statements are printed with new lines @@ -426,34 +425,40 @@ def _add_progress_if_needed(self): def _print_progress(self, **kwargs): # Get all of the statistics in the correct form. remaining_files = self._get_expected_total( - str(self._result_recorder.expected_files_transferred - - self._result_recorder.files_transferred) + str( + self._result_recorder.expected_files_transferred + - self._result_recorder.files_transferred + ) ) # Create the display statement. 
        if self._result_recorder.expected_bytes_transferred > 0:
            bytes_completed = human_readable_size(
-                self._result_recorder.bytes_transferred +
-                self._result_recorder.bytes_failed_to_transfer
+                self._result_recorder.bytes_transferred
+                + self._result_recorder.bytes_failed_to_transfer
            )
            expected_bytes_completed = self._get_expected_total(
                human_readable_size(
-                    self._result_recorder.expected_bytes_transferred))
+                    self._result_recorder.expected_bytes_transferred
+                )
+            )

-            transfer_speed = human_readable_size(
-                self._result_recorder.bytes_transfer_speed) + '/s'
+            transfer_speed = (
+                human_readable_size(self._result_recorder.bytes_transfer_speed)
+                + '/s'
+            )
            progress_statement = self.BYTE_PROGRESS_FORMAT.format(
                bytes_completed=bytes_completed,
                expected_bytes_completed=expected_bytes_completed,
                transfer_speed=transfer_speed,
-                remaining_files=remaining_files
+                remaining_files=remaining_files,
            )
        else:
            # We're not expecting any bytes to be transferred, so we should
            # only print information about the number of files transferred.
            progress_statement = self.FILE_PROGRESS_FORMAT.format(
                files_completed=self._result_recorder.files_transferred,
-                remaining_files=remaining_files
+                remaining_files=remaining_files,
            )

        if not self._result_recorder.expected_totals_are_final():
@@ -461,7 +466,8 @@
        # Make sure that it overrides any previous progress bar.
        progress_statement = self._adjust_statement_padding(
-            progress_statement, ending_char='\r')
+            progress_statement, ending_char='\r'
+        )
        # We do not want to include the carriage return in this calculation
        # as progress length is used for determining whitespace padding.
        # So we subtract one off of the length.
@@ -473,7 +479,8 @@
    def _get_expected_total(self, expected_total):
        if not self._result_recorder.expected_totals_are_final():
            return self._ESTIMATED_EXPECTED_TOTAL.format(
-                expected_total=expected_total)
+                expected_total=expected_total
+            )
        return expected_total

    def _adjust_statement_padding(self, print_statement, ending_char='\n'):
@@ -500,12 +507,14 @@ def _clear_progress_if_no_more_expected_transfers(self, **kwargs):

 class NoProgressResultPrinter(ResultPrinter):
    """A result printer that doesn't print progress"""
+
    def _print_progress(self, **kwargs):
        pass


 class OnlyShowErrorsResultPrinter(ResultPrinter):
    """A result printer that only prints out errors"""
+
    def _print_progress(self, **kwargs):
        pass
@@ -537,7 +546,8 @@ def run(self):
        if isinstance(result, ShutdownThreadRequest):
            LOGGER.debug(
                'Shutdown request received in result processing '
-                'thread, shutting down result thread.'
+ ) break if self._result_handlers_enabled: self._process_result(result) @@ -558,7 +568,11 @@ def _process_result(self, result): except Exception as e: LOGGER.debug( 'Error processing result %s with handler %s: %s', - result, result_handler, e, exc_info=True) + result, + result_handler, + e, + exc_info=True, + ) class CommandResultRecorder(object): @@ -600,7 +614,7 @@ def get_command_result(self): """ return CommandResult( self._result_recorder.files_failed + self._result_recorder.errors, - self._result_recorder.files_warned + self._result_recorder.files_warned, ) def notify_total_submissions(self, total): @@ -612,8 +626,11 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, *args): if exc_type: - LOGGER.debug('Exception caught during command execution: %s', - exc_value, exc_info=True) + LOGGER.debug( + 'Exception caught during command execution: %s', + exc_value, + exc_info=True, + ) self.result_queue.put(ErrorResult(exception=exc_value)) self.shutdown() return True diff --git a/awscli/customizations/s3/s3.py b/awscli/customizations/s3/s3.py index 7d9c3d6a70c9..725bd0ca9bf7 100644 --- a/awscli/customizations/s3/s3.py +++ b/awscli/customizations/s3/s3.py @@ -12,11 +12,20 @@ # language governing permissions and limitations under the License. from awscli.customizations import utils from awscli.customizations.commands import BasicCommand -from awscli.customizations.s3.subcommands import ListCommand, WebsiteCommand, \ - CpCommand, MvCommand, RmCommand, SyncCommand, MbCommand, RbCommand, \ - PresignCommand -from awscli.customizations.s3.syncstrategy.register import \ - register_sync_strategies +from awscli.customizations.s3.subcommands import ( + CpCommand, + ListCommand, + MbCommand, + MvCommand, + PresignCommand, + RbCommand, + RmCommand, + SyncCommand, + WebsiteCommand, +) +from awscli.customizations.s3.syncstrategy.register import ( + register_sync_strategies, +) def awscli_initialize(cli): diff --git a/awscli/customizations/s3/s3handler.py b/awscli/customizations/s3/s3handler.py index 1f875312d79b..43d86ca48f75 100644 --- a/awscli/customizations/s3/s3handler.py +++ b/awscli/customizations/s3/s3handler.py @@ -13,37 +13,46 @@ import logging import os -from s3transfer.manager import TransferManager - -from awscli.customizations.s3.utils import ( - human_readable_size, MAX_UPLOAD_SIZE, find_bucket_key, relative_path, - create_warning, NonSeekableStream) -from awscli.customizations.s3.transferconfig import \ - create_transfer_config_from_runtime_config -from awscli.customizations.s3.results import QueuedResultSubscriber -from awscli.customizations.s3.results import ProgressResultSubscriber -from awscli.customizations.s3.results import DoneResultSubscriber -from awscli.customizations.s3.results import QueuedResult -from awscli.customizations.s3.results import SuccessResult -from awscli.customizations.s3.results import FailureResult -from awscli.customizations.s3.results import DryRunResult -from awscli.customizations.s3.results import ResultRecorder -from awscli.customizations.s3.results import ResultPrinter -from awscli.customizations.s3.results import OnlyShowErrorsResultPrinter -from awscli.customizations.s3.results import NoProgressResultPrinter -from awscli.customizations.s3.results import ResultProcessor -from awscli.customizations.s3.results import CommandResultRecorder -from awscli.customizations.s3.utils import RequestParamsMapper -from awscli.customizations.s3.utils import StdoutBytesWriter +from awscli.compat import get_binary_stdin +from awscli.customizations.s3.results 
import ( + CommandResultRecorder, + DoneResultSubscriber, + DryRunResult, + FailureResult, + NoProgressResultPrinter, + OnlyShowErrorsResultPrinter, + ProgressResultSubscriber, + QueuedResult, + QueuedResultSubscriber, + ResultPrinter, + ResultProcessor, + ResultRecorder, + SuccessResult, +) from awscli.customizations.s3.subscribers import ( - ProvideSizeSubscriber, ProvideUploadContentTypeSubscriber, + CopyPropsSubscriberFactory, + DeleteCopySourceObjectSubscriber, + DeleteSourceFileSubscriber, + DeleteSourceObjectSubscriber, + DirectoryCreatorSubscriber, ProvideLastModifiedTimeSubscriber, - CopyPropsSubscriberFactory, DirectoryCreatorSubscriber, - DeleteSourceFileSubscriber, DeleteSourceObjectSubscriber, - DeleteCopySourceObjectSubscriber + ProvideSizeSubscriber, + ProvideUploadContentTypeSubscriber, ) -from awscli.compat import get_binary_stdin - +from awscli.customizations.s3.transferconfig import ( + create_transfer_config_from_runtime_config, +) +from awscli.customizations.s3.utils import ( + MAX_UPLOAD_SIZE, + NonSeekableStream, + RequestParamsMapper, + StdoutBytesWriter, + create_warning, + find_bucket_key, + human_readable_size, + relative_path, +) +from s3transfer.manager import TransferManager LOGGER = logging.getLogger(__name__) @@ -73,12 +82,15 @@ def __call__(self, transfer_manager, result_queue): result_processor_handlers = [result_recorder] self._add_result_printer(result_recorder, result_processor_handlers) result_processor = ResultProcessor( - result_queue, result_processor_handlers) + result_queue, result_processor_handlers + ) command_result_recorder = CommandResultRecorder( - result_queue, result_recorder, result_processor) + result_queue, result_recorder, result_processor + ) return S3TransferHandler( - transfer_manager, self._cli_params, command_result_recorder) + transfer_manager, self._cli_params, command_result_recorder + ) def _add_result_printer(self, result_recorder, result_processor_handlers): if self._cli_params.get('quiet'): @@ -119,8 +131,9 @@ def __init__(self, transfer_manager, cli_params, result_command_recorder): self._result_command_recorder = result_command_recorder submitter_args = ( - self._transfer_manager, self._result_command_recorder.result_queue, - cli_params + self._transfer_manager, + self._result_command_recorder.result_queue, + cli_params, ) self._submitters = [ UploadStreamRequestSubmitter(*submitter_args), @@ -129,7 +142,7 @@ def __init__(self, transfer_manager, cli_params, result_command_recorder): DownloadRequestSubmitter(*submitter_args), CopyRequestSubmitter(*submitter_args), DeleteRequestSubmitter(*submitter_args), - LocalDeleteRequestSubmitter(*submitter_args) + LocalDeleteRequestSubmitter(*submitter_args), ] def call(self, fileinfos): @@ -153,7 +166,8 @@ def call(self, fileinfos): total_submissions += 1 break self._result_command_recorder.notify_total_submissions( - total_submissions) + total_submissions + ) return self._result_command_recorder.get_command_result() @@ -219,7 +233,8 @@ def _do_submit(self, fileinfo): self.REQUEST_MAPPER_METHOD(extra_args, self._cli_params) if not self._cli_params.get('dryrun'): return self._submit_transfer_request( - fileinfo, extra_args, self._get_subscribers(fileinfo)) + fileinfo, extra_args, self._get_subscribers(fileinfo) + ) else: self._submit_dryrun(fileinfo) @@ -232,9 +247,8 @@ def _get_subscribers(self, fileinfo): subscribers.extend( [ ProgressResultSubscriber(**result_subscriber_kwargs), - DoneResultSubscriber(**result_subscriber_kwargs) + DoneResultSubscriber(**result_subscriber_kwargs), ] 
- ) return subscribers @@ -251,8 +265,9 @@ def _get_result_subscriber_kwargs(self, fileinfo): def _submit_dryrun(self, fileinfo): transfer_type = self._get_transfer_type(fileinfo) src, dest = self._format_src_dest(fileinfo) - self._result_queue.put(DryRunResult( - transfer_type=transfer_type, src=src, dest=dest)) + self._result_queue.put( + DryRunResult(transfer_type=transfer_type, src=src, dest=dest) + ) def _add_provide_size_subscriber(self, subscribers, fileinfo): subscribers.append(ProvideSizeSubscriber(fileinfo.size)) @@ -280,27 +295,27 @@ def _get_warning_handlers(self): return [] def _should_inject_content_type(self): - return ( - self._cli_params.get('guess_mime_type') and - not self._cli_params.get('content_type') - ) + return self._cli_params.get( + 'guess_mime_type' + ) and not self._cli_params.get('content_type') def _warn_glacier(self, fileinfo): if not self._cli_params.get('force_glacier_transfer'): if not fileinfo.is_glacier_compatible(): LOGGER.debug( 'Encountered glacier object s3://%s. Not performing ' - '%s on object.' % (fileinfo.src, fileinfo.operation_name)) + '%s on object.' % (fileinfo.src, fileinfo.operation_name) + ) if not self._cli_params.get('ignore_glacier_warnings'): warning = create_warning( - 's3://'+fileinfo.src, + 's3://' + fileinfo.src, 'Object is of storage class GLACIER. Unable to ' 'perform %s operations on GLACIER objects. You must ' 'restore the object to be able to perform the ' 'operation. See aws s3 %s help for additional ' 'parameter options to ignore or force these ' - 'transfers.' % - (fileinfo.operation_name, fileinfo.operation_name) + 'transfers.' + % (fileinfo.operation_name, fileinfo.operation_name), ) self._result_queue.put(warning) return True @@ -311,10 +326,12 @@ def _warn_parent_reference(self, fileinfo): # need to take that into account when checking for a parent prefix. parent_prefix = '..' + os.path.sep escapes_cwd = os.path.normpath(fileinfo.compare_key).startswith( - parent_prefix) + parent_prefix + ) if escapes_cwd: warning = create_warning( - fileinfo.compare_key, "File references a parent directory.") + fileinfo.compare_key, "File references a parent directory." + ) self._result_queue.put(warning) return True return False @@ -353,8 +370,11 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers): bucket, key = find_bucket_key(fileinfo.dest) filein = self._get_filein(fileinfo) return self._transfer_manager.upload( - fileobj=filein, bucket=bucket, key=key, - extra_args=extra_args, subscribers=subscribers + fileobj=filein, + bucket=bucket, + key=key, + extra_args=extra_args, + subscribers=subscribers, ) def _get_filein(self, fileinfo): @@ -366,11 +386,13 @@ def _get_warning_handlers(self): def _warn_if_too_large(self, fileinfo): if getattr(fileinfo, 'size') and fileinfo.size > MAX_UPLOAD_SIZE: file_path = relative_path(fileinfo.src) - warning_message = ( - "File %s exceeds s3 upload limit of %s." % ( - file_path, human_readable_size(MAX_UPLOAD_SIZE))) + warning_message = "File %s exceeds s3 upload limit of %s." 
% ( + file_path, + human_readable_size(MAX_UPLOAD_SIZE), + ) warning = create_warning( - file_path, warning_message, skip_file=False) + file_path, warning_message, skip_file=False + ) self._result_queue.put(warning) def _format_src_dest(self, fileinfo): @@ -387,18 +409,25 @@ def can_submit(self, fileinfo): def _add_additional_subscribers(self, subscribers, fileinfo): subscribers.append(DirectoryCreatorSubscriber()) - subscribers.append(ProvideLastModifiedTimeSubscriber( - fileinfo.last_update, self._result_queue)) + subscribers.append( + ProvideLastModifiedTimeSubscriber( + fileinfo.last_update, self._result_queue + ) + ) if self._cli_params.get('is_move', False): - subscribers.append(DeleteSourceObjectSubscriber( - fileinfo.source_client)) + subscribers.append( + DeleteSourceObjectSubscriber(fileinfo.source_client) + ) def _submit_transfer_request(self, fileinfo, extra_args, subscribers): bucket, key = find_bucket_key(fileinfo.src) fileout = self._get_fileout(fileinfo) return self._transfer_manager.download( - fileobj=fileout, bucket=bucket, key=key, - extra_args=extra_args, subscribers=subscribers + fileobj=fileout, + bucket=bucket, + key=key, + extra_args=extra_args, + subscribers=subscribers, ) def _get_fileout(self, fileinfo): @@ -423,8 +452,9 @@ def _add_additional_subscribers(self, subscribers, fileinfo): if not self._cli_params.get('metadata_directive'): self._add_copy_props_subscribers(subscribers, fileinfo) if self._cli_params.get('is_move', False): - subscribers.append(DeleteCopySourceObjectSubscriber( - fileinfo.source_client)) + subscribers.append( + DeleteCopySourceObjectSubscriber(fileinfo.source_client) + ) def _add_copy_props_subscribers(self, subscribers, fileinfo): copy_props_factory = CopyPropsSubscriberFactory( @@ -439,9 +469,12 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers): source_bucket, source_key = find_bucket_key(fileinfo.src) copy_source = {'Bucket': source_bucket, 'Key': source_key} return self._transfer_manager.copy( - bucket=bucket, key=key, copy_source=copy_source, - extra_args=extra_args, subscribers=subscribers, - source_client=fileinfo.source_client + bucket=bucket, + key=key, + copy_source=copy_source, + extra_args=extra_args, + subscribers=subscribers, + source_client=fileinfo.source_client, ) def _get_warning_handlers(self): @@ -455,9 +488,8 @@ def _format_src_dest(self, fileinfo): class UploadStreamRequestSubmitter(UploadRequestSubmitter): def can_submit(self, fileinfo): - return ( - fileinfo.operation_name == 'upload' and - self._cli_params.get('is_stream') + return fileinfo.operation_name == 'upload' and self._cli_params.get( + 'is_stream' ) def _add_provide_size_subscriber(self, subscribers, fileinfo): @@ -478,9 +510,8 @@ def _format_local_path(self, path): class DownloadStreamRequestSubmitter(DownloadRequestSubmitter): def can_submit(self, fileinfo): - return ( - fileinfo.operation_name == 'download' and - self._cli_params.get('is_stream') + return fileinfo.operation_name == 'download' and self._cli_params.get( + 'is_stream' ) def _add_provide_size_subscriber(self, subscribers, fileinfo): @@ -500,8 +531,9 @@ class DeleteRequestSubmitter(BaseTransferRequestSubmitter): REQUEST_MAPPER_METHOD = RequestParamsMapper.map_delete_object_params def can_submit(self, fileinfo): - return fileinfo.operation_name == 'delete' and \ - fileinfo.src_type == 's3' + return ( + fileinfo.operation_name == 'delete' and fileinfo.src_type == 's3' + ) def _add_provide_size_subscriber(self, subscribers, fileinfo): pass @@ -509,8 +541,11 @@ def 
_add_provide_size_subscriber(self, subscribers, fileinfo): def _submit_transfer_request(self, fileinfo, extra_args, subscribers): bucket, key = find_bucket_key(fileinfo.src) return self._transfer_manager.delete( - bucket=bucket, key=key, extra_args=extra_args, - subscribers=subscribers) + bucket=bucket, + key=key, + extra_args=extra_args, + subscribers=subscribers, + ) def _format_src_dest(self, fileinfo): return self._format_s3_path(fileinfo.src), None @@ -520,8 +555,10 @@ class LocalDeleteRequestSubmitter(BaseTransferRequestSubmitter): REQUEST_MAPPER_METHOD = None def can_submit(self, fileinfo): - return fileinfo.operation_name == 'delete' and \ - fileinfo.src_type == 'local' + return ( + fileinfo.operation_name == 'delete' + and fileinfo.src_type == 'local' + ) def _submit_transfer_request(self, fileinfo, extra_args, subscribers): # This is quirky but essentially instead of relying on a built-in @@ -537,19 +574,15 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers): # deleting a local file only happens for sync --delete downloads and # is very fast compared to all of the other types of transfers. src, dest = self._format_src_dest(fileinfo) - result_kwargs = { - 'transfer_type': 'delete', - 'src': src, - 'dest': dest - } + result_kwargs = {'transfer_type': 'delete', 'src': src, 'dest': dest} try: - self._result_queue.put(QueuedResult( - total_transfer_size=0, **result_kwargs)) + self._result_queue.put( + QueuedResult(total_transfer_size=0, **result_kwargs) + ) os.remove(fileinfo.src) self._result_queue.put(SuccessResult(**result_kwargs)) except Exception as e: - self._result_queue.put( - FailureResult(exception=e, **result_kwargs)) + self._result_queue.put(FailureResult(exception=e, **result_kwargs)) finally: # Return True to indicate that the transfer was submitted return True diff --git a/awscli/customizations/s3/subcommands.py b/awscli/customizations/s3/subcommands.py index 1c51238c26f6..f6d614057dd6 100644 --- a/awscli/customizations/s3/subcommands.py +++ b/awscli/customizations/s3/subcommands.py @@ -10,146 +10,216 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
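The blocks being reformatted below (RECURSIVE, HUMAN_READABLE, and the rest) are plain dicts that BasicCommand consumes as ARG_TABLE entries. As a rough analogy only, not the CLI's actual wiring, an entry such as RECURSIVE corresponds to an argparse option along these lines:

import argparse

# Rough argparse analogy for an ARG_TABLE entry like RECURSIVE below:
# {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op', ...}
parser = argparse.ArgumentParser(prog='aws s3 rm')
parser.add_argument(
    '--recursive',
    action='store_true',
    dest='dir_op',
    help='Command is performed on all files or objects '
         'under the specified directory or prefix.',
)
args = parser.parse_args(['--recursive'])
print(args.dir_op)  # True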
-import os import logging +import os import sys -from botocore.client import Config -from botocore.utils import is_s3express_bucket, ensure_boolean from dateutil.parser import parse from dateutil.tz import tzlocal from awscli.compat import queue from awscli.customizations.commands import BasicCommand +from awscli.customizations.exceptions import ParamValidationError +from awscli.customizations.s3 import transferconfig from awscli.customizations.s3.comparator import Comparator from awscli.customizations.s3.factory import ( - ClientFactory, TransferManagerFactory + ClientFactory, + TransferManagerFactory, ) -from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder from awscli.customizations.s3.fileformat import FileFormat from awscli.customizations.s3.filegenerator import FileGenerator from awscli.customizations.s3.fileinfo import FileInfo +from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder from awscli.customizations.s3.filters import create_filter from awscli.customizations.s3.s3handler import S3TransferHandlerFactory -from awscli.customizations.s3.utils import find_bucket_key, AppendFilter, \ - find_dest_path_comp_key, human_readable_size, \ - RequestParamsMapper, split_s3_bucket_key, block_unsupported_resources, \ - S3PathResolver +from awscli.customizations.s3.syncstrategy.base import ( + MissingFileSync, + NeverSync, + SizeAndLastModifiedSync, +) +from awscli.customizations.s3.utils import ( + AppendFilter, + RequestParamsMapper, + S3PathResolver, + block_unsupported_resources, + find_bucket_key, + find_dest_path_comp_key, + human_readable_size, + split_s3_bucket_key, +) from awscli.customizations.utils import uni_print -from awscli.customizations.s3.syncstrategy.base import MissingFileSync, \ - SizeAndLastModifiedSync, NeverSync -from awscli.customizations.s3 import transferconfig -from awscli.customizations.exceptions import ParamValidationError - +from botocore.client import Config +from botocore.utils import ensure_boolean, is_s3express_bucket LOGGER = logging.getLogger(__name__) -RECURSIVE = {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op', - 'help_text': ( - "Command is performed on all files or objects " - "under the specified directory or prefix.")} +RECURSIVE = { + 'name': 'recursive', + 'action': 'store_true', + 'dest': 'dir_op', + 'help_text': ( + "Command is performed on all files or objects " + "under the specified directory or prefix." + ), +} -HUMAN_READABLE = {'name': 'human-readable', 'action': 'store_true', - 'help_text': "Displays file sizes in human readable format."} +HUMAN_READABLE = { + 'name': 'human-readable', + 'action': 'store_true', + 'help_text': "Displays file sizes in human readable format.", +} -SUMMARIZE = {'name': 'summarize', 'action': 'store_true', - 'help_text': ( - "Displays summary information " - "(number of objects, total size).")} +SUMMARIZE = { + 'name': 'summarize', + 'action': 'store_true', + 'help_text': ( + "Displays summary information " "(number of objects, total size)." + ), +} -DRYRUN = {'name': 'dryrun', 'action': 'store_true', - 'help_text': ( - "Displays the operations that would be performed using the " - "specified command without actually running them.")} +DRYRUN = { + 'name': 'dryrun', + 'action': 'store_true', + 'help_text': ( + "Displays the operations that would be performed using the " + "specified command without actually running them." 
+ ), +} -QUIET = {'name': 'quiet', 'action': 'store_true', - 'help_text': ( - "Does not display the operations performed from the specified " - "command.")} +QUIET = { + 'name': 'quiet', + 'action': 'store_true', + 'help_text': ( + "Does not display the operations performed from the specified " + "command." + ), +} -FORCE = {'name': 'force', 'action': 'store_true', - 'help_text': ( - "Deletes all objects in the bucket including the bucket itself. " - "Note that versioned objects will not be deleted in this " - "process which would cause the bucket deletion to fail because " - "the bucket would not be empty. To delete versioned " - "objects use the ``s3api delete-object`` command with " - "the ``--version-id`` parameter.")} +FORCE = { + 'name': 'force', + 'action': 'store_true', + 'help_text': ( + "Deletes all objects in the bucket including the bucket itself. " + "Note that versioned objects will not be deleted in this " + "process which would cause the bucket deletion to fail because " + "the bucket would not be empty. To delete versioned " + "objects use the ``s3api delete-object`` command with " + "the ``--version-id`` parameter." + ), +} -FOLLOW_SYMLINKS = {'name': 'follow-symlinks', 'action': 'store_true', - 'default': True, 'group_name': 'follow_symlinks', - 'help_text': ( - "Symbolic links are followed " - "only when uploading to S3 from the local filesystem. " - "Note that S3 does not support symbolic links, so the " - "contents of the link target are uploaded under the " - "name of the link. When neither ``--follow-symlinks`` " - "nor ``--no-follow-symlinks`` is specified, the default " - "is to follow symlinks.")} +FOLLOW_SYMLINKS = { + 'name': 'follow-symlinks', + 'action': 'store_true', + 'default': True, + 'group_name': 'follow_symlinks', + 'help_text': ( + "Symbolic links are followed " + "only when uploading to S3 from the local filesystem. " + "Note that S3 does not support symbolic links, so the " + "contents of the link target are uploaded under the " + "name of the link. When neither ``--follow-symlinks`` " + "nor ``--no-follow-symlinks`` is specified, the default " + "is to follow symlinks." + ), +} -NO_FOLLOW_SYMLINKS = {'name': 'no-follow-symlinks', 'action': 'store_false', - 'dest': 'follow_symlinks', 'default': True, - 'group_name': 'follow_symlinks'} +NO_FOLLOW_SYMLINKS = { + 'name': 'no-follow-symlinks', + 'action': 'store_false', + 'dest': 'follow_symlinks', + 'default': True, + 'group_name': 'follow_symlinks', +} -NO_GUESS_MIME_TYPE = {'name': 'no-guess-mime-type', 'action': 'store_false', - 'dest': 'guess_mime_type', 'default': True, - 'help_text': ( - "Do not try to guess the mime type for " - "uploaded files. By default the mime type of a " - "file is guessed when it is uploaded.")} +NO_GUESS_MIME_TYPE = { + 'name': 'no-guess-mime-type', + 'action': 'store_false', + 'dest': 'guess_mime_type', + 'default': True, + 'help_text': ( + "Do not try to guess the mime type for " + "uploaded files. By default the mime type of a " + "file is guessed when it is uploaded." + ), +} -CONTENT_TYPE = {'name': 'content-type', - 'help_text': ( - "Specify an explicit content type for this operation. " - "This value overrides any guessed mime types.")} +CONTENT_TYPE = { + 'name': 'content-type', + 'help_text': ( + "Specify an explicit content type for this operation. " + "This value overrides any guessed mime types." 
+    ),
+}

-EXCLUDE = {'name': 'exclude', 'action': AppendFilter, 'nargs': 1,
-           'dest': 'filters',
-           'help_text': (
-               "Exclude all files or objects from the command that match "
-               "the specified pattern.")}
+EXCLUDE = {
+    'name': 'exclude',
+    'action': AppendFilter,
+    'nargs': 1,
+    'dest': 'filters',
+    'help_text': (
+        "Exclude all files or objects from the command that match "
+        "the specified pattern."
+    ),
+}

-INCLUDE = {'name': 'include', 'action': AppendFilter, 'nargs': 1,
-           'dest': 'filters',
-           'help_text': (
-               "Don't exclude files or objects "
-               "in the command that match the specified pattern. "
-               'See Use of '
-               'Exclude and Include Filters for details.')}
+INCLUDE = {
+    'name': 'include',
+    'action': AppendFilter,
+    'nargs': 1,
+    'dest': 'filters',
+    'help_text': (
+        "Don't exclude files or objects "
+        "in the command that match the specified pattern. "
+        'See Use of '
+        'Exclude and Include Filters for details.'
+    ),
+}

-ACL = {'name': 'acl',
-       'choices': ['private', 'public-read', 'public-read-write',
-                   'authenticated-read', 'aws-exec-read', 'bucket-owner-read',
-                   'bucket-owner-full-control', 'log-delivery-write'],
-       'help_text': (
-           "Sets the ACL for the object when the command is "
-           "performed. If you use this parameter you must have the "
-           '"s3:PutObjectAcl" permission included in the list of actions '
-           "for your IAM policy. "
-           "Only accepts values of ``private``, ``public-read``, "
-           "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, "
-           "``bucket-owner-read``, ``bucket-owner-full-control`` and "
-           "``log-delivery-write``. "
-           'See Canned ACL for details')}
+ACL = {
+    'name': 'acl',
+    'choices': [
+        'private',
+        'public-read',
+        'public-read-write',
+        'authenticated-read',
+        'aws-exec-read',
+        'bucket-owner-read',
+        'bucket-owner-full-control',
+        'log-delivery-write',
+    ],
+    'help_text': (
+        "Sets the ACL for the object when the command is "
+        "performed. If you use this parameter you must have the "
+        '"s3:PutObjectAcl" permission included in the list of actions '
+        "for your IAM policy. "
+        "Only accepts values of ``private``, ``public-read``, "
+        "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, "
+        "``bucket-owner-read``, ``bucket-owner-full-control`` and "
+        "``log-delivery-write``. "
+        'See Canned ACL for details'
+    ),
+}

 GRANTS = {
-    'name': 'grants', 'nargs': '+',
+    'name': 'grants',
+    'nargs': '+',
     'help_text': (
         '

Grant specific permissions to individual users or groups. You ' 'can supply a list of grants of the form

--grants ' @@ -174,40 +244,48 @@ '' 'For more information on Amazon S3 access control, see ' 'Access Control')} + 'UsingAuthAccess.html">Access Control' + ), +} SSE = { - 'name': 'sse', 'nargs': '?', 'const': 'AES256', + 'name': 'sse', + 'nargs': '?', + 'const': 'AES256', 'choices': ['AES256', 'aws:kms'], 'help_text': ( 'Specifies server-side encryption of the object in S3. ' 'Valid values are ``AES256`` and ``aws:kms``. If the parameter is ' 'specified but no value is provided, ``AES256`` is used.' - ) + ), } SSE_C = { - 'name': 'sse-c', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], + 'name': 'sse-c', + 'nargs': '?', + 'const': 'AES256', + 'choices': ['AES256'], 'help_text': ( 'Specifies server-side encryption using customer provided keys ' 'of the the object in S3. ``AES256`` is the only valid value. ' 'If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ``--sse-c-key`` ' 'must be specified as well.' - ) + ), } SSE_C_KEY = { - 'name': 'sse-c-key', 'cli_type_name': 'blob', + 'name': 'sse-c-key', + 'cli_type_name': 'blob', 'help_text': ( 'The customer-provided encryption key to use to server-side ' 'encrypt the object in S3. If you provide this value, ' '``--sse-c`` must be specified as well. The key provided should ' '**not** be base64 encoded.' - ) + ), } @@ -218,13 +296,15 @@ 'should be used to server-side encrypt the object in S3. You should ' 'only provide this parameter if you are using a customer managed ' 'customer master key (CMK) and not the AWS managed KMS CMK.' - ) + ), } SSE_C_COPY_SOURCE = { - 'name': 'sse-c-copy-source', 'nargs': '?', - 'const': 'AES256', 'choices': ['AES256'], + 'name': 'sse-c-copy-source', + 'nargs': '?', + 'const': 'AES256', + 'choices': ['AES256'], 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' @@ -233,12 +313,13 @@ 'value. If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ' '``--sse-c-copy-source-key`` must be specified as well. ' - ) + ), } SSE_C_COPY_SOURCE_KEY = { - 'name': 'sse-c-copy-source-key', 'cli_type_name': 'blob', + 'name': 'sse-c-copy-source-key', + 'cli_type_name': 'blob', 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' @@ -247,105 +328,132 @@ 'must be one that was used when the source object was created. ' 'If you provide this value, ``--sse-c-copy-source`` be specified as ' 'well. The key provided should **not** be base64 encoded.' - ) + ), } -STORAGE_CLASS = {'name': 'storage-class', - 'choices': ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', - 'ONEZONE_IA', 'INTELLIGENT_TIERING', 'GLACIER', - 'DEEP_ARCHIVE', 'GLACIER_IR'], - 'help_text': ( - "The type of storage to use for the object. " - "Valid choices are: STANDARD | REDUCED_REDUNDANCY " - "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING " - "| GLACIER | DEEP_ARCHIVE | GLACIER_IR. " - "Defaults to 'STANDARD'")} +STORAGE_CLASS = { + 'name': 'storage-class', + 'choices': [ + 'STANDARD', + 'REDUCED_REDUNDANCY', + 'STANDARD_IA', + 'ONEZONE_IA', + 'INTELLIGENT_TIERING', + 'GLACIER', + 'DEEP_ARCHIVE', + 'GLACIER_IR', + ], + 'help_text': ( + "The type of storage to use for the object. " + "Valid choices are: STANDARD | REDUCED_REDUNDANCY " + "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING " + "| GLACIER | DEEP_ARCHIVE | GLACIER_IR. 
" + "Defaults to 'STANDARD'" + ), +} -WEBSITE_REDIRECT = {'name': 'website-redirect', - 'help_text': ( - "If the bucket is configured as a website, " - "redirects requests for this object to another object " - "in the same bucket or to an external URL. Amazon S3 " - "stores the value of this header in the object " - "metadata.")} +WEBSITE_REDIRECT = { + 'name': 'website-redirect', + 'help_text': ( + "If the bucket is configured as a website, " + "redirects requests for this object to another object " + "in the same bucket or to an external URL. Amazon S3 " + "stores the value of this header in the object " + "metadata." + ), +} -CACHE_CONTROL = {'name': 'cache-control', - 'help_text': ( - "Specifies caching behavior along the " - "request/reply chain.")} +CACHE_CONTROL = { + 'name': 'cache-control', + 'help_text': ( + "Specifies caching behavior along the " "request/reply chain." + ), +} -CONTENT_DISPOSITION = {'name': 'content-disposition', - 'help_text': ( - "Specifies presentational information " - "for the object.")} +CONTENT_DISPOSITION = { + 'name': 'content-disposition', + 'help_text': ("Specifies presentational information " "for the object."), +} -CONTENT_ENCODING = {'name': 'content-encoding', - 'help_text': ( - "Specifies what content encodings have been " - "applied to the object and thus what decoding " - "mechanisms must be applied to obtain the media-type " - "referenced by the Content-Type header field.")} +CONTENT_ENCODING = { + 'name': 'content-encoding', + 'help_text': ( + "Specifies what content encodings have been " + "applied to the object and thus what decoding " + "mechanisms must be applied to obtain the media-type " + "referenced by the Content-Type header field." + ), +} -CONTENT_LANGUAGE = {'name': 'content-language', - 'help_text': ("The language the content is in.")} +CONTENT_LANGUAGE = { + 'name': 'content-language', + 'help_text': ("The language the content is in."), +} -SOURCE_REGION = {'name': 'source-region', - 'help_text': ( - "When transferring objects from an s3 bucket to an s3 " - "bucket, this specifies the region of the source bucket." - " Note the region specified by ``--region`` or through " - "configuration of the CLI refers to the region of the " - "destination bucket. If ``--source-region`` is not " - "specified the region of the source will be the same " - "as the region of the destination bucket.")} +SOURCE_REGION = { + 'name': 'source-region', + 'help_text': ( + "When transferring objects from an s3 bucket to an s3 " + "bucket, this specifies the region of the source bucket." + " Note the region specified by ``--region`` or through " + "configuration of the CLI refers to the region of the " + "destination bucket. If ``--source-region`` is not " + "specified the region of the source will be the same " + "as the region of the destination bucket." + ), +} EXPIRES = { 'name': 'expires', 'help_text': ( - "The date and time at which the object is no longer cacheable.") + "The date and time at which the object is no longer cacheable." + ), } METADATA = { - 'name': 'metadata', 'cli_type_name': 'map', + 'name': 'metadata', + 'cli_type_name': 'map', 'schema': { 'type': 'map', 'key': {'type': 'string'}, - 'value': {'type': 'string'} + 'value': {'type': 'string'}, }, 'help_text': ( "A map of metadata to store with the objects in S3. This will be " "applied to every object which is part of this request. In a sync, " "this means that files which haven't changed won't receive the new " "metadata. 
" - ) + ), } METADATA_DIRECTIVE = { - 'name': 'metadata-directive', 'choices': ['COPY', 'REPLACE'], + 'name': 'metadata-directive', + 'choices': ['COPY', 'REPLACE'], 'help_text': ( 'Sets the ``x-amz-metadata-directive`` header for CopyObject ' 'operations. It is recommended to use the ``--copy-props`` parameter ' 'instead to control copying of metadata properties. ' 'If ``--metadata-directive`` is set, the ``--copy-props`` parameter ' 'will be disabled and will have no affect on the transfer.' - ) + ), } COPY_PROPS = { 'name': 'copy-props', 'choices': ['none', 'metadata-directive', 'default'], - 'default': 'default', 'help_text': ( + 'default': 'default', + 'help_text': ( 'Determines which properties are copied from the source S3 object. ' 'This parameter only applies for S3 to S3 copies. Valid values are: ' '
    ' @@ -375,81 +483,104 @@ 'If you want to guarantee no additional API calls are made other than ' 'than the ones needed to perform the actual copy, set this option to ' '``none``.' - ) + ), } -INDEX_DOCUMENT = {'name': 'index-document', - 'help_text': ( - 'A suffix that is appended to a request that is for ' - 'a directory on the website endpoint (e.g. if the ' - 'suffix is index.html and you make a request to ' - 'samplebucket/images/ the data that is returned ' - 'will be for the object with the key name ' - 'images/index.html) The suffix must not be empty and ' - 'must not include a slash character.')} +INDEX_DOCUMENT = { + 'name': 'index-document', + 'help_text': ( + 'A suffix that is appended to a request that is for ' + 'a directory on the website endpoint (e.g. if the ' + 'suffix is index.html and you make a request to ' + 'samplebucket/images/ the data that is returned ' + 'will be for the object with the key name ' + 'images/index.html) The suffix must not be empty and ' + 'must not include a slash character.' + ), +} -ERROR_DOCUMENT = {'name': 'error-document', - 'help_text': ( - 'The object key name to use when ' - 'a 4XX class error occurs.')} +ERROR_DOCUMENT = { + 'name': 'error-document', + 'help_text': ( + 'The object key name to use when ' 'a 4XX class error occurs.' + ), +} -ONLY_SHOW_ERRORS = {'name': 'only-show-errors', 'action': 'store_true', - 'help_text': ( - 'Only errors and warnings are displayed. All other ' - 'output is suppressed.')} +ONLY_SHOW_ERRORS = { + 'name': 'only-show-errors', + 'action': 'store_true', + 'help_text': ( + 'Only errors and warnings are displayed. All other ' + 'output is suppressed.' + ), +} -NO_PROGRESS = {'name': 'no-progress', - 'action': 'store_false', - 'dest': 'progress', - 'help_text': ( - 'File transfer progress is not displayed. This flag ' - 'is only applied when the quiet and only-show-errors ' - 'flags are not provided.')} +NO_PROGRESS = { + 'name': 'no-progress', + 'action': 'store_false', + 'dest': 'progress', + 'help_text': ( + 'File transfer progress is not displayed. This flag ' + 'is only applied when the quiet and only-show-errors ' + 'flags are not provided.' + ), +} -EXPECTED_SIZE = {'name': 'expected-size', - 'help_text': ( - 'This argument specifies the expected size of a stream ' - 'in terms of bytes. Note that this argument is needed ' - 'only when a stream is being uploaded to s3 and the size ' - 'is larger than 50GB. Failure to include this argument ' - 'under these conditions may result in a failed upload ' - 'due to too many parts in upload.')} +EXPECTED_SIZE = { + 'name': 'expected-size', + 'help_text': ( + 'This argument specifies the expected size of a stream ' + 'in terms of bytes. Note that this argument is needed ' + 'only when a stream is being uploaded to s3 and the size ' + 'is larger than 50GB. Failure to include this argument ' + 'under these conditions may result in a failed upload ' + 'due to too many parts in upload.' + ), +} -PAGE_SIZE = {'name': 'page-size', 'cli_type_name': 'integer', - 'help_text': ( - 'The number of results to return in each response to a list ' - 'operation. The default value is 1000 (the maximum allowed). ' - 'Using a lower value may help if an operation times out.')} +PAGE_SIZE = { + 'name': 'page-size', + 'cli_type_name': 'integer', + 'help_text': ( + 'The number of results to return in each response to a list ' + 'operation. The default value is 1000 (the maximum allowed). ' + 'Using a lower value may help if an operation times out.' 
+ ), +} IGNORE_GLACIER_WARNINGS = { - 'name': 'ignore-glacier-warnings', 'action': 'store_true', + 'name': 'ignore-glacier-warnings', + 'action': 'store_true', 'help_text': ( 'Turns off glacier warnings. Warnings about an operation that cannot ' 'be performed because it involves copying, downloading, or moving ' 'a glacier object will no longer be printed to standard error and ' 'will no longer cause the return code of the command to be ``2``.' - ) + ), } FORCE_GLACIER_TRANSFER = { - 'name': 'force-glacier-transfer', 'action': 'store_true', + 'name': 'force-glacier-transfer', + 'action': 'store_true', 'help_text': ( 'Forces a transfer request on all Glacier objects in a sync or ' 'recursive copy.' - ) + ), } REQUEST_PAYER = { - 'name': 'request-payer', 'choices': ['requester'], - 'nargs': '?', 'const': 'requester', + 'name': 'request-payer', + 'choices': ['requester'], + 'nargs': '?', + 'const': 'requester', 'help_text': ( 'Confirms that the requester knows that they will be charged ' 'for the request. Bucket owners need not specify this parameter in ' @@ -457,11 +588,12 @@ 'pays buckets can be found at ' 'http://docs.aws.amazon.com/AmazonS3/latest/dev/' 'ObjectsinRequesterPaysBuckets.html' - ) + ), } VALIDATE_SAME_S3_PATHS = { - 'name': 'validate-same-s3-paths', 'action': 'store_true', + 'name': 'validate-same-s3-paths', + 'action': 'store_true', 'help_text': ( 'Resolves the source and destination S3 URIs to their ' 'underlying buckets and verifies that the file or object ' @@ -478,29 +610,56 @@ 'NOTE: Path validation requires making additional API calls. ' 'Future updates to this path-validation mechanism might change ' 'which API calls are made.' - ) + ), } CHECKSUM_MODE = { - 'name': 'checksum-mode', 'choices': ['ENABLED'], - 'help_text': 'To retrieve the checksum, this mode must be enabled. If the object has a ' - 'checksum, it will be verified.' + 'name': 'checksum-mode', + 'choices': ['ENABLED'], + 'help_text': 'To retrieve the checksum, this mode must be enabled. If the object has a ' + 'checksum, it will be verified.', } CHECKSUM_ALGORITHM = { - 'name': 'checksum-algorithm', 'choices': ['CRC32', 'SHA256', 'SHA1', 'CRC32C'], - 'help_text': 'Indicates the algorithm used to create the checksum for the object.' 
+ 'name': 'checksum-algorithm', + 'choices': ['CRC32', 'SHA256', 'SHA1', 'CRC32C'], + 'help_text': 'Indicates the algorithm used to create the checksum for the object.', } -TRANSFER_ARGS = [DRYRUN, QUIET, INCLUDE, EXCLUDE, ACL, - FOLLOW_SYMLINKS, NO_FOLLOW_SYMLINKS, NO_GUESS_MIME_TYPE, - SSE, SSE_C, SSE_C_KEY, SSE_KMS_KEY_ID, SSE_C_COPY_SOURCE, - SSE_C_COPY_SOURCE_KEY, STORAGE_CLASS, GRANTS, - WEBSITE_REDIRECT, CONTENT_TYPE, CACHE_CONTROL, - CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE, - EXPIRES, SOURCE_REGION, ONLY_SHOW_ERRORS, NO_PROGRESS, - PAGE_SIZE, IGNORE_GLACIER_WARNINGS, FORCE_GLACIER_TRANSFER, - REQUEST_PAYER, CHECKSUM_MODE, CHECKSUM_ALGORITHM] +TRANSFER_ARGS = [ + DRYRUN, + QUIET, + INCLUDE, + EXCLUDE, + ACL, + FOLLOW_SYMLINKS, + NO_FOLLOW_SYMLINKS, + NO_GUESS_MIME_TYPE, + SSE, + SSE_C, + SSE_C_KEY, + SSE_KMS_KEY_ID, + SSE_C_COPY_SOURCE, + SSE_C_COPY_SOURCE_KEY, + STORAGE_CLASS, + GRANTS, + WEBSITE_REDIRECT, + CONTENT_TYPE, + CACHE_CONTROL, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + EXPIRES, + SOURCE_REGION, + ONLY_SHOW_ERRORS, + NO_PROGRESS, + PAGE_SIZE, + IGNORE_GLACIER_WARNINGS, + FORCE_GLACIER_TRANSFER, + REQUEST_PAYER, + CHECKSUM_MODE, + CHECKSUM_ALGORITHM, +] class S3Command(BasicCommand): @@ -515,13 +674,26 @@ def _run_main(self, parsed_args, parsed_globals): class ListCommand(S3Command): NAME = 'ls' - DESCRIPTION = ("List S3 objects and common prefixes under a prefix or " - "all S3 buckets. Note that the --output and --no-paginate " - "arguments are ignored for this command.") + DESCRIPTION = ( + "List S3 objects and common prefixes under a prefix or " + "all S3 buckets. Note that the --output and --no-paginate " + "arguments are ignored for this command." + ) USAGE = " or NONE" - ARG_TABLE = [{'name': 'paths', 'nargs': '?', 'default': 's3://', - 'positional_arg': True, 'synopsis': USAGE}, RECURSIVE, - PAGE_SIZE, HUMAN_READABLE, SUMMARIZE, REQUEST_PAYER] + ARG_TABLE = [ + { + 'name': 'paths', + 'nargs': '?', + 'default': 's3://', + 'positional_arg': True, + 'synopsis': USAGE, + }, + RECURSIVE, + PAGE_SIZE, + HUMAN_READABLE, + SUMMARIZE, + REQUEST_PAYER, + ] def _run_main(self, parsed_args, parsed_globals): super(ListCommand, self)._run_main(parsed_args, parsed_globals) @@ -539,10 +711,12 @@ def _run_main(self, parsed_args, parsed_globals): elif parsed_args.dir_op: # Then --recursive was specified. self._list_all_objects_recursive( - bucket, key, parsed_args.page_size, parsed_args.request_payer) + bucket, key, parsed_args.page_size, parsed_args.request_payer + ) else: self._list_all_objects( - bucket, key, parsed_args.page_size, parsed_args.request_payer) + bucket, key, parsed_args.page_size, parsed_args.request_payer + ) if parsed_args.summarize: self._print_summary() if key: @@ -559,12 +733,15 @@ def _run_main(self, parsed_args, parsed_globals): # thrown before reaching the automatic return of rc of zero. 
return 0 - def _list_all_objects(self, bucket, key, page_size=None, - request_payer=None): + def _list_all_objects( + self, bucket, key, page_size=None, request_payer=None + ): paginator = self.client.get_paginator('list_objects_v2') paging_args = { - 'Bucket': bucket, 'Prefix': key, 'Delimiter': '/', - 'PaginationConfig': {'PageSize': page_size} + 'Bucket': bucket, + 'Prefix': key, + 'Delimiter': '/', + 'PaginationConfig': {'PageSize': page_size}, } if request_payer is not None: paging_args['RequestPayer'] = request_payer @@ -594,16 +771,13 @@ def _display_page(self, response_data, use_basename=True): filename = filename_components[-1] else: filename = content['Key'] - print_str = last_mod_str + ' ' + size_str + ' ' + \ - filename + '\n' + print_str = last_mod_str + ' ' + size_str + ' ' + filename + '\n' uni_print(print_str) self._at_first_page = False def _list_all_buckets(self, page_size=None): paginator = self.client.get_paginator('list_buckets') - paging_args = { - 'PaginationConfig': {'PageSize': page_size} - } + paging_args = {'PaginationConfig': {'PageSize': page_size}} iterator = paginator.paginate(**paging_args) @@ -615,12 +789,14 @@ def _list_all_buckets(self, page_size=None): print_str = last_mod_str + ' ' + bucket['Name'] + '\n' uni_print(print_str) - def _list_all_objects_recursive(self, bucket, key, page_size=None, - request_payer=None): + def _list_all_objects_recursive( + self, bucket, key, page_size=None, request_payer=None + ): paginator = self.client.get_paginator('list_objects_v2') paging_args = { - 'Bucket': bucket, 'Prefix': key, - 'PaginationConfig': {'PageSize': page_size} + 'Bucket': bucket, + 'Prefix': key, + 'PaginationConfig': {'PageSize': page_size}, } if request_payer is not None: paging_args['RequestPayer'] = request_payer @@ -642,11 +818,14 @@ def _make_last_mod_str(self, last_mod): """ last_mod = parse(last_mod) last_mod = last_mod.astimezone(tzlocal()) - last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2), - str(last_mod.day).zfill(2), - str(last_mod.hour).zfill(2), - str(last_mod.minute).zfill(2), - str(last_mod.second).zfill(2)) + last_mod_tup = ( + str(last_mod.year), + str(last_mod.month).zfill(2), + str(last_mod.day).zfill(2), + str(last_mod.hour).zfill(2), + str(last_mod.minute).zfill(2), + str(last_mod.second).zfill(2), + ) last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup return last_mod_str.ljust(19, ' ') @@ -677,25 +856,36 @@ class WebsiteCommand(S3Command): NAME = 'website' DESCRIPTION = 'Set the website configuration for a bucket.' 
USAGE = '' - ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, - 'synopsis': USAGE}, INDEX_DOCUMENT, ERROR_DOCUMENT] + ARG_TABLE = [ + { + 'name': 'paths', + 'nargs': 1, + 'positional_arg': True, + 'synopsis': USAGE, + }, + INDEX_DOCUMENT, + ERROR_DOCUMENT, + ] def _run_main(self, parsed_args, parsed_globals): super(WebsiteCommand, self)._run_main(parsed_args, parsed_globals) bucket = self._get_bucket_name(parsed_args.paths[0]) website_configuration = self._build_website_configuration(parsed_args) self.client.put_bucket_website( - Bucket=bucket, WebsiteConfiguration=website_configuration) + Bucket=bucket, WebsiteConfiguration=website_configuration + ) return 0 def _build_website_configuration(self, parsed_args): website_config = {} if parsed_args.index_document is not None: - website_config['IndexDocument'] = \ - {'Suffix': parsed_args.index_document} + website_config['IndexDocument'] = { + 'Suffix': parsed_args.index_document + } if parsed_args.error_document is not None: - website_config['ErrorDocument'] = \ - {'Key': parsed_args.error_document} + website_config['ErrorDocument'] = { + 'Key': parsed_args.error_document + } return website_config def _get_bucket_name(self, path): @@ -722,13 +912,18 @@ class PresignCommand(S3Command): "so the region needs to be configured explicitly." ) USAGE = "" - ARG_TABLE = [{'name': 'path', - 'positional_arg': True, 'synopsis': USAGE}, - {'name': 'expires-in', 'default': 3600, - 'cli_type_name': 'integer', - 'help_text': ( - 'Number of seconds until the pre-signed ' - 'URL expires. Default is 3600 seconds. Maximum is 604800 seconds.')}] + ARG_TABLE = [ + {'name': 'path', 'positional_arg': True, 'synopsis': USAGE}, + { + 'name': 'expires-in', + 'default': 3600, + 'cli_type_name': 'integer', + 'help_text': ( + 'Number of seconds until the pre-signed ' + 'URL expires. Default is 3600 seconds. Maximum is 604800 seconds.' 
+ ), + }, + ] def _run_main(self, parsed_args, parsed_globals): super(PresignCommand, self)._run_main(parsed_args, parsed_globals) @@ -739,7 +934,7 @@ def _run_main(self, parsed_args, parsed_globals): url = self.client.generate_presigned_url( 'get_object', {'Bucket': bucket, 'Key': key}, - ExpiresIn=parsed_args.expires_in + ExpiresIn=parsed_args.expires_in, ) uni_print(url) uni_print('\n') @@ -755,12 +950,15 @@ def _run_main(self, parsed_args, parsed_globals): params=params ) transfer_manager = self._get_transfer_manager( - params=params, - botocore_transfer_client=transfer_client + params=params, botocore_transfer_client=transfer_client ) cmd = CommandArchitecture( - self._session, self.NAME, params, - transfer_manager, source_client, transfer_client + self._session, + self.NAME, + params, + transfer_manager, + source_client, + transfer_client, ) cmd.create_instructions() return cmd.run() @@ -778,8 +976,8 @@ def _convert_path_args(self, parsed_args): def _get_params(self, parsed_args, parsed_globals, session): cmd_params = CommandParameters( - self.NAME, vars(parsed_args), self.USAGE, - session, parsed_globals) + self.NAME, vars(parsed_args), self.USAGE, session, parsed_globals + ) cmd_params.add_region(parsed_globals) cmd_params.add_endpoint_url(parsed_globals) cmd_params.add_verify_ssl(parsed_globals) @@ -791,7 +989,8 @@ def _get_params(self, parsed_args, parsed_globals, session): def _get_source_and_transfer_clients(self, params): client_factory = ClientFactory(self._session) source_client = client_factory.create_client( - params, is_source_client=True) + params, is_source_client=True + ) transfer_client = client_factory.create_client(params) return source_client, transfer_client @@ -805,52 +1004,98 @@ def _get_transfer_manager(self, params, botocore_transfer_client): def _get_runtime_config(self): return transferconfig.RuntimeConfig().build_config( - **self._session.get_scoped_config().get('s3', {})) + **self._session.get_scoped_config().get('s3', {}) + ) class CpCommand(S3TransferCommand): NAME = 'cp' - DESCRIPTION = "Copies a local file or S3 object to another location " \ - "locally or in S3." - USAGE = " or " \ - "or " - ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, - 'synopsis': USAGE}] + TRANSFER_ARGS + \ - [METADATA, COPY_PROPS, METADATA_DIRECTIVE, EXPECTED_SIZE, - RECURSIVE] + DESCRIPTION = ( + "Copies a local file or S3 object to another location " + "locally or in S3." + ) + USAGE = " or " "or " + ARG_TABLE = ( + [ + { + 'name': 'paths', + 'nargs': 2, + 'positional_arg': True, + 'synopsis': USAGE, + } + ] + + TRANSFER_ARGS + + [METADATA, COPY_PROPS, METADATA_DIRECTIVE, EXPECTED_SIZE, RECURSIVE] + ) class MvCommand(S3TransferCommand): NAME = 'mv' DESCRIPTION = BasicCommand.FROM_FILE('s3', 'mv', '_description.rst') - USAGE = " or " \ - "or " - ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, - 'synopsis': USAGE}] + TRANSFER_ARGS +\ - [METADATA, COPY_PROPS, METADATA_DIRECTIVE, RECURSIVE, - VALIDATE_SAME_S3_PATHS] + USAGE = " or " "or " + ARG_TABLE = ( + [ + { + 'name': 'paths', + 'nargs': 2, + 'positional_arg': True, + 'synopsis': USAGE, + } + ] + + TRANSFER_ARGS + + [ + METADATA, + COPY_PROPS, + METADATA_DIRECTIVE, + RECURSIVE, + VALIDATE_SAME_S3_PATHS, + ] + ) class RmCommand(S3TransferCommand): NAME = 'rm' DESCRIPTION = "Deletes an S3 object." 
USAGE = "<S3Uri>" - ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, - 'synopsis': USAGE}, DRYRUN, QUIET, RECURSIVE, REQUEST_PAYER, - INCLUDE, EXCLUDE, ONLY_SHOW_ERRORS, PAGE_SIZE] + ARG_TABLE = [ + { + 'name': 'paths', + 'nargs': 1, + 'positional_arg': True, + 'synopsis': USAGE, + }, + DRYRUN, + QUIET, + RECURSIVE, + REQUEST_PAYER, + INCLUDE, + EXCLUDE, + ONLY_SHOW_ERRORS, + PAGE_SIZE, + ] class SyncCommand(S3TransferCommand): NAME = 'sync' - DESCRIPTION = "Syncs directories and S3 prefixes. Recursively copies " \ "new and updated files from the source directory to " \ "the destination. Only creates folders in the destination " \ "if they contain one or more files." - USAGE = "<LocalPath> <S3Uri> or <S3Uri> " \ "<LocalPath> or <S3Uri> <S3Uri>" - ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, - 'synopsis': USAGE}] + TRANSFER_ARGS + \ - [METADATA, COPY_PROPS, METADATA_DIRECTIVE] + DESCRIPTION = ( + "Syncs directories and S3 prefixes. Recursively copies " + "new and updated files from the source directory to " + "the destination. Only creates folders in the destination " + "if they contain one or more files." + ) + USAGE = "<LocalPath> <S3Uri> or <S3Uri> " "<LocalPath> or <S3Uri> <S3Uri>" + ARG_TABLE = ( + [ + { + 'name': 'paths', + 'nargs': 2, + 'positional_arg': True, + 'synopsis': USAGE, + } + ] + + TRANSFER_ARGS + + [METADATA, COPY_PROPS, METADATA_DIRECTIVE] + ) class MbCommand(S3Command): @@ -886,7 +1131,7 @@ def _run_main(self, parsed_args, parsed_globals): except Exception as e: uni_print( "make_bucket failed: %s %s\n" % (parsed_args.path, e), - sys.stderr + sys.stderr, ) return 1 @@ -901,8 +1146,10 @@ class RbCommand(S3Command): "deleted." ) USAGE = "<S3Uri>" - ARG_TABLE = [{'name': 'path', 'positional_arg': True, - 'synopsis': USAGE}, FORCE] + ARG_TABLE = [ + {'name': 'path', 'positional_arg': True, 'synopsis': USAGE}, + FORCE, + ] def _run_main(self, parsed_args, parsed_globals): super(RbCommand, self)._run_main(parsed_args, parsed_globals) @@ -929,7 +1176,7 @@ def _run_main(self, parsed_args, parsed_globals): except Exception as e: uni_print( "remove_bucket failed: %s %s\n" % (parsed_args.path, e), - sys.stderr + sys.stderr, ) return 1 @@ -940,7 +1187,8 @@ def _force(self, path, parsed_globals): if rc != 0: raise RuntimeError( "remove_bucket failed: Unable to delete all objects in the " - "bucket, bucket will not be deleted.") + "bucket, bucket will not be deleted." + ) class CommandArchitecture(object): @@ -953,8 +1201,16 @@ class CommandArchitecture(object): list of instructions to wire together an assortment of generators to perform the command. """ - def __init__(self, session, cmd, parameters, transfer_manager, - source_client, transfer_client): + + def __init__( + self, + session, + cmd, + parameters, + transfer_manager, + source_client, + transfer_client, + ): self.session = session self.cmd = cmd self.parameters = parameters @@ -992,14 +1248,16 @@ def choose_sync_strategies(self): """ sync_strategies = {} # Set the default strategies. - sync_strategies['file_at_src_and_dest_sync_strategy'] = \ + sync_strategies['file_at_src_and_dest_sync_strategy'] = ( SizeAndLastModifiedSync() + ) sync_strategies['file_not_at_dest_sync_strategy'] = MissingFileSync() sync_strategies['file_not_at_src_sync_strategy'] = NeverSync() # Determine what strategies to override if any.
responses = self.session.emit( - 'choosing-s3-sync-strategy', params=self.parameters) + 'choosing-s3-sync-strategy', params=self.parameters + ) if responses is not None: for response in responses: override_sync_strategy = response[1] @@ -1042,85 +1300,106 @@ def run(self): 'locals3': 'upload', 's3s3': 'copy', 's3local': 'download', - 's3': 'delete' + 's3': 'delete', } result_queue = queue.Queue() operation_name = cmd_translation[paths_type] fgen_kwargs = { - 'client': self._source_client, 'operation_name': operation_name, + 'client': self._source_client, + 'operation_name': operation_name, 'follow_symlinks': self.parameters['follow_symlinks'], 'page_size': self.parameters['page_size'], 'result_queue': result_queue, } rgen_kwargs = { - 'client': self._client, 'operation_name': '', + 'client': self._client, + 'operation_name': '', 'follow_symlinks': self.parameters['follow_symlinks'], 'page_size': self.parameters['page_size'], 'result_queue': result_queue, } - fgen_request_parameters = \ + fgen_request_parameters = ( self._get_file_generator_request_parameters_skeleton() + ) self._map_request_payer_params(fgen_request_parameters) self._map_sse_c_params(fgen_request_parameters, paths_type) fgen_kwargs['request_parameters'] = fgen_request_parameters - rgen_request_parameters = \ + rgen_request_parameters = ( self._get_file_generator_request_parameters_skeleton() + ) self._map_request_payer_params(rgen_request_parameters) rgen_kwargs['request_parameters'] = rgen_request_parameters file_generator = FileGenerator(**fgen_kwargs) rev_generator = FileGenerator(**rgen_kwargs) stream_dest_path, stream_compare_key = find_dest_path_comp_key(files) - stream_file_info = [FileInfo(src=files['src']['path'], - dest=stream_dest_path, - compare_key=stream_compare_key, - src_type=files['src']['type'], - dest_type=files['dest']['type'], - operation_name=operation_name, - client=self._client, - is_stream=True)] + stream_file_info = [ + FileInfo( + src=files['src']['path'], + dest=stream_dest_path, + compare_key=stream_compare_key, + src_type=files['src']['type'], + dest_type=files['dest']['type'], + operation_name=operation_name, + client=self._client, + is_stream=True, + ) + ] file_info_builder = FileInfoBuilder( - self._client, self._source_client, self.parameters) + self._client, self._source_client, self.parameters + ) s3_transfer_handler = S3TransferHandlerFactory(self.parameters)( - self._transfer_manager, result_queue) + self._transfer_manager, result_queue + ) sync_strategies = self.choose_sync_strategies() command_dict = {} if self.cmd == 'sync': - command_dict = {'setup': [files, rev_files], - 'file_generator': [file_generator, - rev_generator], - 'filters': [create_filter(self.parameters), - create_filter(self.parameters)], - 'comparator': [Comparator(**sync_strategies)], - 'file_info_builder': [file_info_builder], - 's3_handler': [s3_transfer_handler]} + command_dict = { + 'setup': [files, rev_files], + 'file_generator': [file_generator, rev_generator], + 'filters': [ + create_filter(self.parameters), + create_filter(self.parameters), + ], + 'comparator': [Comparator(**sync_strategies)], + 'file_info_builder': [file_info_builder], + 's3_handler': [s3_transfer_handler], + } elif self.cmd == 'cp' and self.parameters['is_stream']: - command_dict = {'setup': [stream_file_info], - 's3_handler': [s3_transfer_handler]} + command_dict = { + 'setup': [stream_file_info], + 's3_handler': [s3_transfer_handler], + } elif self.cmd == 'cp': - command_dict = {'setup': [files], - 'file_generator': [file_generator], - 
'filters': [create_filter(self.parameters)], - 'file_info_builder': [file_info_builder], - 's3_handler': [s3_transfer_handler]} + command_dict = { + 'setup': [files], + 'file_generator': [file_generator], + 'filters': [create_filter(self.parameters)], + 'file_info_builder': [file_info_builder], + 's3_handler': [s3_transfer_handler], + } elif self.cmd == 'rm': - command_dict = {'setup': [files], - 'file_generator': [file_generator], - 'filters': [create_filter(self.parameters)], - 'file_info_builder': [file_info_builder], - 's3_handler': [s3_transfer_handler]} + command_dict = { + 'setup': [files], + 'file_generator': [file_generator], + 'filters': [create_filter(self.parameters)], + 'file_info_builder': [file_info_builder], + 's3_handler': [s3_transfer_handler], + } elif self.cmd == 'mv': - command_dict = {'setup': [files], - 'file_generator': [file_generator], - 'filters': [create_filter(self.parameters)], - 'file_info_builder': [file_info_builder], - 's3_handler': [s3_transfer_handler]} + command_dict = { + 'setup': [files], + 'file_generator': [file_generator], + 'filters': [create_filter(self.parameters)], + 'file_info_builder': [file_info_builder], + 's3_handler': [s3_transfer_handler], + } files = command_dict['setup'] while self.instructions: @@ -1151,22 +1430,16 @@ def run(self): return rc def _get_file_generator_request_parameters_skeleton(self): - return { - 'HeadObject': {}, - 'ListObjects': {}, - 'ListObjectsV2': {} - } + return {'HeadObject': {}, 'ListObjects': {}, 'ListObjectsV2': {}} def _map_request_payer_params(self, request_parameters): RequestParamsMapper.map_head_object_params( - request_parameters['HeadObject'], { - 'request_payer': self.parameters.get('request_payer') - } + request_parameters['HeadObject'], + {'request_payer': self.parameters.get('request_payer')}, ) RequestParamsMapper.map_list_objects_v2_params( - request_parameters['ListObjectsV2'], { - 'request_payer': self.parameters.get('request_payer') - } + request_parameters['ListObjectsV2'], + {'request_payer': self.parameters.get('request_payer')}, ) def _map_sse_c_params(self, request_parameters, paths_type): @@ -1177,13 +1450,15 @@ def _map_sse_c_params(self, request_parameters, paths_type): # not need any of these because it is used only for sync operations # which only use ListObjects which does not require HeadObject. RequestParamsMapper.map_head_object_params( - request_parameters['HeadObject'], self.parameters) + request_parameters['HeadObject'], self.parameters + ) if paths_type == 's3s3': RequestParamsMapper.map_head_object_params( - request_parameters['HeadObject'], { + request_parameters['HeadObject'], + { 'sse_c': self.parameters.get('sse_c_copy_source'), - 'sse_c_key': self.parameters.get('sse_c_copy_source_key') - } + 'sse_c_key': self.parameters.get('sse_c_copy_source_key'), + }, ) @@ -1202,8 +1477,10 @@ class CommandParameters(object): This class is used to do some initial error checking based on the parameters and arguments passed to the command line. """ - def __init__(self, cmd, parameters, usage, - session=None, parsed_globals=None): + + def __init__( + self, cmd, parameters, usage, session=None, parsed_globals=None + ): """ Stores command name and parameters. Ensures that the ``dir_op`` flag is true if a certain command is being used.
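The command_dict tables above read most naturally as a pipeline: 'setup' seeds an iterable, and each later component wraps the one before it. A minimal sketch of that chaining pattern follows; the names are illustrative stand-ins, not the CLI's actual classes.

    # Each stage consumes the iterable produced by the previous stage,
    # mirroring how run() threads `files` through the instruction list.
    def file_generator(paths):
        for path in paths:
            yield path

    def make_filter(excluded_suffix):
        # stand-in for create_filter()'s include/exclude handling
        def _filter(files):
            for f in files:
                if not f.endswith(excluded_suffix):
                    yield f
        return _filter

    def s3_handler(files):
        # stand-in for the transfer handler; drains the chain
        return [f for f in files]

    files = ['a.txt', 'b.tmp']  # the 'setup' component
    for stage in (file_generator, make_filter('.tmp'), s3_handler):
        files = stage(files)
    # files == ['a.txt']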
@@ -1252,9 +1529,10 @@ def add_paths(self, paths): self._validate_not_s3_express_bucket_for_sync() def _validate_not_s3_express_bucket_for_sync(self): - if self.cmd == 'sync' and \ - (self._is_s3express_path(self.parameters['src']) or - self._is_s3express_path(self.parameters['dest'])): + if self.cmd == 'sync' and ( + self._is_s3express_path(self.parameters['src']) + or self._is_s3express_path(self.parameters['dest']) + ): raise ParamValidationError( "Cannot use sync command with a directory bucket." ) @@ -1280,7 +1558,7 @@ def _validate_streaming_paths(self): def _validate_path_args(self): # If we're using a mv command, you can't copy the object onto itself. params = self.parameters - if self.cmd == 'mv' and params['paths_type']=='s3s3': + if self.cmd == 'mv' and params['paths_type'] == 's3s3': self._raise_if_mv_same_paths(params['src'], params['dest']) if self._should_validate_same_underlying_s3_paths(): self._validate_same_underlying_s3_paths() @@ -1291,20 +1569,20 @@ def _validate_path_args(self): self._raise_if_paths_type_incorrect_for_param( CHECKSUM_ALGORITHM['name'], params['paths_type'], - ['locals3', 's3s3']) + ['locals3', 's3s3'], + ) if params.get('checksum_mode'): self._raise_if_paths_type_incorrect_for_param( - CHECKSUM_MODE['name'], - params['paths_type'], - ['s3local']) + CHECKSUM_MODE['name'], params['paths_type'], ['s3local'] + ) # If the user provided local path does not exist, hard fail because # we know that we will not be able to upload the file. if 'locals3' == params['paths_type'] and not params['is_stream']: if not os.path.exists(params['src']): raise RuntimeError( - 'The user-provided path %s does not exist.' % - params['src']) + 'The user-provided path %s does not exist.' % params['src'] + ) # If the operation is downloading to a directory that does not exist, # create the directories so no warnings are thrown during the syncing # process. 
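The checksum validations above gate each parameter on the transfer direction. A rough, hypothetical stand-in for _raise_if_paths_type_incorrect_for_param makes the rule concrete:

    ALLOWED = {
        'checksum-algorithm': ['locals3', 's3s3'],  # uploads, S3-to-S3 copies
        'checksum-mode': ['s3local'],               # downloads only
    }

    def raise_if_incorrect(param, paths_type):
        allowed = ALLOWED[param]
        if paths_type not in allowed:
            raise ValueError(
                'Expected %s to be used with one of: %s' % (param, allowed)
            )

    raise_if_incorrect('checksum-mode', 's3local')    # passes
    # raise_if_incorrect('checksum-mode', 'locals3')  # would raise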
@@ -1328,19 +1606,27 @@ def _same_key(self, src, dest): def _validate_same_s3_paths_enabled(self): validate_env_var = ensure_boolean( - os.environ.get('AWS_CLI_S3_MV_VALIDATE_SAME_S3_PATHS')) - return (self.parameters.get('validate_same_s3_paths') or - validate_env_var) + os.environ.get('AWS_CLI_S3_MV_VALIDATE_SAME_S3_PATHS') + ) + return ( + self.parameters.get('validate_same_s3_paths') or validate_env_var + ) def _should_emit_validate_s3_paths_warning(self): is_same_key = self._same_key( - self.parameters['src'], self.parameters['dest']) + self.parameters['src'], self.parameters['dest'] + ) src_has_underlying_path = S3PathResolver.has_underlying_s3_path( - self.parameters['src']) + self.parameters['src'] + ) dest_has_underlying_path = S3PathResolver.has_underlying_s3_path( - self.parameters['dest']) - return (is_same_key and not self._validate_same_s3_paths_enabled() and - (src_has_underlying_path or dest_has_underlying_path)) + self.parameters['dest'] + ) + return ( + is_same_key + and not self._validate_same_s3_paths_enabled() + and (src_has_underlying_path or dest_has_underlying_path) + ) def _emit_validate_s3_paths_warning(self): msg = ( @@ -1356,19 +1642,20 @@ def _emit_validate_s3_paths_warning(self): def _should_validate_same_underlying_s3_paths(self): is_same_key = self._same_key( - self.parameters['src'], self.parameters['dest']) + self.parameters['src'], self.parameters['dest'] + ) return is_same_key and self._validate_same_s3_paths_enabled() def _validate_same_underlying_s3_paths(self): src_paths = S3PathResolver.from_session( self._session, self.parameters.get('source_region', self._parsed_globals.region), - self._parsed_globals.verify_ssl + self._parsed_globals.verify_ssl, ).resolve_underlying_s3_paths(self.parameters['src']) dest_paths = S3PathResolver.from_session( self._session, self._parsed_globals.region, - self._parsed_globals.verify_ssl + self._parsed_globals.verify_ssl, ).resolve_underlying_s3_paths(self.parameters['dest']) for src_path in src_paths: for dest_path in dest_paths: @@ -1381,13 +1668,15 @@ def _raise_if_mv_same_paths(self, src, dest): f"{self.parameters['src']} - {self.parameters['dest']}" ) - def _raise_if_paths_type_incorrect_for_param(self, param, paths_type, allowed_paths): + def _raise_if_paths_type_incorrect_for_param( + self, param, paths_type, allowed_paths + ): if paths_type not in allowed_paths: expected_usage_map = { 'locals3': '<LocalPath> <S3Uri>', 's3s3': '<S3Uri> <S3Uri>', 's3local': '<S3Uri> <LocalPath>', 's3': '<S3Uri>', } raise ParamValidationError( f"Expected {param} parameter to be used with one of the following path formats: " @@ -1410,14 +1699,16 @@ def check_path_type(self, paths): This initial check ensures that the path types for the specified command are correct. """ - template_type = {'s3s3': ['cp', 'sync', 'mv'], - 's3local': ['cp', 'sync', 'mv'], - 'locals3': ['cp', 'sync', 'mv'], - 's3': ['mb', 'rb', 'rm'], - 'local': [], 'locallocal': []} + template_type = { + 's3s3': ['cp', 'sync', 'mv'], + 's3local': ['cp', 'sync', 'mv'], + 'locals3': ['cp', 'sync', 'mv'], + 's3': ['mb', 'rb', 'rm'], + 'local': [], + 'locallocal': [], + } paths_type = '' - usage = "usage: aws s3 %s %s" % (self.cmd, - self.usage) + usage = "usage: aws s3 %s %s" % (self.cmd, self.usage) for i in range(len(paths)): if paths[i].startswith('s3://'): paths_type = paths_type + 's3' @@ -1438,8 +1729,9 @@ def add_endpoint_url(self, parsed_globals): Adds endpoint_url to the parameters.
""" if 'endpoint_url' in parsed_globals: - self.parameters['endpoint_url'] = getattr(parsed_globals, - 'endpoint_url') + self.parameters['endpoint_url'] = getattr( + parsed_globals, 'endpoint_url' + ) else: self.parameters['endpoint_url'] = None @@ -1448,7 +1740,8 @@ def add_verify_ssl(self, parsed_globals): def add_sign_request(self, parsed_globals): self.parameters['sign_request'] = getattr( - parsed_globals, 'sign_request', True) + parsed_globals, 'sign_request', True + ) def add_page_size(self, parsed_args): self.parameters['page_size'] = getattr(parsed_args, 'page_size', None) diff --git a/awscli/customizations/s3/subscribers.py b/awscli/customizations/s3/subscribers.py index 34bb2815d97d..73bd4c61e47b 100644 --- a/awscli/customizations/s3/subscribers.py +++ b/awscli/customizations/s3/subscribers.py @@ -15,12 +15,10 @@ import os import time +from awscli.customizations.s3 import utils from botocore.utils import percent_encode_sequence from s3transfer.subscribers import BaseSubscriber -from awscli.customizations.s3 import utils - - LOGGER = logging.getLogger(__name__) @@ -37,10 +35,10 @@ class OnDoneFilteredSubscriber(BaseSubscriber): It is really a convenience class so developers do not have to have to constantly remember to have a general try/except around future.result() """ + def on_done(self, future, **kwargs): future_exception = None try: - future.result() except Exception as e: future_exception = e @@ -62,6 +60,7 @@ class ProvideSizeSubscriber(BaseSubscriber): """ A subscriber which provides the transfer size before it's queued. """ + def __init__(self, size): self.size = size @@ -73,12 +72,14 @@ def on_queued(self, future, **kwargs): else: LOGGER.debug( 'Not providing transfer size. Future: %s does not offer' - 'the capability to notify the size of a transfer', future + 'the capability to notify the size of a transfer', + future, ) class DeleteSourceSubscriber(OnDoneFilteredSubscriber): """A subscriber which deletes the source of the transfer.""" + def _on_success(self, future): try: self._delete_source(future) @@ -91,6 +92,7 @@ def _delete_source(self, future): class DeleteSourceObjectSubscriber(DeleteSourceSubscriber): """A subscriber which deletes an object.""" + def __init__(self, client): self._client = client @@ -104,16 +106,18 @@ def _delete_source(self, future): call_args = future.meta.call_args delete_object_kwargs = { 'Bucket': self._get_bucket(call_args), - 'Key': self._get_key(call_args) + 'Key': self._get_key(call_args), } if call_args.extra_args.get('RequestPayer'): delete_object_kwargs['RequestPayer'] = call_args.extra_args[ - 'RequestPayer'] + 'RequestPayer' + ] self._client.delete_object(**delete_object_kwargs) class DeleteCopySourceObjectSubscriber(DeleteSourceObjectSubscriber): """A subscriber which deletes the copy source.""" + def _get_bucket(self, call_args): return call_args.copy_source['Bucket'] @@ -123,6 +127,7 @@ def _get_key(self, call_args): class DeleteSourceFileSubscriber(DeleteSourceSubscriber): """A subscriber which deletes a file.""" + def _delete_source(self, future): os.remove(future.meta.call_args.fileobj) @@ -146,6 +151,7 @@ def _get_filename(self, future): class ProvideLastModifiedTimeSubscriber(OnDoneFilteredSubscriber): """Sets utime for a downloaded file""" + def __init__(self, last_modified_time, result_queue): self._last_modified_time = last_modified_time self._result_queue = result_queue @@ -159,13 +165,16 @@ def _on_success(self, future, **kwargs): except Exception as e: warning_message = ( 'Successfully Downloaded %s but was unable 
to update the ' - 'last modified time. %s' % (filename, e)) + 'last modified time. %s' % (filename, e) + ) self._result_queue.put( - utils.create_warning(filename, warning_message)) + utils.create_warning(filename, warning_message) + ) class DirectoryCreatorSubscriber(BaseSubscriber): """Creates a directory to download if it does not exist""" + def on_queued(self, future, **kwargs): d = os.path.dirname(future.meta.call_args.fileobj) try: @@ -174,7 +183,8 @@ def on_queued(self, future, **kwargs): except OSError as e: if not e.errno == errno.EEXIST: raise CreateDirectoryError( - "Could not create directory %s: %s" % (d, e)) + "Could not create directory %s: %s" % (d, e) + ) class CopyPropsSubscriberFactory(object): @@ -184,8 +194,9 @@ def __init__(self, client, transfer_config, cli_params): self._cli_params = cli_params def get_subscribers(self, fileinfo): - copy_props = self._cli_params.get( - 'copy_props', 'default').replace('-', '_') + copy_props = self._cli_params.get('copy_props', 'default').replace( + '-', '_' + ) return getattr(self, '_get_%s_subscribers' % copy_props)(fileinfo) def _get_none_subscribers(self, fileinfo): @@ -197,16 +208,18 @@ def _get_none_subscribers(self, fileinfo): def _get_metadata_directive_subscribers(self, fileinfo): return [ self._create_metadata_directive_props_subscriber(fileinfo), - ReplaceTaggingDirectiveSubscriber() + ReplaceTaggingDirectiveSubscriber(), ] def _get_default_subscribers(self, fileinfo): return [ self._create_metadata_directive_props_subscriber(fileinfo), SetTagsSubscriber( - self._client, self._transfer_config, self._cli_params, + self._client, + self._transfer_config, + self._cli_params, source_client=fileinfo.source_client, - ) + ), ] def _create_metadata_directive_props_subscriber(self, fileinfo): @@ -216,8 +229,9 @@ def _create_metadata_directive_props_subscriber(self, fileinfo): 'cli_params': self._cli_params, } if not self._cli_params.get('dir_op'): - subscriber_kwargs[ - 'head_object_response'] = fileinfo.associated_response_data + subscriber_kwargs['head_object_response'] = ( + fileinfo.associated_response_data + ) return SetMetadataDirectivePropsSubscriber(**subscriber_kwargs) @@ -247,8 +261,9 @@ class SetMetadataDirectivePropsSubscriber(BaseSubscriber): 'Metadata', ] - def __init__(self, client, transfer_config, cli_params, - head_object_response=None): + def __init__( + self, client, transfer_config, cli_params, head_object_response=None + ): self._client = client self._transfer_config = transfer_config self._cli_params = cli_params @@ -280,7 +295,8 @@ def _get_head_object_response(self, future): 'Key': copy_source['Key'], } utils.RequestParamsMapper.map_head_object_params( - head_object_params, self._cli_params) + head_object_params, self._cli_params + ) return self._client.head_object(**head_object_params) def _inject_metadata_props(self, future, head_object_response): @@ -333,19 +349,14 @@ def _put_object_tagging(self, bucket, key, tag_set): extra_args, self._cli_params ) self._client.put_object_tagging( - Bucket=bucket, - Key=key, - Tagging={'TagSet': tag_set}, - **extra_args + Bucket=bucket, Key=key, Tagging={'TagSet': tag_set}, **extra_args ) def _delete_object(self, bucket, key): - params = { - 'Bucket': bucket, - 'Key': key - } + params = {'Bucket': bucket, 'Key': key} utils.RequestParamsMapper.map_delete_object_params( - params, self._cli_params) + params, self._cli_params + ) self._client.delete_object(**params) def _get_bucket_key_from_copy_source(self, future): @@ -358,16 +369,20 @@ def _get_tags(self, bucket, key): 
extra_args, self._cli_params ) get_tags_response = self._source_client.get_object_tagging( - Bucket=bucket, Key=key, **extra_args) + Bucket=bucket, Key=key, **extra_args + ) return get_tags_response['TagSet'] def _fits_in_tagging_header(self, tagging_header): - return len( - tagging_header.encode('utf-8')) <= self._MAX_TAGGING_HEADER_SIZE + return ( + len(tagging_header.encode('utf-8')) + <= self._MAX_TAGGING_HEADER_SIZE + ) def _serialize_to_header_value(self, tags): return percent_encode_sequence( - [(tag['Key'], tag['Value']) for tag in tags]) + [(tag['Key'], tag['Value']) for tag in tags] + ) def _is_multipart_copy(self, future): return future.meta.size >= self._transfer_config.multipart_threshold diff --git a/awscli/customizations/s3/syncstrategy/base.py b/awscli/customizations/s3/syncstrategy/base.py index 6f134082358c..e903516cf24e 100644 --- a/awscli/customizations/s3/syncstrategy/base.py +++ b/awscli/customizations/s3/syncstrategy/base.py @@ -14,11 +14,13 @@ from awscli.customizations.exceptions import ParamValidationError - LOG = logging.getLogger(__name__) -VALID_SYNC_TYPES = ['file_at_src_and_dest', 'file_not_at_dest', - 'file_not_at_src'] +VALID_SYNC_TYPES = [ + 'file_at_src_and_dest', + 'file_not_at_dest', + 'file_not_at_src', +] class BaseSync(object): @@ -69,8 +71,7 @@ def _check_sync_type(self, sync_type): if sync_type not in VALID_SYNC_TYPES: raise ParamValidationError( "Unknown sync_type: %s.\n" - "Valid options are %s." % - (sync_type, VALID_SYNC_TYPES) + "Valid options are %s." % (sync_type, VALID_SYNC_TYPES) ) @property @@ -80,8 +81,7 @@ def sync_type(self): def register_strategy(self, session): """Registers the sync strategy class to the given session.""" - session.register('building-arg-table.s3_sync', - self.add_sync_argument) + session.register('building-arg-table.s3_sync', self.add_sync_argument) session.register('choosing-s3-sync-strategy', self.use_sync_strategy) def determine_should_sync(self, src_file, dest_file): @@ -118,7 +118,7 @@ def determine_should_sync(self, src_file, dest_file): 'file_not_at_dest': refers to ``src_file`` 'file_not_at_src': refers to ``dest_file`` - """ + """ raise NotImplementedError("determine_should_sync") @@ -187,8 +187,9 @@ def total_seconds(self, td): :param td: The difference between two datetime objects. """ - return (td.microseconds + (td.seconds + td.days * 24 * - 3600) * 10**6) / 10**6 + return ( + td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6 + ) / 10**6 def compare_size(self, src_file, dest_file): """ @@ -218,7 +219,6 @@ def compare_time(self, src_file, dest_file): # at the source location. 
return False elif cmd == "download": - if self.total_seconds(delta) <= 0: return True else: @@ -228,7 +228,6 @@ def compare_time(self, src_file, dest_file): class SizeAndLastModifiedSync(BaseSync): - def determine_should_sync(self, src_file, dest_file): same_size = self.compare_size(src_file, dest_file) same_last_modified_time = self.compare_time(src_file, dest_file) @@ -236,9 +235,13 @@ def determine_should_sync(self, src_file, dest_file): if should_sync: LOG.debug( "syncing: %s -> %s, size: %s -> %s, modified time: %s -> %s", - src_file.src, src_file.dest, - src_file.size, dest_file.size, - src_file.last_update, dest_file.last_update) + src_file.src, + src_file.dest, + src_file.size, + dest_file.size, + src_file.last_update, + dest_file.last_update, + ) return should_sync @@ -255,6 +258,9 @@ def __init__(self, sync_type='file_not_at_dest'): super(MissingFileSync, self).__init__(sync_type) def determine_should_sync(self, src_file, dest_file): - LOG.debug("syncing: %s -> %s, file does not exist at destination", - src_file.src, src_file.dest) + LOG.debug( + "syncing: %s -> %s, file does not exist at destination", + src_file.src, + src_file.dest, + ) return True diff --git a/awscli/customizations/s3/syncstrategy/delete.py b/awscli/customizations/s3/syncstrategy/delete.py index 9858b264e44c..e5512b17c116 100644 --- a/awscli/customizations/s3/syncstrategy/delete.py +++ b/awscli/customizations/s3/syncstrategy/delete.py @@ -14,24 +14,29 @@ from awscli.customizations.s3.syncstrategy.base import BaseSync - LOG = logging.getLogger(__name__) -DELETE = {'name': 'delete', 'action': 'store_true', - 'help_text': ( - "Files that exist in the destination but not in the source are " - "deleted during sync. Note that files excluded by filters are " - "excluded from deletion.")} +DELETE = { + 'name': 'delete', + 'action': 'store_true', + 'help_text': ( + "Files that exist in the destination but not in the source are " + "deleted during sync. Note that files excluded by filters are " + "excluded from deletion." + ), +} class DeleteSync(BaseSync): - ARGUMENT = DELETE def determine_should_sync(self, src_file, dest_file): dest_file.operation_name = 'delete' - LOG.debug("syncing: (None) -> %s (remove), file does not " - "exist at source (%s) and delete mode enabled", - dest_file.src, dest_file.dest) + LOG.debug( + "syncing: (None) -> %s (remove), file does not " + "exist at source (%s) and delete mode enabled", + dest_file.src, + dest_file.dest, + ) return True diff --git a/awscli/customizations/s3/syncstrategy/exacttimestamps.py b/awscli/customizations/s3/syncstrategy/exacttimestamps.py index 564e6eeb4bf4..7921e9ad94a7 100644 --- a/awscli/customizations/s3/syncstrategy/exacttimestamps.py +++ b/awscli/customizations/s3/syncstrategy/exacttimestamps.py @@ -14,21 +14,23 @@ from awscli.customizations.s3.syncstrategy.base import SizeAndLastModifiedSync - LOG = logging.getLogger(__name__) -EXACT_TIMESTAMPS = {'name': 'exact-timestamps', 'action': 'store_true', - 'help_text': ( - 'When syncing from S3 to local, same-sized ' - 'items will be ignored only when the timestamps ' - 'match exactly. The default behavior is to ignore ' - 'same-sized items unless the local version is newer ' - 'than the S3 version.')} +EXACT_TIMESTAMPS = { + 'name': 'exact-timestamps', + 'action': 'store_true', + 'help_text': ( + 'When syncing from S3 to local, same-sized ' + 'items will be ignored only when the timestamps ' + 'match exactly. 
The default behavior is to ignore ' + 'same-sized items unless the local version is newer ' + 'than the S3 version.' + ), +} class ExactTimestampsSync(SizeAndLastModifiedSync): - ARGUMENT = EXACT_TIMESTAMPS def compare_time(self, src_file, dest_file): @@ -39,5 +41,6 @@ def compare_time(self, src_file, dest_file): if cmd == 'download': return self.total_seconds(delta) == 0 else: - return super(ExactTimestampsSync, self).compare_time(src_file, - dest_file) + return super(ExactTimestampsSync, self).compare_time( + src_file, dest_file + ) diff --git a/awscli/customizations/s3/syncstrategy/register.py b/awscli/customizations/s3/syncstrategy/register.py index b75674dcb99b..13f2c35c0620 100644 --- a/awscli/customizations/s3/syncstrategy/register.py +++ b/awscli/customizations/s3/syncstrategy/register.py @@ -10,14 +10,16 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.s3.syncstrategy.sizeonly import SizeOnlySync -from awscli.customizations.s3.syncstrategy.exacttimestamps import \ - ExactTimestampsSync from awscli.customizations.s3.syncstrategy.delete import DeleteSync +from awscli.customizations.s3.syncstrategy.exacttimestamps import ( + ExactTimestampsSync, +) +from awscli.customizations.s3.syncstrategy.sizeonly import SizeOnlySync -def register_sync_strategy(session, strategy_cls, - sync_type='file_at_src_and_dest'): +def register_sync_strategy( + session, strategy_cls, sync_type='file_at_src_and_dest' +): """Registers a single sync strategy :param session: The session that the sync strategy is being registered to. diff --git a/awscli/customizations/s3/syncstrategy/sizeonly.py b/awscli/customizations/s3/syncstrategy/sizeonly.py index e83d0fd7be5d..109e21fa4e7c 100644 --- a/awscli/customizations/s3/syncstrategy/sizeonly.py +++ b/awscli/customizations/s3/syncstrategy/sizeonly.py @@ -14,24 +14,30 @@ from awscli.customizations.s3.syncstrategy.base import BaseSync - LOG = logging.getLogger(__name__) -SIZE_ONLY = {'name': 'size-only', 'action': 'store_true', - 'help_text': ( - 'Makes the size of each key the only criteria used to ' - 'decide whether to sync from source to destination.')} +SIZE_ONLY = { + 'name': 'size-only', + 'action': 'store_true', + 'help_text': ( + 'Makes the size of each key the only criteria used to ' + 'decide whether to sync from source to destination.' + ), +} class SizeOnlySync(BaseSync): - ARGUMENT = SIZE_ONLY def determine_should_sync(self, src_file, dest_file): same_size = self.compare_size(src_file, dest_file) should_sync = not same_size if should_sync: - LOG.debug("syncing: %s -> %s, size_changed: %s", - src_file.src, src_file.dest, not same_size) + LOG.debug( + "syncing: %s -> %s, size_changed: %s", + src_file.src, + src_file.dest, + not same_size, + ) return should_sync diff --git a/awscli/customizations/s3/transferconfig.py b/awscli/customizations/s3/transferconfig.py index 5764c6f9772d..95057ea6f1e8 100644 --- a/awscli/customizations/s3/transferconfig.py +++ b/awscli/customizations/s3/transferconfig.py @@ -10,21 +10,20 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
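The sync-strategy hunks above expose a small plug-in surface: an ARGUMENT dict that becomes a CLI flag, plus a determine_should_sync() override. A hedged sketch of a custom strategy built on that surface (the class and flag below are hypothetical, not part of the CLI):

    from awscli.customizations.s3.syncstrategy.base import BaseSync
    from awscli.customizations.s3.syncstrategy.register import (
        register_sync_strategy,
    )

    class SourceNewerSync(BaseSync):
        # hypothetical --source-newer flag
        ARGUMENT = {
            'name': 'source-newer',
            'action': 'store_true',
            'help_text': 'Sync only when the source is newer.',
        }

        def determine_should_sync(self, src_file, dest_file):
            # FileStat objects carry last_update, as the debug logs above show
            return src_file.last_update > dest_file.last_update

    # given a botocore session, hooked up the same way register.py does it:
    # register_sync_strategy(session, SourceNewerSync)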
-from s3transfer.manager import TransferConfig - -from awscli.customizations.s3 import constants -from awscli.customizations.s3.utils import human_readable_to_int # If the user does not specify any overrides, # these are the default values we use for the s3 transfer # commands. import logging +from awscli.customizations.s3 import constants +from awscli.customizations.s3.utils import human_readable_to_int +from s3transfer.manager import TransferConfig LOGGER = logging.getLogger(__name__) DEFAULTS = { - 'multipart_threshold': 8 * (1024 ** 2), - 'multipart_chunksize': 8 * (1024 ** 2), + 'multipart_threshold': 8 * (1024**2), + 'multipart_chunksize': 8 * (1024**2), 'max_concurrent_requests': 10, 'max_queue_size': 1000, 'max_bandwidth': None, @@ -38,10 +37,14 @@ class InvalidConfigError(Exception): class RuntimeConfig(object): - - POSITIVE_INTEGERS = ['multipart_chunksize', 'multipart_threshold', - 'max_concurrent_requests', 'max_queue_size', - 'max_bandwidth', 'target_bandwidth'] + POSITIVE_INTEGERS = [ + 'multipart_chunksize', + 'multipart_threshold', + 'max_concurrent_requests', + 'max_queue_size', + 'max_bandwidth', + 'target_bandwidth', + ] HUMAN_READABLE_SIZES = ['multipart_chunksize', 'multipart_threshold'] HUMAN_READABLE_RATES = ['max_bandwidth', 'target_bandwidth'] SUPPORTED_CHOICES = { @@ -107,7 +110,8 @@ def _convert_human_readable_rates(self, runtime_config): 'as an integer in terms of bytes per second ' '(e.g. 10485760) or a rate in terms of bytes ' 'per second (e.g. 10MB/s or 800KB/s) or bits per ' - 'second (e.g. 10Mb/s or 800Kb/s)' % value) + 'second (e.g. 10Mb/s or 800Kb/s)' % value + ) def _human_readable_rate_to_int(self, value): # The human_readable_to_int() utility only supports integers (e.g. 1024) @@ -145,7 +149,9 @@ def _resolve_choice_aliases(self, runtime_config): resolved_value = self.CHOICE_ALIASES[attr][current_value] LOGGER.debug( 'Resolved %s configuration alias value "%s" to "%s"', - attr, current_value, resolved_value + attr, + current_value, + resolved_value, ) runtime_config[attr] = resolved_value @@ -173,7 +179,8 @@ def _validate_choices(self, runtime_config): def _error_positive_value(self, name, value): raise InvalidConfigError( - "Value for %s must be a positive integer: %s" % (name, value)) + "Value for %s must be a positive integer: %s" % (name, value) + ) def _error_invalid_choice(self, name, value): raise InvalidConfigError( diff --git a/awscli/customizations/s3/utils.py b/awscli/customizations/s3/utils.py index d8dedb710f98..a5d954d3ec7f 100644 --- a/awscli/customizations/s3/utils.py +++ b/awscli/customizations/s3/utils.py @@ -11,19 +11,18 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import argparse +import errno import logging -from datetime import datetime import mimetypes -import errno import os import re -from collections import namedtuple, deque +from collections import deque, namedtuple +from datetime import datetime from dateutil.parser import parse from dateutil.tz import tzlocal, tzutc -from awscli.compat import bytes_print -from awscli.compat import queue +from awscli.compat import bytes_print, queue from awscli.customizations.exceptions import ParamValidationError LOGGER = logging.getLogger(__name__) @@ -31,16 +30,16 @@ EPOCH_TIME = datetime(1970, 1, 1, tzinfo=tzutc()) # Maximum object size allowed in S3. 
# See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html -MAX_UPLOAD_SIZE = 5 * (1024 ** 4) +MAX_UPLOAD_SIZE = 5 * (1024**4) SIZE_SUFFIX = { 'kb': 1024, - 'mb': 1024 ** 2, - 'gb': 1024 ** 3, - 'tb': 1024 ** 4, + 'mb': 1024**2, + 'gb': 1024**3, + 'tb': 1024**4, 'kib': 1024, - 'mib': 1024 ** 2, - 'gib': 1024 ** 3, - 'tib': 1024 ** 4, + 'mib': 1024**2, + 'gib': 1024**3, + 'tib': 1024**4, } _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile( r'^(?P<bucket>arn:(aws).*:s3:[a-z\-0-9]*:[0-9]{12}:accesspoint[:/][^/]+)/?' @@ -90,7 +89,7 @@ def human_readable_size(value): return '%d Bytes' % bytes_int for i, suffix in enumerate(HUMANIZE_SUFFIXES): - unit = base ** (i+2) + unit = base ** (i + 2) if round((bytes_int / unit) * base) < base: return '%.1f %s' % ((base * bytes_int / unit), suffix) @@ -110,8 +109,7 @@ def human_readable_to_int(value): suffix = value[-3:].lower() else: suffix = value[-2:].lower() - has_size_identifier = ( - len(value) >= 2 and suffix in SIZE_SUFFIX) + has_size_identifier = len(value) >= 2 and suffix in SIZE_SUFFIX if not has_size_identifier: try: return int(value) @@ -119,7 +117,7 @@ raise ValueError("Invalid size value: %s" % value) else: multiplier = SIZE_SUFFIX[suffix] - return int(value[:-len(suffix)]) * multiplier + return int(value[: -len(suffix)]) * multiplier class AppendFilter(argparse.Action): @@ -136,6 +134,7 @@ class AppendFilter(argparse.Action): appear later in the command line take preference over rules that appear earlier. """ + def __call__(self, parser, namespace, values, option_string=None): filter_list = getattr(namespace, self.dest) if filter_list: @@ -170,6 +169,7 @@ class outside of that context. In order for this to be the case, (least important) priority available. """ + def __init__(self, maxsize=0, max_priority=20): queue.Queue.__init__(self, maxsize=maxsize) self.priorities = [deque([]) for i in range(max_priority + 1)] @@ -182,8 +182,10 @@ def _qsize(self): return size def _put(self, item): - priority = min(getattr(item, 'PRIORITY', self.default_priority), - self.default_priority) + priority = min( + getattr(item, 'PRIORITY', self.default_priority), + self.default_priority, + ) self.priorities[priority].append(item) def _get(self): @@ -252,8 +254,9 @@ def get_file_stat(path): try: stats = os.stat(path) except IOError as e: - raise ValueError('Could not retrieve file stat of "%s": %s' % ( - path, e)) + raise ValueError( + 'Could not retrieve file stat of "%s": %s' % (path, e) + ) try: update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal()) @@ -284,14 +287,15 @@ def find_dest_path_comp_key(files, src_path=None): sep_table = {'s3': '/', 'local': os.sep} if files['dir_op']: - rel_path = src_path[len(src['path']):] + rel_path = src_path[len(src['path']) :] else: rel_path = src_path.split(sep_table[src_type])[-1] compare_key = rel_path.replace(sep_table[src_type], '/') if files['use_src_name']: dest_path = dest['path'] - dest_path += rel_path.replace(sep_table[src_type], - sep_table[dest_type]) + dest_path += rel_path.replace( + sep_table[src_type], sep_table[dest_type] + ) else: dest_path = dest['path'] return dest_path, compare_key @@ -305,8 +309,9 @@ def create_warning(path, error_message, skip_file=True): if skip_file: print_string = print_string + "Skipping file " + path + ".
" print_string = print_string + error_message - warning_message = WarningResult(message=print_string, error=False, - warning=True) + warning_message = WarningResult( + message=print_string, error=False, warning=True + ) return warning_message @@ -315,6 +320,7 @@ class StdoutBytesWriter(object): This class acts as a file-like object that performs the bytes_print function on write. """ + def __init__(self, stdout=None): self._stdout = stdout @@ -345,7 +351,9 @@ def guess_content_type(filename): except UnicodeDecodeError: LOGGER.debug( 'Unable to guess content type for %s due to ' - 'UnicodeDecodeError: ', filename, exc_info=True + 'UnicodeDecodeError: ', + filename, + exc_info=True, ) @@ -381,8 +389,11 @@ def set_file_utime(filename, desired_time): if e.errno != errno.EPERM: raise e raise SetFileUtimeError( - ("The file was downloaded, but attempting to modify the " - "utime of the file failed. Is the file owned by another user?")) + ( + "The file was downloaded, but attempting to modify the " + "utime of the file failed. Is the file owned by another user?" + ) + ) class SetFileUtimeError(Exception): @@ -395,13 +406,18 @@ def _date_parser(date_string): class BucketLister(object): """List keys in a bucket.""" + def __init__(self, client, date_parser=_date_parser): self._client = client self._date_parser = date_parser - def list_objects(self, bucket, prefix=None, page_size=None, - extra_args=None): - kwargs = {'Bucket': bucket, 'PaginationConfig': {'PageSize': page_size}} + def list_objects( + self, bucket, prefix=None, page_size=None, extra_args=None + ): + kwargs = { + 'Bucket': bucket, + 'PaginationConfig': {'PageSize': page_size}, + } if prefix is not None: kwargs['Prefix'] = prefix if extra_args is not None: @@ -414,12 +430,14 @@ def list_objects(self, bucket, prefix=None, page_size=None, for content in contents: source_path = bucket + '/' + content['Key'] content['LastModified'] = self._date_parser( - content['LastModified']) + content['LastModified'] + ) yield source_path, content -class PrintTask(namedtuple('PrintTask', - ['message', 'error', 'total_parts', 'warning'])): +class PrintTask( + namedtuple('PrintTask', ['message', 'error', 'total_parts', 'warning']) +): def __new__(cls, message, error=False, total_parts=None, warning=None): """ :param message: An arbitrary string associated with the entry. This @@ -428,8 +446,10 @@ def __new__(cls, message, error=False, total_parts=None, warning=None): :param total_parts: The total number of parts for multipart transfers. :param warning: Boolean indicating a warning """ - return super(PrintTask, cls).__new__(cls, message, error, total_parts, - warning) + return super(PrintTask, cls).__new__( + cls, message, error, total_parts, warning + ) + WarningResult = PrintTask @@ -462,6 +482,7 @@ class RequestParamsMapper(object): Note that existing parameters in ``request_params`` will be overriden if a parameter in ``cli_params`` maps to the existing parameter. 
""" + @classmethod def map_put_object_params(cls, request_params, cli_params): """Map CLI params to PutObject request params""" @@ -498,7 +519,8 @@ def map_copy_object_params(cls, request_params, cli_params): cls._auto_populate_metadata_directive(request_params) cls._set_sse_request_params(request_params, cli_params) cls._set_sse_c_and_copy_source_request_params( - request_params, cli_params) + request_params, cli_params + ) cls._set_request_payer_param(request_params, cli_params) cls._set_checksum_algorithm_param(request_params, cli_params) @@ -527,7 +549,8 @@ def map_upload_part_params(cls, request_params, cli_params): def map_upload_part_copy_params(cls, request_params, cli_params): """Map CLI params to UploadPartCopy request params""" cls._set_sse_c_and_copy_source_request_params( - request_params, cli_params) + request_params, cli_params + ) cls._set_request_payer_param(request_params, cli_params) @classmethod @@ -551,7 +574,9 @@ def _set_checksum_mode_param(cls, request_params, cli_params): @classmethod def _set_checksum_algorithm_param(cls, request_params, cli_params): if cli_params.get('checksum_algorithm'): - request_params['ChecksumAlgorithm'] = cli_params['checksum_algorithm'] + request_params['ChecksumAlgorithm'] = cli_params[ + 'checksum_algorithm' + ] @classmethod def _set_general_object_params(cls, request_params, cli_params): @@ -567,7 +592,7 @@ def _set_general_object_params(cls, request_params, cli_params): 'content_disposition': 'ContentDisposition', 'content_encoding': 'ContentEncoding', 'content_language': 'ContentLanguage', - 'expires': 'Expires' + 'expires': 'Expires', } for cli_param_name in general_param_translation: if cli_params.get(cli_param_name): @@ -608,21 +633,23 @@ def _set_metadata_params(cls, request_params, cli_params): @classmethod def _auto_populate_metadata_directive(cls, request_params): - if request_params.get('Metadata') and \ - not request_params.get('MetadataDirective'): + if request_params.get('Metadata') and not request_params.get( + 'MetadataDirective' + ): request_params['MetadataDirective'] = 'REPLACE' @classmethod def _set_metadata_directive_param(cls, request_params, cli_params): if cli_params.get('metadata_directive'): request_params['MetadataDirective'] = cli_params[ - 'metadata_directive'] + 'metadata_directive' + ] @classmethod def _set_sse_request_params(cls, request_params, cli_params): if cli_params.get('sse'): request_params['ServerSideEncryption'] = cli_params['sse'] - if cli_params.get('sse_kms_key_id'): + if cli_params.get('sse_kms_key_id'): request_params['SSEKMSKeyId'] = cli_params['sse_kms_key_id'] @classmethod @@ -635,13 +662,16 @@ def _set_sse_c_request_params(cls, request_params, cli_params): def _set_sse_c_copy_source_request_params(cls, request_params, cli_params): if cli_params.get('sse_c_copy_source'): request_params['CopySourceSSECustomerAlgorithm'] = cli_params[ - 'sse_c_copy_source'] + 'sse_c_copy_source' + ] request_params['CopySourceSSECustomerKey'] = cli_params[ - 'sse_c_copy_source_key'] + 'sse_c_copy_source_key' + ] @classmethod - def _set_sse_c_and_copy_source_request_params(cls, request_params, - cli_params): + def _set_sse_c_and_copy_source_request_params( + cls, request_params, cli_params + ): cls._set_sse_c_request_params(request_params, cli_params) cls._set_sse_c_copy_source_request_params(request_params, cli_params) @@ -664,6 +694,7 @@ class NonSeekableStream(object): for certain that a fileobj is non seekable. 
""" + def __init__(self, fileobj): self._fileobj = fileobj @@ -696,10 +727,12 @@ def __init__(self, s3control_client, sts_client): def has_underlying_s3_path(self, path): bucket, _ = split_s3_bucket_key(path) return bool( - self._S3_ACCESSPOINT_ARN_TO_ACCOUNT_NAME_REGEX.match(bucket) or - self._S3_OUTPOST_ACCESSPOINT_ARN_TO_ACCOUNT_REGEX.match(bucket) or - self._S3_MRAP_ARN_TO_ACCOUNT_ALIAS_REGEX.match(bucket) or - bucket.endswith('-s3alias') or bucket.endswith('--op-s3')) + self._S3_ACCESSPOINT_ARN_TO_ACCOUNT_NAME_REGEX.match(bucket) + or self._S3_OUTPOST_ACCESSPOINT_ARN_TO_ACCOUNT_REGEX.match(bucket) + or self._S3_MRAP_ARN_TO_ACCOUNT_ALIAS_REGEX.match(bucket) + or bucket.endswith('-s3alias') + or bucket.endswith('--op-s3') + ) @classmethod def from_session(cls, session, region, verify_ssl): @@ -756,8 +789,7 @@ def _resolve_mrap_alias(self, account, alias, key): def _get_access_point_bucket(self, account, name): return self._s3control_client.get_access_point( - AccountId=account, - Name=name + AccountId=account, Name=name )['Bucket'] def _get_account_id(self): diff --git a/awscli/customizations/s3errormsg.py b/awscli/customizations/s3errormsg.py index a7a0b9eb4f32..e4eabf442d96 100644 --- a/awscli/customizations/s3errormsg.py +++ b/awscli/customizations/s3errormsg.py @@ -10,9 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -"""Give better S3 error messages. -""" - +"""Give better S3 error messages.""" REGION_ERROR_MSG = ( 'You can fix this issue by explicitly providing the correct region ' @@ -54,8 +52,9 @@ def enhance_error_msg(parsed, **kwargs): def _is_sigv4_error_message(parsed): - return ('Please use AWS4-HMAC-SHA256' in - parsed.get('Error', {}).get('Message', '')) + return 'Please use AWS4-HMAC-SHA256' in parsed.get('Error', {}).get( + 'Message', '' + ) def _is_permanent_redirect_message(parsed): @@ -63,5 +62,7 @@ def _is_permanent_redirect_message(parsed): def _is_kms_sigv4_error_message(parsed): - return ('AWS KMS managed keys require AWS Signature Version 4' in - parsed.get('Error', {}).get('Message', '')) + return ( + 'AWS KMS managed keys require AWS Signature Version 4' + in parsed.get('Error', {}).get('Message', '') + ) diff --git a/awscli/customizations/s3events.py b/awscli/customizations/s3events.py index 122c4ca14be7..2a0c31d307a7 100644 --- a/awscli/customizations/s3events.py +++ b/awscli/customizations/s3events.py @@ -11,8 +11,8 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
"""Add S3 specific event streaming output arg.""" -from awscli.arguments import CustomArgument +from awscli.arguments import CustomArgument STREAM_HELP_TEXT = 'Filename where the records will be saved' @@ -24,28 +24,29 @@ class DocSectionNotFoundError(Exception): def register_event_stream_arg(event_handlers): event_handlers.register( 'building-argument-table.s3api.select-object-content', - add_event_stream_output_arg) + add_event_stream_output_arg, + ) event_handlers.register_last( - 'doc-output.s3api.select-object-content', - replace_event_stream_docs + 'doc-output.s3api.select-object-content', replace_event_stream_docs ) def register_document_expires_string(event_handlers): - event_handlers.register_last( - 'doc-output.s3api', - document_expires_string - ) + event_handlers.register_last('doc-output.s3api', document_expires_string) -def add_event_stream_output_arg(argument_table, operation_model, - session, **kwargs): +def add_event_stream_output_arg( + argument_table, operation_model, session, **kwargs +): argument_table['outfile'] = S3SelectStreamOutputArgument( - name='outfile', help_text=STREAM_HELP_TEXT, - cli_type_name='string', positional_arg=True, + name='outfile', + help_text=STREAM_HELP_TEXT, + cli_type_name='string', + positional_arg=True, stream_key=operation_model.output_shape.serialization['payload'], - session=session) + session=session, + ) def replace_event_stream_docs(help_command, **kwargs): @@ -59,10 +60,13 @@ def replace_event_stream_docs(help_command, **kwargs): # we should be raising something with a helpful error message. raise DocSectionNotFoundError( 'Could not find the "output" section for the command: %s' - % help_command) + % help_command + ) doc.write('======\nOutput\n======\n') - doc.write("This command generates no output. The selected " - "object content is written to the specified outfile.\n") + doc.write( + "This command generates no output. 
The selected " + "object content is written to the specified outfile.\n" + ) def document_expires_string(help_command, **kwargs): @@ -81,7 +85,7 @@ def document_expires_string(help_command, **kwargs): f'\n\n{" " * doc.style.indentation * doc.style.indent_width}', 'ExpiresString -> (string)\n\n', '\tThe raw, unparsed value of the ``Expires`` field.', - f'\n\n{" " * doc.style.indentation * doc.style.indent_width}' + f'\n\n{" " * doc.style.indentation * doc.style.indent_width}', ] for idx, write in enumerate(deprecation_note_and_expires_string): @@ -103,8 +107,9 @@ def __init__(self, stream_key, session, **kwargs): def add_to_params(self, parameters, value): self._output_file = value - self._session.register('after-call.s3.SelectObjectContent', - self.save_file) + self._session.register( + 'after-call.s3.SelectObjectContent', self.save_file + ) def save_file(self, parsed, **kwargs): # This method is hooked into after-call which fires diff --git a/awscli/customizations/s3uploader.py b/awscli/customizations/s3uploader.py index e640b94ba55a..da9cf4f01264 100644 --- a/awscli/customizations/s3uploader.py +++ b/awscli/customizations/s3uploader.py @@ -13,17 +13,16 @@ import hashlib import logging -import threading import os import sys +import threading import botocore import botocore.exceptions +from awscli.compat import collections_abc from s3transfer.manager import TransferManager from s3transfer.subscribers import BaseSubscriber -from awscli.compat import collections_abc - LOG = logging.getLogger(__name__) @@ -33,11 +32,12 @@ def __init__(self, **kwargs): Exception.__init__(self, msg) self.kwargs = kwargs - - fmt = ("S3 Bucket does not exist. " - "Execute the command to create a new bucket" - "\n" - "aws s3 mb s3://{bucket_name}") + fmt = ( + "S3 Bucket does not exist. " + "Execute the command to create a new bucket" + "\n" + "aws s3 mb s3://{bucket_name}" + ) class S3Uploader(object): @@ -59,12 +59,15 @@ def artifact_metadata(self, val): raise TypeError("Artifact metadata should be in dict type") self._artifact_metadata = val - def __init__(self, s3_client, - bucket_name, - prefix=None, - kms_key_id=None, - force_upload=False, - transfer_manager=None): + def __init__( + self, + s3_client, + bucket_name, + prefix=None, + kms_key_id=None, + force_upload=False, + transfer_manager=None, + ): self.bucket_name = bucket_name self.prefix = prefix self.kms_key_id = kms_key_id or None @@ -90,17 +93,16 @@ def upload(self, file_name, remote_path): # Check if a file with same data exists if not self.force_upload and self.file_exists(remote_path): - LOG.debug("File with same data already exists at {0}. " - "Skipping upload".format(remote_path)) + LOG.debug( + "File with same data already exists at {0}. 
" + "Skipping upload".format(remote_path) + ) return self.make_url(remote_path) try: - # Default to regular server-side encryption unless customer has # specified their own KMS keys - additional_args = { - "ServerSideEncryption": "AES256" - } + additional_args = {"ServerSideEncryption": "AES256"} if self.kms_key_id: additional_args["ServerSideEncryption"] = "aws:kms" @@ -109,13 +111,16 @@ def upload(self, file_name, remote_path): if self.artifact_metadata: additional_args["Metadata"] = self.artifact_metadata - print_progress_callback = \ - ProgressPercentage(file_name, remote_path) - future = self.transfer_manager.upload(file_name, - self.bucket_name, - remote_path, - additional_args, - [print_progress_callback]) + print_progress_callback = ProgressPercentage( + file_name, remote_path + ) + future = self.transfer_manager.upload( + file_name, + self.bucket_name, + remote_path, + additional_args, + [print_progress_callback], + ) future.result() return self.make_url(remote_path) @@ -157,8 +162,7 @@ def file_exists(self, remote_path): try: # Find the object that matches this ETag - self.s3.head_object( - Bucket=self.bucket_name, Key=remote_path) + self.s3.head_object(Bucket=self.bucket_name, Key=remote_path) return True except botocore.exceptions.ClientError: # Either File does not exist or we are unable to get @@ -166,11 +170,9 @@ def file_exists(self, remote_path): return False def make_url(self, obj_path): - return "s3://{0}/{1}".format( - self.bucket_name, obj_path) + return "s3://{0}/{1}".format(self.bucket_name, obj_path) def file_checksum(self, file_name): - with open(file_name, "rb") as file_handle: md5 = hashlib.md5() # Read file in chunks of 4096 bytes @@ -192,8 +194,8 @@ def file_checksum(self, file_name): def to_path_style_s3_url(self, key, version=None): """ - This link describes the format of Path Style URLs - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + This link describes the format of Path Style URLs + http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro """ base = self.s3.meta.endpoint_url result = "{0}/{1}/{2}".format(base, self.bucket_name, key) @@ -214,14 +216,18 @@ def __init__(self, filename, remote_path): self._lock = threading.Lock() def on_progress(self, future, bytes_transferred, **kwargs): - # To simplify we'll assume this is hooked up # to a single filename. with self._lock: self._seen_so_far += bytes_transferred percentage = (self._seen_so_far / self._size) * 100 sys.stderr.write( - "\rUploading to %s %s / %s (%.2f%%)" % - (self._remote_path, self._seen_so_far, - self._size, percentage)) + "\rUploading to %s %s / %s (%.2f%%)" + % ( + self._remote_path, + self._seen_so_far, + self._size, + percentage, + ) + ) sys.stderr.flush() diff --git a/awscli/customizations/servicecatalog/__init__.py b/awscli/customizations/servicecatalog/__init__.py index 18cf606449f0..01eaa95e987d 100644 --- a/awscli/customizations/servicecatalog/__init__.py +++ b/awscli/customizations/servicecatalog/__init__.py @@ -11,13 +11,13 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from awscli.customizations.servicecatalog.generate \ - import GenerateCommand +from awscli.customizations.servicecatalog.generate import GenerateCommand def register_servicecatalog_commands(event_emitter): - event_emitter.register('building-command-table.servicecatalog', - inject_commands) + event_emitter.register( + 'building-command-table.servicecatalog', inject_commands + ) def inject_commands(command_table, session, **kwargs): diff --git a/awscli/customizations/servicecatalog/generate.py b/awscli/customizations/servicecatalog/generate.py index 51e69ed821c6..3c8b58d482e8 100644 --- a/awscli/customizations/servicecatalog/generate.py +++ b/awscli/customizations/servicecatalog/generate.py @@ -13,20 +13,23 @@ from awscli.customizations.commands import BasicCommand from awscli.customizations.servicecatalog import helptext -from awscli.customizations.servicecatalog.generateproduct \ - import GenerateProductCommand -from awscli.customizations.servicecatalog.generateprovisioningartifact \ - import GenerateProvisioningArtifactCommand +from awscli.customizations.servicecatalog.generateproduct import ( + GenerateProductCommand, +) +from awscli.customizations.servicecatalog.generateprovisioningartifact import ( + GenerateProvisioningArtifactCommand, +) class GenerateCommand(BasicCommand): NAME = "generate" DESCRIPTION = helptext.GENERATE_COMMAND SUBCOMMANDS = [ - {'name': 'product', - 'command_class': GenerateProductCommand}, - {'name': 'provisioning-artifact', - 'command_class': GenerateProvisioningArtifactCommand} + {'name': 'product', 'command_class': GenerateProductCommand}, + { + 'name': 'provisioning-artifact', + 'command_class': GenerateProvisioningArtifactCommand, + }, ] def _run_main(self, parsed_args, parsed_globals): diff --git a/awscli/customizations/servicecatalog/generatebase.py b/awscli/customizations/servicecatalog/generatebase.py index 3e7b59373f7a..eb160762341a 100644 --- a/awscli/customizations/servicecatalog/generatebase.py +++ b/awscli/customizations/servicecatalog/generatebase.py @@ -12,28 +12,27 @@ # language governing permissions and limitations under the License. 
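The servicecatalog registration above follows the CLI's general customization pattern: subscribe to a 'building-command-table.*' event and drop BasicCommand subclasses into the table. A minimal hypothetical example of the same shape:

    from awscli.customizations.commands import BasicCommand

    class HelloCommand(BasicCommand):  # hypothetical command
        NAME = 'hello'
        DESCRIPTION = 'Example customization command.'

        def _run_main(self, parsed_args, parsed_globals):
            print('hello from a customization')
            return 0

    def inject_commands(command_table, session, **kwargs):
        command_table['hello'] = HelloCommand(session)

    def register_hello(event_emitter):
        event_emitter.register('building-command-table.example',
                               inject_commands)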
from awscli.customizations.commands import BasicCommand -from awscli.customizations.servicecatalog.utils \ - import make_url, get_s3_path from awscli.customizations.s3uploader import S3Uploader from awscli.customizations.servicecatalog import exceptions +from awscli.customizations.servicecatalog.utils import get_s3_path, make_url class GenerateBaseCommand(BasicCommand): - def _run_main(self, parsed_args, parsed_globals): self.region = self.get_and_validate_region(parsed_globals) self.s3_client = self._session.create_client( 's3', region_name=self.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, + ) + self.s3_uploader = S3Uploader( + self.s3_client, parsed_args.bucket_name, force_upload=True ) - self.s3_uploader = S3Uploader(self.s3_client, - parsed_args.bucket_name, - force_upload=True) try: - self.s3_uploader.upload(parsed_args.file_path, - get_s3_path(parsed_args.file_path)) + self.s3_uploader.upload( + parsed_args.file_path, get_s3_path(parsed_args.file_path) + ) except OSError as ex: raise RuntimeError("%s cannot be found" % parsed_args.file_path) @@ -44,10 +43,10 @@ def get_and_validate_region(self, parsed_globals): if region not in self._session.get_available_regions('servicecatalog'): raise exceptions.InvalidParametersException( message="Region {0} is not supported".format( - parsed_globals.region)) + parsed_globals.region + ) + ) return region def create_s3_url(self, bucket_name, file_path): - return make_url(self.region, - bucket_name, - get_s3_path(file_path)) + return make_url(self.region, bucket_name, get_s3_path(file_path)) diff --git a/awscli/customizations/servicecatalog/generateproduct.py b/awscli/customizations/servicecatalog/generateproduct.py index 2f3786563c83..c84243c21a4e 100644 --- a/awscli/customizations/servicecatalog/generateproduct.py +++ b/awscli/customizations/servicecatalog/generateproduct.py @@ -14,8 +14,9 @@ import sys from awscli.customizations.servicecatalog import helptext -from awscli.customizations.servicecatalog.generatebase \ - import GenerateBaseCommand +from awscli.customizations.servicecatalog.generatebase import ( + GenerateBaseCommand, +) from botocore.compat import json @@ -26,71 +27,66 @@ class GenerateProductCommand(GenerateBaseCommand): { 'name': 'product-name', 'required': True, - 'help_text': helptext.PRODUCT_NAME + 'help_text': helptext.PRODUCT_NAME, }, { 'name': 'product-owner', 'required': True, - 'help_text': helptext.OWNER + 'help_text': helptext.OWNER, }, { 'name': 'product-type', 'required': True, 'help_text': helptext.PRODUCT_TYPE, - 'choices': ['CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE'] + 'choices': ['CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE'], }, { 'name': 'product-description', 'required': False, - 'help_text': helptext.PRODUCT_DESCRIPTION + 'help_text': helptext.PRODUCT_DESCRIPTION, }, { 'name': 'product-distributor', 'required': False, - 'help_text': helptext.DISTRIBUTOR + 'help_text': helptext.DISTRIBUTOR, }, { 'name': 'tags', 'required': False, - 'schema': { - 'type': 'array', - 'items': { - 'type': 'string' - } - }, + 'schema': {'type': 'array', 'items': {'type': 'string'}}, 'default': [], 'synopsis': '--tags Key=key1,Value=value1 Key=key2,Value=value2', - 'help_text': helptext.TAGS + 'help_text': helptext.TAGS, }, { 'name': 'file-path', 'required': True, - 'help_text': helptext.FILE_PATH + 'help_text': helptext.FILE_PATH, }, { 'name': 'bucket-name', 'required': True, - 'help_text': helptext.BUCKET_NAME + 'help_text': helptext.BUCKET_NAME, }, { 'name': 
'support-description', 'required': False, - 'help_text': helptext.SUPPORT_DESCRIPTION + 'help_text': helptext.SUPPORT_DESCRIPTION, }, { 'name': 'support-email', 'required': False, - 'help_text': helptext.SUPPORT_EMAIL + 'help_text': helptext.SUPPORT_EMAIL, }, { 'name': 'provisioning-artifact-name', 'required': True, - 'help_text': helptext.PA_NAME + 'help_text': helptext.PA_NAME, }, { 'name': 'provisioning-artifact-description', 'required': True, - 'help_text': helptext.PA_DESCRIPTION + 'help_text': helptext.PA_DESCRIPTION, }, { 'name': 'provisioning-artifact-type', @@ -99,27 +95,30 @@ class GenerateProductCommand(GenerateBaseCommand): 'choices': [ 'CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE_AMI', - 'MARKETPLACE_CAR' - ] - } + 'MARKETPLACE_CAR', + ], + }, ] def _run_main(self, parsed_args, parsed_globals): - super(GenerateProductCommand, self)._run_main(parsed_args, - parsed_globals) + super(GenerateProductCommand, self)._run_main( + parsed_args, parsed_globals + ) self.region = self.get_and_validate_region(parsed_globals) - self.s3_url = self.create_s3_url(parsed_args.bucket_name, - parsed_args.file_path) + self.s3_url = self.create_s3_url( + parsed_args.bucket_name, parsed_args.file_path + ) self.scs_client = self._session.create_client( - 'servicecatalog', region_name=self.region, + 'servicecatalog', + region_name=self.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) - response = self.create_product(self.build_args(parsed_args, - self.s3_url), - parsed_globals) + response = self.create_product( + self.build_args(parsed_args, self.s3_url), parsed_globals + ) sys.stdout.write(json.dumps(response, indent=2, ensure_ascii=False)) return 0 @@ -145,11 +144,9 @@ def build_args(self, parsed_args, s3_url): "ProvisioningArtifactParameters": { 'Name': parsed_args.provisioning_artifact_name, 'Description': parsed_args.provisioning_artifact_description, - 'Info': { - 'LoadTemplateFromURL': s3_url - }, - 'Type': parsed_args.provisioning_artifact_type - } + 'Info': {'LoadTemplateFromURL': s3_url}, + 'Type': parsed_args.provisioning_artifact_type, + }, } # Non-required args diff --git a/awscli/customizations/servicecatalog/generateprovisioningartifact.py b/awscli/customizations/servicecatalog/generateprovisioningartifact.py index e79a378357a3..8cb7aa33b9df 100644 --- a/awscli/customizations/servicecatalog/generateprovisioningartifact.py +++ b/awscli/customizations/servicecatalog/generateprovisioningartifact.py @@ -14,8 +14,9 @@ import sys from awscli.customizations.servicecatalog import helptext -from awscli.customizations.servicecatalog.generatebase \ - import GenerateBaseCommand +from awscli.customizations.servicecatalog.generatebase import ( + GenerateBaseCommand, +) from botocore.compat import json @@ -26,22 +27,22 @@ class GenerateProvisioningArtifactCommand(GenerateBaseCommand): { 'name': 'file-path', 'required': True, - 'help_text': helptext.FILE_PATH + 'help_text': helptext.FILE_PATH, }, { 'name': 'bucket-name', 'required': True, - 'help_text': helptext.BUCKET_NAME + 'help_text': helptext.BUCKET_NAME, }, { 'name': 'provisioning-artifact-name', 'required': True, - 'help_text': helptext.PA_NAME + 'help_text': helptext.PA_NAME, }, { 'name': 'provisioning-artifact-description', 'required': True, - 'help_text': helptext.PA_DESCRIPTION + 'help_text': helptext.PA_DESCRIPTION, }, { 'name': 'provisioning-artifact-type', @@ -50,31 +51,33 @@ class GenerateProvisioningArtifactCommand(GenerateBaseCommand): 'choices': [ 
'CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE_AMI', - 'MARKETPLACE_CAR' - ] + 'MARKETPLACE_CAR', + ], }, { 'name': 'product-id', 'required': True, - 'help_text': helptext.PRODUCT_ID - } + 'help_text': helptext.PRODUCT_ID, + }, ] def _run_main(self, parsed_args, parsed_globals): super(GenerateProvisioningArtifactCommand, self)._run_main( - parsed_args, parsed_globals) + parsed_args, parsed_globals + ) self.region = self.get_and_validate_region(parsed_globals) - self.s3_url = self.create_s3_url(parsed_args.bucket_name, - parsed_args.file_path) + self.s3_url = self.create_s3_url( + parsed_args.bucket_name, parsed_args.file_path + ) self.scs_client = self._session.create_client( - 'servicecatalog', region_name=self.region, + 'servicecatalog', + region_name=self.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl + verify=parsed_globals.verify_ssl, ) - response = self.create_provisioning_artifact(parsed_args, - self.s3_url) + response = self.create_provisioning_artifact(parsed_args, self.s3_url) sys.stdout.write(json.dumps(response, indent=2, ensure_ascii=False)) @@ -86,11 +89,9 @@ def create_provisioning_artifact(self, parsed_args, s3_url): Parameters={ 'Name': parsed_args.provisioning_artifact_name, 'Description': parsed_args.provisioning_artifact_description, - 'Info': { - 'LoadTemplateFromURL': s3_url - }, - 'Type': parsed_args.provisioning_artifact_type - } + 'Info': {'LoadTemplateFromURL': s3_url}, + 'Type': parsed_args.provisioning_artifact_type, + }, ) if 'ResponseMetadata' in response: diff --git a/awscli/customizations/servicecatalog/helptext.py b/awscli/customizations/servicecatalog/helptext.py index 7c4c72961206..446e8b7d676b 100644 --- a/awscli/customizations/servicecatalog/helptext.py +++ b/awscli/customizations/servicecatalog/helptext.py @@ -14,8 +14,10 @@ TAGS = "Tags to associate with the new product." 
-BUCKET_NAME = ("Name of the S3 bucket name where the CloudFormation " - "template will be uploaded to") +BUCKET_NAME = ( + "Name of the S3 bucket name where the CloudFormation " + "template will be uploaded to" +) SUPPORT_DESCRIPTION = "Support information about the product" @@ -39,15 +41,21 @@ PRODUCT_DESCRIPTION = "The text description of the product" -PRODUCT_COMMAND_DESCRIPTION = ("Create a new product using a CloudFormation " - "template specified as a local file path") - -PA_COMMAND_DESCRIPTION = ("Create a new provisioning artifact for the " - "specified product using a CloudFormation template " - "specified as a local file path") - -GENERATE_COMMAND = ("Generate a Service Catalog product or provisioning " - "artifact using a CloudFormation template specified " - "as a local file path") +PRODUCT_COMMAND_DESCRIPTION = ( + "Create a new product using a CloudFormation " + "template specified as a local file path" +) + +PA_COMMAND_DESCRIPTION = ( + "Create a new provisioning artifact for the " + "specified product using a CloudFormation template " + "specified as a local file path" +) + +GENERATE_COMMAND = ( + "Generate a Service Catalog product or provisioning " + "artifact using a CloudFormation template specified " + "as a local file path" +) FILE_PATH = "A local file path that references the CloudFormation template" diff --git a/awscli/customizations/servicecatalog/utils.py b/awscli/customizations/servicecatalog/utils.py index 510ebb7d85f1..a6826ac36b87 100644 --- a/awscli/customizations/servicecatalog/utils.py +++ b/awscli/customizations/servicecatalog/utils.py @@ -16,8 +16,8 @@ def make_url(region, bucket_name, obj_path, version=None): """ - This link describes the format of Path Style URLs - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + This link describes the format of Path Style URLs + http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro """ base = "https://s3.amazonaws.com" if region and region != "us-east-1": diff --git a/awscli/customizations/sessendemail.py b/awscli/customizations/sessendemail.py index 8215342982bf..ea290a63b685 100644 --- a/awscli/customizations/sessendemail.py +++ b/awscli/customizations/sessendemail.py @@ -22,52 +22,61 @@ """ -from awscli.customizations import utils from awscli.arguments import CustomArgument +from awscli.customizations import utils from awscli.customizations.utils import validate_mutually_exclusive_handler - -TO_HELP = ('The email addresses of the primary recipients. ' - 'You can specify multiple recipients as space-separated values') -CC_HELP = ('The email addresses of copy recipients (Cc). ' - 'You can specify multiple recipients as space-separated values') -BCC_HELP = ('The email addresses of blind-carbon-copy recipients (Bcc). ' - 'You can specify multiple recipients as space-separated values') +TO_HELP = ( + 'The email addresses of the primary recipients. ' + 'You can specify multiple recipients as space-separated values' +) +CC_HELP = ( + 'The email addresses of copy recipients (Cc). ' + 'You can specify multiple recipients as space-separated values' +) +BCC_HELP = ( + 'The email addresses of blind-carbon-copy recipients (Bcc). 
' + 'You can specify multiple recipients as space-separated values' +) SUBJECT_HELP = 'The subject of the message' TEXT_HELP = 'The raw text body of the message' HTML_HELP = 'The HTML body of the message' def register_ses_send_email(event_handler): - event_handler.register('building-argument-table.ses.send-email', - _promote_args) + event_handler.register( + 'building-argument-table.ses.send-email', _promote_args + ) event_handler.register( 'operation-args-parsed.ses.send-email', validate_mutually_exclusive_handler( - ['destination'], ['to', 'cc', 'bcc'])) + ['destination'], ['to', 'cc', 'bcc'] + ), + ) event_handler.register( 'operation-args-parsed.ses.send-email', - validate_mutually_exclusive_handler( - ['message'], ['text', 'html'])) + validate_mutually_exclusive_handler(['message'], ['text', 'html']), + ) def _promote_args(argument_table, **kwargs): argument_table['message'].required = False argument_table['destination'].required = False - utils.rename_argument(argument_table, 'source', - new_name='from') + utils.rename_argument(argument_table, 'source', new_name='from') argument_table['to'] = AddressesArgument( - 'to', 'ToAddresses', help_text=TO_HELP) + 'to', 'ToAddresses', help_text=TO_HELP + ) argument_table['cc'] = AddressesArgument( - 'cc', 'CcAddresses', help_text=CC_HELP) + 'cc', 'CcAddresses', help_text=CC_HELP + ) argument_table['bcc'] = AddressesArgument( - 'bcc', 'BccAddresses', help_text=BCC_HELP) + 'bcc', 'BccAddresses', help_text=BCC_HELP + ) argument_table['subject'] = BodyArgument( - 'subject', 'Subject', help_text=SUBJECT_HELP) - argument_table['text'] = BodyArgument( - 'text', 'Text', help_text=TEXT_HELP) - argument_table['html'] = BodyArgument( - 'html', 'Html', help_text=HTML_HELP) + 'subject', 'Subject', help_text=SUBJECT_HELP + ) + argument_table['text'] = BodyArgument('text', 'Text', help_text=TEXT_HELP) + argument_table['html'] = BodyArgument('html', 'Html', help_text=HTML_HELP) def _build_destination(params, key, value): @@ -88,11 +97,21 @@ def _build_message(params, key, value): class AddressesArgument(CustomArgument): - - def __init__(self, name, json_key, help_text='', dest=None, default=None, - action=None, required=None, choices=None, cli_type_name=None): - super(AddressesArgument, self).__init__(name=name, help_text=help_text, - required=required, nargs='+') + def __init__( + self, + name, + json_key, + help_text='', + dest=None, + default=None, + action=None, + required=None, + choices=None, + cli_type_name=None, + ): + super(AddressesArgument, self).__init__( + name=name, help_text=help_text, required=required, nargs='+' + ) self._json_key = json_key def add_to_params(self, parameters, value): @@ -101,13 +120,12 @@ def add_to_params(self, parameters, value): class BodyArgument(CustomArgument): - def __init__(self, name, json_key, help_text='', required=None): - super(BodyArgument, self).__init__(name=name, help_text=help_text, - required=required) + super(BodyArgument, self).__init__( + name=name, help_text=help_text, required=required + ) self._json_key = json_key def add_to_params(self, parameters, value): if value: _build_message(parameters, self._json_key, value) - diff --git a/awscli/customizations/sessionmanager.py b/awscli/customizations/sessionmanager.py index cfbffe22a298..16e0868cb0e2 100644 --- a/awscli/customizations/sessionmanager.py +++ b/awscli/customizations/sessionmanager.py @@ -10,15 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. -import logging -import json import errno +import json +import logging import os import re - from subprocess import check_call, check_output + +from awscli.clidriver import CLIOperationCaller, ServiceOperation from awscli.compat import ignore_user_entered_signals -from awscli.clidriver import ServiceOperation, CLIOperationCaller logger = logging.getLogger(__name__) @@ -26,13 +26,14 @@ 'SessionManagerPlugin is not found. ', 'Please refer to SessionManager Documentation here: ', 'http://docs.aws.amazon.com/console/systems-manager/', - 'session-manager-plugin-not-found' + 'session-manager-plugin-not-found', ) def register_ssm_session(event_handlers): - event_handlers.register('building-command-table.ssm', - add_custom_start_session) + event_handlers.register( + 'building-command-table.ssm', add_custom_start_session + ) def add_custom_start_session(session, command_table, **kwargs): @@ -40,8 +41,9 @@ def add_custom_start_session(session, command_table, **kwargs): name='start-session', parent_name='ssm', session=session, - operation_model=session.get_service_model( - 'ssm').operation_model('StartSession'), + operation_model=session.get_service_model('ssm').operation_model( + 'StartSession' + ), operation_caller=StartSessionCaller(session), ) @@ -84,8 +86,7 @@ def _normalize(self, v1, v2): class StartSessionCommand(ServiceOperation): def create_help_command(self): - help_command = super( - StartSessionCommand, self).create_help_command() + help_command = super(StartSessionCommand, self).create_help_command() # Change the output shape because the command provides no output. self._operation_model.output_shape = None return help_command @@ -95,12 +96,13 @@ class StartSessionCaller(CLIOperationCaller): LAST_PLUGIN_VERSION_WITHOUT_ENV_VAR = "1.2.497.0" DEFAULT_SSM_ENV_NAME = "AWS_SSM_START_SESSION_RESPONSE" - def invoke(self, service_name, operation_name, parameters, - parsed_globals): + def invoke(self, service_name, operation_name, parameters, parsed_globals): client = self._session.create_client( - service_name, region_name=parsed_globals.region, + service_name, + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) response = client.start_session(**parameters) session_id = response['SessionId'] region_name = client.meta.region_name @@ -108,8 +110,11 @@ def invoke(self, service_name, operation_name, parameters, # to fetch same profile credentials to make an api call in the plugin. # If --profile flag is configured, pass it to Session Manager plugin. # If not, set empty string. 
- profile_name = parsed_globals.profile \ - if parsed_globals.profile is not None else '' + profile_name = ( + parsed_globals.profile + if parsed_globals.profile is not None + else '' + ) endpoint_url = client.meta.endpoint_url ssm_env_name = self.DEFAULT_SSM_ENV_NAME @@ -147,19 +152,25 @@ def invoke(self, service_name, operation_name, parameters, # and handling in there with ignore_user_entered_signals(): # call executable with necessary input - check_call(["session-manager-plugin", - start_session_response, - region_name, - "StartSession", - profile_name, - json.dumps(parameters), - endpoint_url], env=env) + check_call( + [ + "session-manager-plugin", + start_session_response, + region_name, + "StartSession", + profile_name, + json.dumps(parameters), + endpoint_url, + ], + env=env, + ) return 0 except OSError as ex: if ex.errno == errno.ENOENT: - logger.debug('SessionManagerPlugin is not present', - exc_info=True) + logger.debug( + 'SessionManagerPlugin is not present', exc_info=True + ) # start-session api call returns response and starts the # session on ssm-agent and response is forwarded to # session-manager-plugin. If plugin is not present, terminate diff --git a/awscli/customizations/sso/__init__.py b/awscli/customizations/sso/__init__.py index b5e2a6cc2219..563f6bc0b44e 100644 --- a/awscli/customizations/sso/__init__.py +++ b/awscli/customizations/sso/__init__.py @@ -10,22 +10,22 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from botocore.exceptions import ProfileNotFound -from botocore.exceptions import UnknownCredentialError -from botocore.credentials import JSONFileCache - from awscli.customizations.sso.login import LoginCommand from awscli.customizations.sso.logout import LogoutCommand from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR +from botocore.credentials import JSONFileCache +from botocore.exceptions import ProfileNotFound, UnknownCredentialError def register_sso_commands(event_emitter): event_emitter.register( - 'building-command-table.sso', add_sso_commands, + 'building-command-table.sso', + add_sso_commands, ) event_emitter.register( - 'session-initialized', inject_json_file_cache, - unique_id='inject_sso_json_file_cache' + 'session-initialized', + inject_json_file_cache, + unique_id='inject_sso_json_file_cache', ) diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py index 6a298cf3985f..a16a946ed260 100644 --- a/awscli/customizations/sso/login.py +++ b/awscli/customizations/sso/login.py @@ -11,7 +11,10 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from awscli.customizations.sso.utils import ( - do_sso_login, PrintOnlyHandler, LOGIN_ARGS, BaseSSOCommand, + LOGIN_ARGS, + BaseSSOCommand, + PrintOnlyHandler, + do_sso_login, ) from awscli.customizations.utils import uni_print @@ -31,11 +34,11 @@ class LoginCommand(BaseSSOCommand): { 'name': 'sso-session', 'help_text': ( - 'An explicit SSO session to use to login. By default, this ' - 'command will login using the SSO session configured as part ' - 'of the requested profile and generally does not require this ' - 'argument to be set.' - ) + 'An explicit SSO session to use to login. 
By default, this ' + 'command will login using the SSO session configured as part ' + 'of the requested profile and generally does not require this ' + 'argument to be set.' + ), } ] diff --git a/awscli/customizations/sso/logout.py b/awscli/customizations/sso/logout.py index be2e6e356e2c..1d101f3fdc6f 100644 --- a/awscli/customizations/sso/logout.py +++ b/awscli/customizations/sso/logout.py @@ -14,12 +14,9 @@ import logging import os -from botocore.exceptions import ClientError - from awscli.customizations.commands import BasicCommand -from awscli.customizations.sso.utils import SSO_TOKEN_DIR -from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR - +from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR, SSO_TOKEN_DIR +from botocore.exceptions import ClientError LOG = logging.getLogger(__name__) @@ -35,7 +32,9 @@ class LogoutCommand(BasicCommand): ARG_TABLE = [] def _run_main(self, parsed_args, parsed_globals): - SSOTokenSweeper(self._session, parsed_globals).delete_credentials(SSO_TOKEN_DIR) + SSOTokenSweeper(self._session, parsed_globals).delete_credentials( + SSO_TOKEN_DIR + ) SSOCredentialSweeper().delete_credentials(AWS_CREDS_CACHE_DIR) return 0 diff --git a/awscli/customizations/sso/utils.py b/awscli/customizations/sso/utils.py index 5c947d15406c..d0acf6e3753e 100644 --- a/awscli/customizations/sso/utils.py +++ b/awscli/customizations/sso/utils.py @@ -18,28 +18,28 @@ import time import webbrowser from functools import partial -from http.server import HTTPServer, BaseHTTPRequestHandler - -from botocore.compat import urlparse, parse_qs -from botocore.credentials import JSONFileCache -from botocore.exceptions import ( - AuthCodeFetcherError, - PendingAuthorizationExpiredError, -) -from botocore.utils import SSOTokenFetcher, SSOTokenFetcherAuth -from botocore.utils import original_ld_library_path +from http.server import BaseHTTPRequestHandler, HTTPServer from awscli import __version__ as awscli_version from awscli.customizations.assumerole import CACHE_DIR as AWS_CREDS_CACHE_DIR from awscli.customizations.commands import BasicCommand from awscli.customizations.exceptions import ConfigurationError from awscli.customizations.utils import uni_print +from botocore.compat import parse_qs, urlparse +from botocore.credentials import JSONFileCache +from botocore.exceptions import ( + AuthCodeFetcherError, + PendingAuthorizationExpiredError, +) +from botocore.utils import ( + SSOTokenFetcher, + SSOTokenFetcherAuth, + original_ld_library_path, +) LOG = logging.getLogger(__name__) -SSO_TOKEN_DIR = os.path.expanduser( - os.path.join('~', '.aws', 'sso', 'cache') -) +SSO_TOKEN_DIR = os.path.expanduser(os.path.join('~', '.aws', 'sso', 'cache')) LOGIN_ARGS = [ { @@ -49,7 +49,7 @@ 'help_text': ( 'Disables automatically opening the verification URL in the ' 'default browser.' - ) + ), }, { 'name': 'use-device-code', @@ -58,8 +58,8 @@ 'help_text': ( 'Uses the Device Code authorization grant and login flow ' 'instead of the Authorization Code flow.' 
- ) - } + ), + }, ] @@ -74,16 +74,16 @@ def _sso_json_dumps(obj): def do_sso_login( - session, - sso_region, - start_url, - parsed_globals, - token_cache=None, - on_pending_authorization=None, - force_refresh=False, - registration_scopes=None, - session_name=None, - use_device_code=False, + session, + sso_region, + start_url, + parsed_globals, + token_cache=None, + on_pending_authorization=None, + force_refresh=False, + registration_scopes=None, + session_name=None, + use_device_code=False, ): if token_cache is None: token_cache = JSONFileCache(SSO_TOKEN_DIR, dumps_func=_sso_json_dumps) @@ -153,7 +153,6 @@ def __call__( f'Browser will not be automatically opened.\n' f'Please visit the following URL:\n' f'\n{verificationUri}\n' - ) user_code_msg = ( @@ -187,10 +186,7 @@ f'\n{verificationUri}\n' ) - user_code_msg = ( - f'\nThen enter the code:\n' - f'\n{userCode}\n' - ) + user_code_msg = f'\nThen enter the code:\n' f'\n{userCode}\n' uni_print(opening_msg, self._outfile) if userCode: uni_print(user_code_msg, self._outfile) @@ -206,6 +202,7 @@ class AuthCodeFetcher: """Manages the local web server that will be used to retrieve the authorization code from the OAuth callback """ + # How many seconds handle_request should wait for an incoming request _REQUEST_TIMEOUT = 10 # How long we wait overall for the callback @@ -229,14 +226,18 @@ def redirect_uri_without_port(self): return 'http://127.0.0.1/oauth/callback' def redirect_uri_with_port(self): - return f'http://127.0.0.1:{self.http_server.server_port}/oauth/callback' + return ( + f'http://127.0.0.1:{self.http_server.server_port}/oauth/callback' + ) def get_auth_code_and_state(self): """Blocks until the expected redirect request with either the authorization code/state or an error is handled """ start = time.time() - while not self._is_done and time.time() < start + self._OVERALL_TIMEOUT: + while ( + not self._is_done and time.time() < start + self._OVERALL_TIMEOUT + ): self.http_server.handle_request() self.http_server.server_close() @@ -256,6 +257,7 @@ class OAuthCallbackHandler(BaseHTTPRequestHandler): the auth code and state parameters, and displaying a page directing the user to return to the CLI. """ + def __init__(self, auth_code_fetcher, *args, **kwargs): self._auth_code_fetcher = auth_code_fetcher super().__init__(*args, **kwargs) diff --git a/awscli/customizations/streamingoutputarg.py b/awscli/customizations/streamingoutputarg.py index 2cba59a03ff4..1515389ed79b 100644 --- a/awscli/customizations/streamingoutputarg.py +++ b/awscli/customizations/streamingoutputarg.py @@ -10,13 +10,13 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from botocore.model import Shape - from awscli.arguments import BaseCLIArgument +from botocore.model import Shape -def add_streaming_output_arg(argument_table, operation_model, - session, **kwargs): +def add_streaming_output_arg( + argument_table, operation_model, session, **kwargs +): # Implementation detail: hooked up to 'building-argument-table' # event. 
if _has_streaming_output(operation_model): @@ -24,7 +24,9 @@ def add_streaming_output_arg(argument_table, operation_model, argument_table['outfile'] = StreamingOutputArgument( response_key=streaming_argument_name, operation_model=operation_model, - session=session, name='outfile') + session=session, + name='outfile', + ) def _has_streaming_output(model): @@ -36,15 +38,16 @@ def _get_streaming_argument_name(model): class StreamingOutputArgument(BaseCLIArgument): - BUFFER_SIZE = 32768 HELP = 'Filename where the content will be saved' - def __init__(self, response_key, operation_model, name, - session, buffer_size=None): + def __init__( + self, response_key, operation_model, name, session, buffer_size=None + ): self._name = name - self.argument_model = Shape('StreamingOutputArgument', - {'type': 'string'}) + self.argument_model = Shape( + 'StreamingOutputArgument', {'type': 'string'} + ) if buffer_size is None: buffer_size = self.BUFFER_SIZE self._buffer_size = buffer_size @@ -81,15 +84,15 @@ def documentation(self): return self.HELP def add_to_parser(self, parser): - parser.add_argument(self._name, metavar=self.py_name, - help=self.HELP) + parser.add_argument(self._name, metavar=self.py_name, help=self.HELP) def add_to_params(self, parameters, value): self._output_file = value service_id = self._operation_model.service_model.service_id.hyphenize() operation_name = self._operation_model.name - self._session.register('after-call.%s.%s' % ( - service_id, operation_name), self.save_file) + self._session.register( + 'after-call.%s.%s' % (service_id, operation_name), self.save_file + ) def save_file(self, parsed, **kwargs): if self._response_key not in parsed: diff --git a/awscli/customizations/timestampformat.py b/awscli/customizations/timestampformat.py index d7e1987b1fd1..ee3d87062262 100644 --- a/awscli/customizations/timestampformat.py +++ b/awscli/customizations/timestampformat.py @@ -27,9 +27,10 @@ in the future. 
""" -from botocore.utils import parse_timestamp -from botocore.exceptions import ProfileNotFound + from awscli.customizations.exceptions import ConfigurationError +from botocore.exceptions import ProfileNotFound +from botocore.utils import parse_timestamp def register_timestamp_format(event_handlers): diff --git a/awscli/customizations/toplevelbool.py b/awscli/customizations/toplevelbool.py index 8014d2dd98d5..826d6c5ccb04 100644 --- a/awscli/customizations/toplevelbool.py +++ b/awscli/customizations/toplevelbool.py @@ -16,15 +16,14 @@ """ + import logging from functools import partial - -from awscli.argprocess import detect_shape_structure from awscli import arguments -from awscli.customizations.utils import validate_mutually_exclusive_handler +from awscli.argprocess import detect_shape_structure from awscli.customizations.exceptions import ParamValidationError - +from awscli.customizations.utils import validate_mutually_exclusive_handler LOG = logging.getLogger(__name__) # This sentinel object is used to distinguish when @@ -34,17 +33,20 @@ def register_bool_params(event_handler): - event_handler.register('building-argument-table.ec2.*', - partial(pull_up_bool, - event_handler=event_handler)) + event_handler.register( + 'building-argument-table.ec2.*', + partial(pull_up_bool, event_handler=event_handler), + ) def _qualifies_for_simplification(arg_model): if detect_shape_structure(arg_model) == 'structure(scalar)': members = arg_model.members - if (len(members) == 1 and - list(members.keys())[0] == 'Value' and - list(members.values())[0].type_name == 'boolean'): + if ( + len(members) == 1 + and list(members.keys())[0] == 'Value' + and list(members.values())[0].type_name == 'boolean' + ): return True return False @@ -56,8 +58,8 @@ def pull_up_bool(argument_table, event_handler, **kwargs): boolean_pairs = [] event_handler.register( 'operation-args-parsed.ec2.*', - partial(validate_boolean_mutex_groups, - boolean_pairs=boolean_pairs)) + partial(validate_boolean_mutex_groups, boolean_pairs=boolean_pairs), + ) for value in list(argument_table.values()): if hasattr(value, 'argument_model'): arg_model = value.argument_model @@ -66,18 +68,25 @@ def pull_up_bool(argument_table, event_handler, **kwargs): # one that supports --option and --option # and another arg of --no-option. new_arg = PositiveBooleanArgument( - value.name, arg_model, value._operation_model, + value.name, + arg_model, + value._operation_model, value._event_emitter, group_name=value.name, - serialized_name=value._serialized_name) + serialized_name=value._serialized_name, + ) argument_table[value.name] = new_arg negative_name = 'no-%s' % value.name negative_arg = NegativeBooleanParameter( - negative_name, arg_model, value._operation_model, + negative_name, + arg_model, + value._operation_model, value._event_emitter, - action='store_true', dest='no_%s' % new_arg.py_name, + action='store_true', + dest='no_%s' % new_arg.py_name, group_name=value.name, - serialized_name=value._serialized_name) + serialized_name=value._serialized_name, + ) argument_table[negative_name] = negative_arg # If we've pulled up a structure(scalar) arg # into a pair of top level boolean args, we need @@ -90,19 +99,33 @@ def pull_up_bool(argument_table, event_handler, **kwargs): def validate_boolean_mutex_groups(boolean_pairs, parsed_args, **kwargs): # Validate we didn't pass in an --option and a --no-option. 
for positive, negative in boolean_pairs: - if getattr(parsed_args, positive.py_name) is not _NOT_SPECIFIED and \ - getattr(parsed_args, negative.py_name) is not _NOT_SPECIFIED: + if ( + getattr(parsed_args, positive.py_name) is not _NOT_SPECIFIED + and getattr(parsed_args, negative.py_name) is not _NOT_SPECIFIED + ): raise ParamValidationError( 'Cannot specify both the "%s" option and ' - 'the "%s" option.' % (positive.cli_name, negative.cli_name)) + 'the "%s" option.' % (positive.cli_name, negative.cli_name) + ) class PositiveBooleanArgument(arguments.CLIArgument): - def __init__(self, name, argument_model, operation_model, - event_emitter, serialized_name, group_name): + def __init__( + self, + name, + argument_model, + operation_model, + event_emitter, + serialized_name, + group_name, + ): super(PositiveBooleanArgument, self).__init__( - name, argument_model, operation_model, event_emitter, - serialized_name=serialized_name) + name, + argument_model, + operation_model, + event_emitter, + serialized_name=serialized_name, + ) self._group_name = group_name @property @@ -113,11 +136,13 @@ def add_to_parser(self, parser): # We need to support three forms: # --option-name # --option-name Value=(true|false) - parser.add_argument(self.cli_name, - help=self.documentation, - action='store', - default=_NOT_SPECIFIED, - nargs='?') + parser.add_argument( + self.cli_name, + help=self.documentation, + action='store', + default=_NOT_SPECIFIED, + nargs='?', + ) def add_to_params(self, parameters, value): if value is _NOT_SPECIFIED: @@ -131,17 +156,29 @@ def add_to_params(self, parameters, value): parameters[self._serialized_name] = {'Value': True} else: # Otherwise the arg was specified with a value. - parameters[self._serialized_name] = self._unpack_argument( - value) + parameters[self._serialized_name] = self._unpack_argument(value) class NegativeBooleanParameter(arguments.BooleanArgument): - def __init__(self, name, argument_model, operation_model, - event_emitter, serialized_name, action='store_true', - dest=None, group_name=None): + def __init__( + self, + name, + argument_model, + operation_model, + event_emitter, + serialized_name, + action='store_true', + dest=None, + group_name=None, + ): super(NegativeBooleanParameter, self).__init__( - name, argument_model, operation_model, event_emitter, - default=_NOT_SPECIFIED, serialized_name=serialized_name) + name, + argument_model, + operation_model, + event_emitter, + default=_NOT_SPECIFIED, + serialized_name=serialized_name, + ) self._group_name = group_name def add_to_params(self, parameters, value): diff --git a/awscli/customizations/translate.py b/awscli/customizations/translate.py index 38add1564dc1..f398dd94183b 100644 --- a/awscli/customizations/translate.py +++ b/awscli/customizations/translate.py @@ -12,10 +12,10 @@ # language governing permissions and limitations under the License. 
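# For context on the boolean pull-up above (option name illustrative): an
# ec2 parameter modeled as a structure whose only member is a boolean
# `Value` surfaces as a flag pair, so each of the following parses:
#
#   aws ec2 some-command --some-option                # {'Value': True}
#   aws ec2 some-command --some-option Value=true     # explicit form
#   aws ec2 some-command --no-some-option             # {'Value': False}
#
# while combining --some-option with --no-some-option trips the mutex
# validation registered on 'operation-args-parsed.ec2.*'.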
import copy -from awscli.arguments import CustomArgument, CLIArgument +from awscli.arguments import CLIArgument, CustomArgument from awscli.customizations.binaryhoist import ( - BinaryBlobArgumentHoister, ArgumentParameters, + ArgumentParameters, + BinaryBlobArgumentHoister, ) FILE_DOCSTRING = ( @@ -41,22 +41,24 @@ def register_translate_import_terminology(cli): - cli.register( - "building-argument-table.translate.import-terminology", - BinaryBlobArgumentHoister( - new_argument=ArgumentParameters( - name="data-file", - help_text=FILE_DOCSTRING, - required=True, - ), - original_argument=ArgumentParameters( - name="terminology-data", - member="File", - required=False, - ), - error_if_original_used=FILE_ERRORSTRING, - ), - ), + cli.register( + "building-argument-table.translate.import-terminology", + BinaryBlobArgumentHoister( + new_argument=ArgumentParameters( + name="data-file", + help_text=FILE_DOCSTRING, + required=True, + ), + original_argument=ArgumentParameters( + name="terminology-data", + member="File", + required=False, + ), + error_if_original_used=FILE_ERRORSTRING, + ), + ) cli.register( "building-argument-table.translate.translate-document", diff --git a/awscli/customizations/utils.py b/awscli/customizations/utils.py index 2c281cf0e53e..4bccb54207f8 100644 --- a/awscli/customizations/utils.py +++ b/awscli/customizations/utils.py @@ -14,20 +14,18 @@ Utility functions to make it easier to work with customizations. """ + import copy import re import sys import xml -from botocore.exceptions import ClientError from awscli.customizations.exceptions import ParamValidationError - +from botocore.exceptions import ClientError _SENTENCE_DELIMETERS_REGEX = re.compile(r'[.:]+') -_LINE_BREAK_CHARS = [ - '\n', - '\u2028' -] +_LINE_BREAK_CHARS = ['\n', '\u2028'] + def rename_argument(argument_table, existing_name, new_name): current = argument_table[existing_name] @@ -93,6 +91,7 @@ def alias_command(command_table, existing_name, new_name): def validate_mutually_exclusive_handler(*groups): def _handler(parsed_args, **kwargs): return validate_mutually_exclusive(parsed_args, *groups) + return _handler @@ -140,8 +139,9 @@ def s3_bucket_exists(s3_client, bucket_name): return bucket_exists -def create_client_from_parsed_globals(session, service_name, parsed_globals, - overrides=None): +def create_client_from_parsed_globals( + session, service_name, parsed_globals, overrides=None +): """Creates a service client, taking parsed_globals into account Any values specified in overrides will override the returned dict. Note @@ -197,8 +197,9 @@ def uni_print(statement, out_file=None): # ``sys.stdout.encoding`` is ``None``. if new_encoding is None: new_encoding = 'ascii' - new_statement = statement.encode( - new_encoding, 'replace').decode(new_encoding) + new_statement = statement.encode(new_encoding, 'replace').decode( + new_encoding + ) out_file.write(new_statement) out_file.flush() diff --git a/awscli/customizations/waiters.py b/awscli/customizations/waiters.py index dd90bd8aa0f8..468dfbbbfdcc 100644 --- a/awscli/customizations/waiters.py +++ b/awscli/customizations/waiters.py @@ -10,13 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
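# For context on the hoist above (file path illustrative): promoting the
# File member of terminology-data to a dedicated --data-file argument
# means the blob is supplied on its own, roughly
#
#   aws translate import-terminology --name example-terms \
#       --terminology-data Format=CSV --data-file fileb://terms.csv
#
# while passing File= inside --terminology-data instead surfaces the
# FILE_ERRORSTRING message referenced above.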
+from awscli.clidriver import ServiceOperation +from awscli.customizations.commands import ( + BasicCommand, + BasicDocHandler, + BasicHelp, +) from botocore import xform_name from botocore.exceptions import DataNotFoundError -from awscli.clidriver import ServiceOperation -from awscli.customizations.commands import BasicCommand, BasicHelp, \ - BasicDocHandler - def register_add_waiters(cli): cli.register('building-command-table', add_waiters) @@ -29,15 +31,17 @@ def add_waiters(command_table, session, command_object, **kwargs): service_model = getattr(command_object, 'service_model', None) if service_model is not None: # Get a client out of the service object. - waiter_model = get_waiter_model_from_service_model(session, - service_model) + waiter_model = get_waiter_model_from_service_model( + session, service_model + ) if waiter_model is None: return waiter_names = waiter_model.waiter_names # If there are waiters make a wait command. if waiter_names: command_table['wait'] = WaitCommand( - session, waiter_model, service_model) + session, waiter_model, service_model + ) def get_waiter_model_from_service_model(session, service_model): @@ -50,9 +54,11 @@ def get_waiter_model_from_service_model(session, service_model): class WaitCommand(BasicCommand): NAME = 'wait' - DESCRIPTION = ('Wait until a particular condition is satisfied. Each ' - 'subcommand polls an API until the listed requirement ' - 'is met.') + DESCRIPTION = ( + 'Wait until a particular condition is satisfied. Each ' + 'subcommand polls an API until the listed requirement ' + 'is met.' + ) def __init__(self, session, waiter_model, service_model): self._model = waiter_model @@ -60,7 +66,7 @@ def __init__(self, session, waiter_model, service_model): self.waiter_cmd_builder = WaiterStateCommandBuilder( session=session, model=self._model, - service_model=self._service_model + service_model=self._service_model, ) super(WaitCommand, self).__init__(session) @@ -76,10 +82,13 @@ def _build_subcommand_table(self): return subcommand_table def create_help_command(self): - return BasicHelp(self._session, self, - command_table=self.subcommand_table, - arg_table=self.arg_table, - event_handler_class=WaiterCommandDocHandler) + return BasicHelp( + self._session, + self, + command_table=self.subcommand_table, + arg_table=self.arg_table, + event_handler_class=WaiterCommandDocHandler, + ) class WaiterStateCommandBuilder(object): @@ -97,8 +106,9 @@ def build_all_waiter_state_cmds(self, subcommand_table): waiter_names = self._model.waiter_names for waiter_name in waiter_names: waiter_cli_name = xform_name(waiter_name, '-') - subcommand_table[waiter_cli_name] = \ - self._build_waiter_state_cmd(waiter_name) + subcommand_table[waiter_cli_name] = self._build_waiter_state_cmd( + waiter_name + ) def _build_waiter_state_cmd(self, waiter_name): # Get the waiter @@ -117,7 +127,8 @@ def _build_waiter_state_cmd(self, waiter_name): operation_model = self._service_model.operation_model(operation_name) waiter_state_command = WaiterStateCommand( - name=waiter_cli_name, parent_name='wait', + name=waiter_cli_name, + parent_name='wait', operation_caller=WaiterCaller(self._session, waiter_name), session=self._session, operation_model=operation_model, @@ -133,11 +144,11 @@ def _build_waiter_state_cmd(self, waiter_name): class WaiterStateDocBuilder(object): SUCCESS_DESCRIPTIONS = { - 'error': u'%s is thrown ', - 'path': u'%s ', - 'pathAll': u'%s for all elements ', - 'pathAny': u'%s for any element ', - 'status': u'%s response is received ' + 'error': '%s is thrown ', + 
'path': '%s ', + 'pathAll': '%s for all elements ', + 'pathAny': '%s for any element ', + 'status': '%s response is received ', } def __init__(self, waiter_config): @@ -149,7 +160,7 @@ def build_waiter_state_description(self): # description is provided, use a heuristic to generate a description # for the waiter. if not description: - description = u'Wait until ' + description = 'Wait until ' # Look at all of the acceptors and find the success state # acceptor. for acceptor in self._waiter_config.acceptors: @@ -159,9 +170,11 @@ def build_waiter_state_description(self): break # Include what operation is being used. description += self._build_operation_description( - self._waiter_config.operation) + self._waiter_config.operation + ) description += self._build_polling_description( - self._waiter_config.delay, self._waiter_config.max_attempts) + self._waiter_config.delay, self._waiter_config.max_attempts + ) return description def _build_success_description(self, acceptor): @@ -172,8 +185,9 @@ def _build_success_description(self, acceptor): # If success is based off of the state of a resource include the # description about what resource is looked at. if matcher in ['path', 'pathAny', 'pathAll']: - resource_description = u'JMESPath query %s returns ' % \ - acceptor.argument + resource_description = ( + 'JMESPath query %s returns ' % acceptor.argument + ) # Prepend the resource description to the template description success_description = resource_description + success_description # Complete the description by filling in the expected success state. @@ -182,14 +196,14 @@ def _build_success_description(self, acceptor): def _build_operation_description(self, operation): operation_name = xform_name(operation).replace('_', '-') - return u'when polling with ``%s``.' % operation_name + return 'when polling with ``%s``.' % operation_name def _build_polling_description(self, delay, max_attempts): description = ( ' It will poll every %s seconds until a successful state ' 'has been reached. This will exit with a return code of 255 ' - 'after %s failed checks.' - % (delay, max_attempts)) + 'after %s failed checks.' 
% (delay, max_attempts) + ) return description @@ -200,9 +214,11 @@ def __init__(self, session, waiter_name): def invoke(self, service_name, operation_name, parameters, parsed_globals): client = self._session.create_client( - service_name, region_name=parsed_globals.region, + service_name, + region_name=parsed_globals.region, endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl) + verify=parsed_globals.verify_ssl, + ) waiter = client.get_waiter(xform_name(self._waiter_name)) waiter.wait(**parameters) return 0 diff --git a/awscli/customizations/wizard/app.py b/awscli/customizations/wizard/app.py index f62b7f0f93b1..da4f14eafa55 100644 --- a/awscli/customizations/wizard/app.py +++ b/awscli/customizations/wizard/app.py @@ -18,11 +18,13 @@ from prompt_toolkit.application import Application from awscli.customizations.wizard import core -from awscli.customizations.wizard.ui.style import get_default_style -from awscli.customizations.wizard.ui.keybindings import get_default_keybindings from awscli.customizations.wizard.exceptions import ( - UnexpectedWizardException, UnableToRunWizardError, InvalidChoiceException + InvalidChoiceException, + UnableToRunWizardError, + UnexpectedWizardException, ) +from awscli.customizations.wizard.ui.keybindings import get_default_keybindings +from awscli.customizations.wizard.ui.style import get_default_style from awscli.utils import json_encoder @@ -39,9 +41,19 @@ def run(self, loaded): class WizardApp(Application): - def __init__(self, layout, values, traverser, executor, style=None, - key_bindings=None, full_screen=True, output=None, - app_input=None, file_io=None): + def __init__( + self, + layout, + values, + traverser, + executor, + style=None, + key_bindings=None, + full_screen=True, + output=None, + app_input=None, + file_io=None, + ): self.values = values self.traverser = traverser self.executor = executor @@ -56,8 +68,12 @@ def __init__(self, layout, values, traverser, executor, style=None, file_io = FileIO() self.file_io = file_io super().__init__( - layout=layout, style=style, key_bindings=key_bindings, - full_screen=full_screen, output=output, input=app_input, + layout=layout, + style=style, + key_bindings=key_bindings, + full_screen=full_screen, + output=output, + input=app_input, ) def run(self, pre_run=None, **kwargs): @@ -71,9 +87,7 @@ def run(self, pre_run=None, **kwargs): loop.close() def _handle_exception(self, loop, context): - self.exit( - exception=UnexpectedWizardException(context['exception']) - ) + self.exit(exception=UnexpectedWizardException(context['exception'])) class WizardTraverser: @@ -106,18 +120,19 @@ def get_current_prompt_choices(self): def current_prompt_has_details(self): return 'details' in self._prompt_definitions.get( - self._current_prompt, {}) + self._current_prompt, {} + ) def submit_prompt_answer(self, answer): definition = self._prompt_definitions[self._current_prompt] if 'choices' in definition: answer = self._convert_display_value_to_actual_value( - self._get_choices(self._current_prompt), - answer + self._get_choices(self._current_prompt), answer ) if 'datatype' in definition: answer = core.DataTypeConverter.convert( - definition['datatype'], answer) + definition['datatype'], answer + ) self._values[self._current_prompt] = answer @@ -153,8 +168,11 @@ def is_prompt_visible(self, value_name): return self._prompt_meets_condition(value_name) def is_prompt_details_visible_by_default(self, value_name): - return self._prompt_definitions[value_name].get( - 'details', {}).get('visible', False) + 
return ( + self._prompt_definitions[value_name] + .get('details', {}) + .get('visible', False) + ) def has_visited_section(self, section_name): return section_name in self._visited_sections @@ -233,10 +251,7 @@ def _get_normalized_choice_values(self, choices): for choice in choices: if isinstance(choice, str): normalized_choices.append( - { - 'display': choice, - 'actual_value': choice - } + {'display': choice, 'actual_value': choice} ) else: normalized_choices.append(choice) @@ -255,7 +270,7 @@ def _convert_display_value_to_actual_value(self, choices, display_value): def _get_next_prompt(self): prompts = list(self._prompt_definitions) current_pos = prompts.index(self._current_prompt) - for prompt in prompts[current_pos+1:]: + for prompt in prompts[current_pos + 1 :]: if self._prompt_meets_condition(prompt): return prompt return self.DONE @@ -271,12 +286,14 @@ def _prompt_meets_condition(self, value_name): def get_output(self): template_step = core.TemplateStep() return template_step.run_step( - self._definition[self.OUTPUT], self._values) + self._definition[self.OUTPUT], self._values + ) class WizardValues(MutableMapping): - def __init__(self, definition, value_retrieval_steps=None, - exception_handler=None): + def __init__( + self, definition, value_retrieval_steps=None, exception_handler=None + ): self._definition = definition if value_retrieval_steps is None: value_retrieval_steps = {} diff --git a/awscli/customizations/wizard/commands.py b/awscli/customizations/wizard/commands.py index 3c1d7f9f3cba..1ab72137aaa2 100644 --- a/awscli/customizations/wizard/commands.py +++ b/awscli/customizations/wizard/commands.py @@ -10,10 +10,10 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.wizard import devcommands, factory -from awscli.customizations.wizard.loader import WizardLoader from awscli.customizations.commands import BasicCommand, BasicHelp from awscli.customizations.exceptions import ParamValidationError +from awscli.customizations.wizard import devcommands, factory +from awscli.customizations.wizard.loader import WizardLoader def register_wizard_commands(event_handlers): @@ -25,8 +25,9 @@ def register_wizard_commands(event_handlers): def _register_wizards_for_commands(commands, event_handlers): for command in commands: - event_handlers.register('building-command-table.%s' % command, - _add_wizard_command) + event_handlers.register( + 'building-command-table.%s' % command, _add_wizard_command + ) def _add_wizard_command(session, command_object, command_table, **kwargs): @@ -36,7 +37,7 @@ def _add_wizard_command(session, command_object, command_table, **kwargs): session=session, loader=WizardLoader(), parent_command=command_object.name, - runner={'0.1': v1_runner, '0.2': v2_runner} + runner={'0.1': v1_runner, '0.2': v2_runner}, ) command_table['wizard'] = cmd @@ -47,8 +48,9 @@ class TopLevelWizardCommand(BasicCommand): 'Interactive command for creating and configuring AWS resources.' 
) - def __init__(self, session, loader, parent_command, runner, - wizard_name='_main'): + def __init__( + self, session, loader, parent_command, runner, wizard_name='_main' + ): super(TopLevelWizardCommand, self).__init__(session) self._session = session self._loader = loader @@ -58,12 +60,17 @@ def __init__(self, session, loader, parent_command, runner, def _build_subcommand_table(self): subcommand_table = super( - TopLevelWizardCommand, self)._build_subcommand_table() + TopLevelWizardCommand, self + )._build_subcommand_table() wizards = self._get_available_wizards() for name in wizards: - cmd = SingleWizardCommand(self._session, self._loader, - self._parent_command, self._runner, - wizard_name=name) + cmd = SingleWizardCommand( + self._session, + self._loader, + self._parent_command, + self._runner, + wizard_name=name, + ) subcommand_table[name] = cmd self._add_lineage(subcommand_table) return subcommand_table @@ -80,12 +87,14 @@ def _run_main(self, parsed_args, parsed_globals): self._raise_usage_error() def _wizard_exists(self): - return self._loader.wizard_exists(self._parent_command, - self._wizard_name) + return self._loader.wizard_exists( + self._parent_command, self._wizard_name + ) def _run_wizard(self): loaded = self._loader.load_wizard( - self._parent_command, self._wizard_name) + self._parent_command, self._wizard_name + ) version = loaded.get('version') if version in self._runner: self._runner[version].run(loaded) @@ -95,15 +104,19 @@ def _run_wizard(self): ) def create_help_command(self): - return BasicHelp(self._session, self, - command_table=self.subcommand_table, - arg_table=self.arg_table) + return BasicHelp( + self._session, + self, + command_table=self.subcommand_table, + arg_table=self.arg_table, + ) class SingleWizardCommand(TopLevelWizardCommand): def __init__(self, session, loader, parent_command, runner, wizard_name): super(SingleWizardCommand, self).__init__( - session, loader, parent_command, runner, wizard_name) + session, loader, parent_command, runner, wizard_name + ) self._session = session self._loader = loader self._runner = runner @@ -119,15 +132,19 @@ def _run_main(self, parsed_args, parsed_globals): def create_help_command(self): loaded = self._loader.load_wizard( - self._parent_command, self._wizard_name, + self._parent_command, + self._wizard_name, + ) + return WizardHelpCommand( + self._session, self, self.subcommand_table, self.arg_table, loaded ) - return WizardHelpCommand(self._session, self, self.subcommand_table, - self.arg_table, loaded) class WizardHelpCommand(BasicHelp): - def __init__(self, session, command_object, command_table, arg_table, - loaded_wizard): - super(WizardHelpCommand, self).__init__(session, command_object, - command_table, arg_table) + def __init__( + self, session, command_object, command_table, arg_table, loaded_wizard + ): + super(WizardHelpCommand, self).__init__( + session, command_object, command_table, arg_table + ) self._description = loaded_wizard.get('description', '') diff --git a/awscli/customizations/wizard/core.py b/awscli/customizations/wizard/core.py index 97f1185c026f..bb2290e0009f 100644 --- a/awscli/customizations/wizard/core.py +++ b/awscli/customizations/wizard/core.py @@ -11,19 +11,19 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
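# For context on the wiring above: each wizard becomes a subcommand of
# its parent command's `wizard` group, so invocations take the form
# (parent name illustrative)
#
#   aws <parent> wizard          # runs the '_main' wizard
#   aws <parent> wizard <name>   # runs a specific named wizard
#
# with the runner selected by the wizard document's `version` key
# ('0.1' or '0.2' in this change).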
"""Core planner and executor for wizards.""" -import re + import json import os +import re from functools import partial -from botocore import xform_name import jmespath -from awscli.utils import json_encoder from awscli.customizations.wizard.exceptions import ( - InvalidDataTypeConversionException + InvalidDataTypeConversionException, ) - +from awscli.utils import json_encoder +from botocore import xform_name DONE_SECTION_NAME = '__DONE__' OUTPUT_SECTION_NAME = '__OUTPUT__' @@ -118,7 +118,6 @@ def run_step(self, step_definition, parameters): class StaticStep(BaseStep): - NAME = 'static' def run_step(self, step_definition, parameters): @@ -126,7 +125,6 @@ def run_step(self, step_definition, parameters): class PromptStep(BaseStep): - NAME = 'prompt' def __init__(self, prompter): @@ -135,13 +133,14 @@ def __init__(self, prompter): 'int': int, 'float': float, 'str': str, - 'bool': lambda x: True if x.lower() == 'true' else False + 'bool': lambda x: True if x.lower() == 'true' else False, } def run_step(self, step_definition, parameters): choices = self._get_choices(step_definition, parameters) - response = self._prompter.prompt(step_definition['description'], - choices=choices) + response = self._prompter.prompt( + step_definition['description'], choices=choices + ) return self._convert_data_type_if_needed(response, step_definition) def _get_choices(self, step_definition, parameters): @@ -180,13 +179,13 @@ def run_step(self, step_definition, parameters): # They want the "No" choice to be the starting value so we # need to reverse the choices. choices[:] = choices[::-1] - response = self._prompter.prompt(step_definition['question'], - choices=choices) + response = self._prompter.prompt( + step_definition['question'], choices=choices + ) return response class FilePromptStep(BaseStep): - NAME = 'fileprompt' def __init__(self, prompter): @@ -198,13 +197,12 @@ def run_step(self, step_definition, parameters): class TemplateStep(BaseStep): - NAME = 'template' CONDITION_PATTERN = re.compile( r'(?:^[ \t]*)?{%\s*if\s+(?P.+?)\s+%}(?:\s*[$|\n])?' 
r'(?P.+?)[ \t]*{%\s*endif\s*%}[$|\n]?', - re.DOTALL | re.MULTILINE | re.IGNORECASE + re.DOTALL | re.MULTILINE | re.IGNORECASE, ) _SUPPORTED_CONDITION_OPERATORS = [ '==', @@ -247,7 +245,6 @@ def run_step(self, step_definition, parameters): class APICallStep(BaseStep): - NAME = 'apicall' def __init__(self, api_invoker): @@ -256,18 +253,18 @@ def __init__(self, api_invoker): def run_step(self, step_definition, parameters): service, op_name = step_definition['operation'].split('.', 1) return self._api_invoker.invoke( - service=service, operation=op_name, + service=service, + operation=op_name, api_params=step_definition['params'], plan_variables=parameters, optional_api_params=step_definition.get('optional_params'), query=step_definition.get('query'), cache=step_definition.get('cache', False), - paginate=step_definition.get('paginate', False) + paginate=step_definition.get('paginate', False), ) class SharedConfigStep(BaseStep): - NAME = 'sharedconfig' def __init__(self, config_api): @@ -289,7 +286,8 @@ class LoadDataStep(BaseStep): def run_step(self, step_definition, parameters): var_resolver = VariableResolver() value = var_resolver.resolve_variables( - parameters, step_definition['value'], + parameters, + step_definition['value'], ) load_type = step_definition['load_type'] if load_type == 'json': @@ -304,7 +302,8 @@ class DumpDataStep(BaseStep): def run_step(self, step_definition, parameters): var_resolver = VariableResolver() value = var_resolver.resolve_variables( - parameters, step_definition['value'], + parameters, + step_definition['value'], ) dump_type = step_definition['dump_type'] if dump_type == 'json': @@ -314,7 +313,6 @@ def run_step(self, step_definition, parameters): class VariableResolver(object): - _VAR_MATCH = re.compile(r'^{(.*?)}$') def resolve_variables(self, variables, params): @@ -372,37 +370,49 @@ class APIInvoker(object): between the two steps. """ + def __init__(self, session): self._session = session self._response_cache = {} - def invoke(self, service, operation, api_params, plan_variables, - optional_api_params=None, query=None, cache=False, - paginate=False): + def invoke( + self, + service, + operation, + api_params, + plan_variables, + optional_api_params=None, + query=None, + cache=False, + paginate=False, + ): # TODO: All of the params that come from prompting the user # are strings. We need a way to convert values to their # appropriate types. We can either add typing into the wizard # spec or we possibly auto-convert based on the service # model (or both). 
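The TODO above is what the `DataTypeConverter` further down partially addresses: every prompt answer arrives as a string and is coerced through a small lookup table. A self-contained sketch of that coercion (the real code raises `InvalidDataTypeConversionException`; a plain `ValueError` keeps the sketch standalone):

    _DATATYPE_CONVERTER = {
        'int': int,
        'float': float,
        'str': str,
        'bool': lambda x: x.lower() == 'true',
    }

    def convert(datatype, value):
        try:
            return _DATATYPE_CONVERTER[datatype](value)
        except (KeyError, ValueError):
            raise ValueError(f'Cannot convert {value!r} to {datatype!r}')

    # convert('int', '5') -> 5; convert('bool', 'True') -> True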
resolved_params = self._resolve_params( - api_params, optional_api_params, plan_variables) + api_params, optional_api_params, plan_variables + ) if cache: response = self._get_cached_api_call( - service, operation, resolved_params, paginate) + service, operation, resolved_params, paginate + ) else: response = self._make_api_call( - service, operation, resolved_params, paginate) + service, operation, resolved_params, paginate + ) if query is not None: response = jmespath.search(query, response) return response def _resolve_params(self, api_params, optional_params, plan_vars): resolver = VariableResolver() - api_params_resolved = resolver.resolve_variables( - plan_vars, api_params) + api_params_resolved = resolver.resolve_variables(plan_vars, api_params) if optional_params is not None: optional_params_resolved = resolver.resolve_variables( - plan_vars, optional_params) + plan_vars, optional_params + ) for key, value in optional_params_resolved.items(): if key not in api_params_resolved and value is not None: api_params_resolved[key] = value @@ -417,14 +427,14 @@ def _make_api_call(self, service, operation, resolved_params, paginate): else: return getattr(client, client_method_name)(**resolved_params) - def _get_cached_api_call(self, service, operation, resolved_params, - paginate): - cache_key = self._get_cache_key( - service, operation, resolved_params - ) + def _get_cached_api_call( + self, service, operation, resolved_params, paginate + ): + cache_key = self._get_cache_key(service, operation, resolved_params) if cache_key not in self._response_cache: response = self._make_api_call( - service, operation, resolved_params, paginate) + service, operation, resolved_params, paginate + ) self._response_cache[cache_key] = response return self._response_cache[cache_key] @@ -432,12 +442,11 @@ def _get_cache_key(self, service_name, operation, resolved_params): return ( service_name, operation, - json.dumps(resolved_params, default=json_encoder) + json.dumps(resolved_params, default=json_encoder), ) class Executor(object): - def __init__(self, step_handlers): self._step_handlers = step_handlers @@ -466,8 +475,7 @@ def evaluate(self, condition, parameters): if not isinstance(condition, list): condition = [condition] for single in condition: - statuses.append(self._check_single_condition( - single, parameters)) + statuses.append(self._check_single_condition(single, parameters)) return all(statuses) def _check_single_condition(self, single, parameters): @@ -483,7 +491,7 @@ class DataTypeConverter: 'int': int, 'float': float, 'str': str, - 'bool': lambda x: x.lower() == 'true' + 'bool': lambda x: x.lower() == 'true', } @classmethod @@ -495,7 +503,6 @@ def convert(cls, datatype, value): class ExecutorStep(object): - # Subclasses must implement this to specify what name to use # for the `type` in a wizard definition. 
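The response cache above memoizes API calls on a `(service, operation, serialized-params)` tuple; serializing the params dict with `json.dumps` turns an otherwise unhashable value into a usable dictionary key. A standalone sketch of the same idea (`sort_keys` and the `make_call` callable are illustrative additions, not part of the wizard code):

    import json

    _response_cache = {}

    def cached_call(service, operation, resolved_params, make_call):
        # Params dicts aren't hashable, so serialize them; sort_keys
        # makes the key deterministic regardless of insertion order.
        key = (
            service,
            operation,
            json.dumps(resolved_params, sort_keys=True, default=str),
        )
        if key not in _response_cache:
            _response_cache[key] = make_call(
                service, operation, resolved_params
            )
        return _response_cache[key]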
NAME = '' @@ -505,7 +512,6 @@ def run_step(self, step_definition, parameters): class APICallExecutorStep(ExecutorStep): - NAME = 'apicall' def __init__(self, api_invoker): @@ -514,7 +520,8 @@ def __init__(self, api_invoker): def run_step(self, step_definition, parameters): service, op_name = step_definition['operation'].split('.', 1) response = self._api_invoker.invoke( - service=service, operation=op_name, + service=service, + operation=op_name, api_params=step_definition['params'], plan_variables=parameters, optional_api_params=step_definition.get('optional_params'), @@ -525,7 +532,6 @@ def run_step(self, step_definition, parameters): class SharedConfigExecutorStep(ExecutorStep): - NAME = 'sharedconfig' def __init__(self, config_api): @@ -535,8 +541,9 @@ def run_step(self, step_definition, parameters): config_params = {} profile = None if 'profile' in step_definition: - profile = self._resolve_params(step_definition['profile'], - parameters) + profile = self._resolve_params( + step_definition['profile'], parameters + ) config_params = self._resolve_params( step_definition['params'], parameters ) @@ -555,6 +562,7 @@ class SharedConfigAPI(object): This allows similar logic to be shared by the planner and executor. """ + def __init__(self, session, config_writer): self._session = session self._config_writer = config_writer @@ -575,24 +583,24 @@ def set_values(self, values, profile=None): config_params['__section__'] = section config_params.update(values) config_filename = os.path.expanduser( - self._session.get_config_variable('config_file')) + self._session.get_config_variable('config_file') + ) self._config_writer.update_config(config_params, config_filename) class DefineVariableStep(ExecutorStep): - NAME = 'define-variable' def run_step(self, step_definition, parameters): value = step_definition['value'] resolved_value = VariableResolver().resolve_variables( - parameters, value) + parameters, value + ) key = step_definition['varname'] parameters[key] = resolved_value class MergeDictStep(ExecutorStep): - NAME = 'merge-dict' def run_step(self, step_definition, parameters): @@ -600,7 +608,8 @@ def run_step(self, step_definition, parameters): result = {} for overlay in step_definition['overlays']: resolved_overlay = var_resolver.resolve_variables( - parameters, overlay, + parameters, + overlay, ) result = self._deep_merge(result, resolved_overlay) parameters[step_definition['output_var']] = result diff --git a/awscli/customizations/wizard/devcommands.py b/awscli/customizations/wizard/devcommands.py index 27e6b20dd628..a3e4a64867d0 100644 --- a/awscli/customizations/wizard/devcommands.py +++ b/awscli/customizations/wizard/devcommands.py @@ -11,13 +11,15 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
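`MergeDictStep` above folds each resolved overlay into an accumulated result via `_deep_merge`, whose body falls outside this hunk. A plausible recursive merge consistent with how the step is used, where later overlays win on conflicts and nested dicts are merged rather than replaced (an assumption about the helper, not its verbatim source):

    def deep_merge(base, overlay):
        merged = dict(base)
        for key, value in overlay.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                # Merge nested dicts key by key instead of clobbering.
                merged[key] = deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged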
from ruamel.yaml import YAML + from awscli.customizations.commands import BasicCommand from awscli.customizations.wizard.factory import create_wizard_app def register_dev_commands(event_handlers): - event_handlers.register('building-command-table.cli-dev', - WizardDev.add_command) + event_handlers.register( + 'building-command-table.cli-dev', WizardDev.add_command + ) def create_default_wizard_dev_runner(session): @@ -57,8 +59,10 @@ class WizardDev(BasicCommand): 'future versions.\n' ) ARG_TABLE = [ - {'name': 'run-wizard', - 'help_text': 'Run a wizard given a wizard file.'} + { + 'name': 'run-wizard', + 'help_text': 'Run a wizard given a wizard file.', + } ] def __init__(self, session, dev_runner=None): diff --git a/awscli/customizations/wizard/exceptions.py b/awscli/customizations/wizard/exceptions.py index c27ff63e98be..b4bfc809b1d8 100644 --- a/awscli/customizations/wizard/exceptions.py +++ b/awscli/customizations/wizard/exceptions.py @@ -45,6 +45,6 @@ def __init__(self, original_exception): message = self.MSG_FORMAT.format( original_tb=''.join(format_tb(original_exception.__traceback__)), original_exception_cls=self.original_exception.__class__.__name__, - original_exception=self.original_exception + original_exception=self.original_exception, ) super().__init__(message) diff --git a/awscli/customizations/wizard/factory.py b/awscli/customizations/wizard/factory.py index 612e9fa483f1..ec8222f1f284 100644 --- a/awscli/customizations/wizard/factory.py +++ b/awscli/customizations/wizard/factory.py @@ -10,21 +10,26 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from awscli.customizations.wizard.ui.layout import WizardLayoutFactory +from awscli.customizations.configure.writer import ConfigFileWriter from awscli.customizations.wizard import core, ui from awscli.customizations.wizard.app import ( - WizardAppRunner, WizardApp, WizardValues, WizardTraverser, + WizardApp, + WizardAppRunner, + WizardTraverser, + WizardValues, ) -from awscli.customizations.configure.writer import ConfigFileWriter +from awscli.customizations.wizard.ui.layout import WizardLayoutFactory def create_default_executor(api_invoker, shared_config): return core.Executor( step_handlers={ core.APICallExecutorStep.NAME: core.APICallExecutorStep( - api_invoker), + api_invoker + ), core.SharedConfigExecutorStep.NAME: core.SharedConfigExecutorStep( - shared_config), + shared_config + ), core.DefineVariableStep.NAME: core.DefineVariableStep(), core.MergeDictStep.NAME: core.MergeDictStep(), core.LoadDataExecutorStep.NAME: core.LoadDataExecutorStep(), @@ -35,19 +40,22 @@ def create_default_executor(api_invoker, shared_config): def create_default_wizard_v1_runner(session): api_invoker = core.APIInvoker(session=session) - shared_config = core.SharedConfigAPI(session=session, - config_writer=ConfigFileWriter()) + shared_config = core.SharedConfigAPI( + session=session, config_writer=ConfigFileWriter() + ) planner = core.Planner( step_handlers={ core.StaticStep.NAME: core.StaticStep(), core.PromptStep.NAME: core.PromptStep(ui.UIPrompter()), core.YesNoPrompt.NAME: core.YesNoPrompt(ui.UIPrompter()), core.FilePromptStep.NAME: core.FilePromptStep( - ui.UIFilePrompter(ui.FileCompleter())), + ui.UIFilePrompter(ui.FileCompleter()) + ), core.TemplateStep.NAME: core.TemplateStep(), core.APICallStep.NAME: core.APICallStep(api_invoker=api_invoker), core.SharedConfigStep.NAME: 
core.SharedConfigStep( - config_api=shared_config), + config_api=shared_config + ), } ) executor = create_default_executor(api_invoker, shared_config) @@ -61,24 +69,30 @@ def create_default_wizard_v2_runner(session): def create_wizard_app(definition, session, output=None, app_input=None): api_invoker = core.APIInvoker(session=session) - shared_config = core.SharedConfigAPI(session=session, - config_writer=ConfigFileWriter()) + shared_config = core.SharedConfigAPI( + session=session, config_writer=ConfigFileWriter() + ) layout = WizardLayoutFactory().create_wizard_layout(definition) values = WizardValues( definition, value_retrieval_steps={ core.APICallStep.NAME: core.APICallStep(api_invoker=api_invoker), core.SharedConfigStep.NAME: core.SharedConfigStep( - config_api=shared_config), + config_api=shared_config + ), core.TemplateStep.NAME: core.TemplateStep(), core.LoadDataStep.NAME: core.LoadDataStep(), core.DumpDataStep.NAME: core.DumpDataStep(), }, - exception_handler=layout.error_bar.display_error + exception_handler=layout.error_bar.display_error, ) executor = create_default_executor(api_invoker, shared_config) traverser = WizardTraverser(definition, values, executor) return WizardApp( - layout=layout, values=values, traverser=traverser, - executor=executor, output=output, app_input=app_input + layout=layout, + values=values, + traverser=traverser, + executor=executor, + output=output, + app_input=app_input, ) diff --git a/awscli/customizations/wizard/loader.py b/awscli/customizations/wizard/loader.py index 8a4b977bfd6b..f58b0c33dd58 100644 --- a/awscli/customizations/wizard/loader.py +++ b/awscli/customizations/wizard/loader.py @@ -38,12 +38,14 @@ """ + import os -from ruamel.yaml import YAML +from ruamel.yaml import YAML WIZARD_SPEC_DIR = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'wizards', + os.path.dirname(os.path.abspath(__file__)), + 'wizards', ) @@ -60,8 +62,7 @@ def __init__(self, spec_dir=None): self._yaml = YAML(typ='rt') def list_commands_with_wizards(self): - """Returns a list of commands with at least one wizard. - """ + """Returns a list of commands with at least one wizard.""" return os.listdir(self._spec_dir) def list_available_wizards(self, command_name): @@ -81,21 +82,24 @@ def load_wizard(self, command_name, wizard_name): of the file. """ - filename = os.path.join(self._spec_dir, command_name, - wizard_name + '.yml') + filename = os.path.join( + self._spec_dir, command_name, wizard_name + '.yml' + ) try: with open(filename) as f: return self._load_yaml(f.read()) except (OSError, IOError): - raise WizardNotExistError("Wizard does not exist for command " - "'%s', name: '%s'" % (command_name, - wizard_name)) + raise WizardNotExistError( + "Wizard does not exist for command " + "'%s', name: '%s'" % (command_name, wizard_name) + ) def _load_yaml(self, contents): data = self._yaml.load(contents) return data def wizard_exists(self, command_name, wizard_name): - filename = os.path.join(self._spec_dir, command_name, - wizard_name + '.yml') + filename = os.path.join( + self._spec_dir, command_name, wizard_name + '.yml' + ) return os.path.isfile(filename) diff --git a/awscli/customizations/wizard/ui/__init__.py b/awscli/customizations/wizard/ui/__init__.py index 1924999b6d8f..cd14f697e34e 100644 --- a/awscli/customizations/wizard/ui/__init__.py +++ b/awscli/customizations/wizard/ui/__init__.py @@ -11,6 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
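The loader below resolves each wizard spec as `<spec-dir>/<command>/<wizard>.yml` and parses it with ruamel's round-trip loader, which preserves key order and comments. A usage sketch (the command and wizard names are illustrative):

    import os
    from ruamel.yaml import YAML

    def load_wizard_spec(spec_dir, command_name, wizard_name):
        filename = os.path.join(spec_dir, command_name, wizard_name + '.yml')
        with open(filename) as f:
            # typ='rt' is the round-trip loader used by WizardLoader.
            return YAML(typ='rt').load(f.read())

    # e.g. load_wizard_spec(WIZARD_SPEC_DIR, 'lambda', 'new-function')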
from __future__ import unicode_literals + import os import prompt_toolkit @@ -34,7 +35,8 @@ def prompt(self, display_text, choices=None): return selectmenu.select_menu(choices) else: response = selectmenu.select_menu( - choices, display_format=self._display_text) + choices, display_format=self._display_text + ) result = response['actual_value'] return result @@ -52,8 +54,7 @@ def get_completions(self, document, complete_event): for child in sorted(children): if child.startswith(partial): result = os.path.join(dirname, child) - yield Completion(result, - start_position=-len(result)) + yield Completion(result, start_position=-len(result)) except OSError: return @@ -64,4 +65,5 @@ def __init__(self, completer): def prompt(self, display_text): return prompt_toolkit.prompt( - '%s: ' % display_text, completer=self._completer) + '%s: ' % display_text, completer=self._completer + ) diff --git a/awscli/customizations/wizard/ui/keybindings.py b/awscli/customizations/wizard/ui/keybindings.py index cd120a6683bc..c52970b8bc5a 100644 --- a/awscli/customizations/wizard/ui/keybindings.py +++ b/awscli/customizations/wizard/ui/keybindings.py @@ -11,15 +11,17 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from prompt_toolkit.application import get_app -from prompt_toolkit.filters import has_focus, Condition +from prompt_toolkit.filters import Condition, has_focus from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.keys import Keys +from awscli.customizations.wizard.exceptions import BaseWizardException from awscli.customizations.wizard.ui.utils import ( - get_ui_control_by_buffer_name, move_to_previous_prompt, - show_details_if_visible_by_default, refresh_details_view + get_ui_control_by_buffer_name, + move_to_previous_prompt, + refresh_details_view, + show_details_if_visible_by_default, ) -from awscli.customizations.wizard.exceptions import BaseWizardException @Condition @@ -52,11 +54,13 @@ def exit_(event): def submit_current_answer(event): current_prompt = event.app.traverser.get_current_prompt() prompt_buffer = get_ui_control_by_buffer_name( - event.app.layout, current_prompt).buffer + event.app.layout, current_prompt + ).buffer try: event.app.traverser.submit_prompt_answer(prompt_buffer.text) - if isinstance(event.app.layout.error_bar.current_error, - BaseWizardException): + if isinstance( + event.app.layout.error_bar.current_error, BaseWizardException + ): event.app.layout.error_bar.clear() except BaseWizardException as e: event.app.layout.error_bar.display_error(e) @@ -91,11 +95,14 @@ def previous_prompt(event): def focus_on_details_panel(event): if event.app.details_visible: layout = event.app.layout - if layout.current_buffer and \ - layout.current_buffer.name == 'details_buffer': + if ( + layout.current_buffer + and layout.current_buffer.name == 'details_buffer' + ): current_prompt = event.app.traverser.get_current_prompt() current_control = get_ui_control_by_buffer_name( - layout, current_prompt) + layout, current_prompt + ) layout.focus(current_control) else: details_buffer = layout.get_buffer_by_name('details_buffer') @@ -106,8 +113,7 @@ def show_details(event): event.app.details_visible = not event.app.details_visible layout = event.app.layout current_prompt = event.app.traverser.get_current_prompt() - current_control = get_ui_control_by_buffer_name( - layout, current_prompt) + current_control = get_ui_control_by_buffer_name(layout, current_prompt) if not event.app.details_visible: 
layout.focus(current_control) else: @@ -121,7 +127,8 @@ def show_save_details_dialogue(event): refresh_details_view(event.app, current_prompt) event.app.save_details_visible = True save_dialogue = get_ui_control_by_buffer_name( - event.app.layout, 'save_details_dialogue') + event.app.layout, 'save_details_dialogue' + ) event.app.layout.focus(save_dialogue) @kb.add(Keys.F4, filter=error_bar_enabled) diff --git a/awscli/customizations/wizard/ui/layout.py b/awscli/customizations/wizard/ui/layout.py index d0b41fcbbc3b..6eab5e796833 100644 --- a/awscli/customizations/wizard/ui/layout.py +++ b/awscli/customizations/wizard/ui/layout.py @@ -15,36 +15,55 @@ from prompt_toolkit.application import get_app from prompt_toolkit.buffer import Buffer from prompt_toolkit.completion import PathCompleter -from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl -from prompt_toolkit.filters import has_focus, Condition +from prompt_toolkit.filters import Condition, has_focus from prompt_toolkit.formatted_text import HTML, to_formatted_text from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.key_binding.bindings.focus import ( - focus_next, focus_previous + focus_next, + focus_previous, ) from prompt_toolkit.keys import Keys from prompt_toolkit.layout import Layout from prompt_toolkit.layout.containers import ( - Window, HSplit, Dimension, ConditionalContainer, WindowAlign, VSplit, - to_container, to_filter + ConditionalContainer, + Dimension, + HSplit, + VSplit, + Window, + WindowAlign, + to_container, + to_filter, ) +from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl +from prompt_toolkit.utils import is_windows from prompt_toolkit.widgets import ( - HorizontalLine, Box, Button, Label, Shadow, Frame, VerticalLine, - Dialog, TextArea + Box, + Button, + Dialog, + Frame, + HorizontalLine, + Label, + Shadow, + TextArea, + VerticalLine, ) -from prompt_toolkit.utils import is_windows from awscli.autoprompt.widgets import BaseToolbarView, TitleLine from awscli.customizations.wizard import core -from awscli.customizations.wizard.ui.section import ( - WizardSectionTab, WizardSectionBody -) from awscli.customizations.wizard.ui.keybindings import ( - details_visible, prompt_has_details, error_bar_enabled, - save_details_visible + details_visible, + error_bar_enabled, + prompt_has_details, + save_details_visible, +) +from awscli.customizations.wizard.ui.section import ( + WizardSectionBody, + WizardSectionTab, ) from awscli.customizations.wizard.ui.utils import ( - move_to_previous_prompt, Spacer, get_ui_control_by_buffer_name + Spacer, + get_ui_control_by_buffer_name, + move_to_previous_prompt, ) @@ -56,13 +75,15 @@ def create_wizard_layout(self, definition): [ self._create_title(definition), self._create_sections( - definition, run_wizard_dialog, error_bar), - HorizontalLine() + definition, run_wizard_dialog, error_bar + ), + HorizontalLine(), ] ) return WizardLayout( - container=container, run_wizard_dialog=run_wizard_dialog, - error_bar=error_bar + container=container, + run_wizard_dialog=run_wizard_dialog, + error_bar=error_bar, ) def _create_title(self, definition): @@ -90,17 +111,19 @@ def _create_sections(self, definition, run_wizard_dialog, error_bar): return VSplit( [ HSplit( - section_tabs, - padding=1, - style='class:wizard.section.tab' + section_tabs, padding=1, style='class:wizard.section.tab' ), ConditionalContainer( VerticalLine(), filter=Condition(is_windows) ), - HSplit([*section_bodies, + HSplit( + [ + *section_bodies, 
WizardDetailsPanel(), error_bar, - ToolbarView()]) + ToolbarView(), + ] + ), ] ) @@ -144,25 +167,33 @@ def _get_title(self): def _get_container(self): return ConditionalContainer( - HSplit([ - TitleLine(self._get_title), - VSplit([ - Window( - content=BufferControl( - buffer=Buffer( - name='details_buffer', read_only=True), - ), - height=Dimension( - max=self.DIMENSIONS['details_window_height_max'], - preferred=self.DIMENSIONS[ - 'details_window_height_pref'] - ), - wrap_lines=True + HSplit( + [ + TitleLine(self._get_title), + VSplit( + [ + Window( + content=BufferControl( + buffer=Buffer( + name='details_buffer', read_only=True + ), + ), + height=Dimension( + max=self.DIMENSIONS[ + 'details_window_height_max' + ], + preferred=self.DIMENSIONS[ + 'details_window_height_pref' + ], + ), + wrap_lines=True, + ), + SaveFileDialogue(), + ] ), - SaveFileDialogue(), - ]) - ]), - details_visible + ] + ), + details_visible, ) def __pt_container__(self): @@ -175,8 +206,7 @@ def __init__(self): def _get_container(self): return ConditionalContainer( - self._create_dialog(), - save_details_visible + self._create_dialog(), save_details_visible ) def __pt_container__(self): @@ -188,17 +218,21 @@ def _create_dialog(self): cancel_button = self._create_cancel_button(textfield) dialog = Dialog( title='Save to file', - body=HSplit([ - Label(text='Filename', dont_extend_height=True), - textfield, - ], padding=Dimension(preferred=1, max=1)), + body=HSplit( + [ + Label(text='Filename', dont_extend_height=True), + textfield, + ], + padding=Dimension(preferred=1, max=1), + ), buttons=[save_button, cancel_button], - with_background=True) + with_background=True, + ) dialog.container.container.style = 'class:wizard.dialog.save' dialog.container.body.container.style = 'class:wizard.dialog.body' dialog.container.body.container.content.key_bindings.add( - Keys.Enter, filter=has_focus('save_details_dialogue'))( - save_button.handler) + Keys.Enter, filter=has_focus('save_details_dialogue') + )(save_button.handler) return dialog def _create_textfield(self): @@ -214,11 +248,13 @@ def _create_save_button(self, textfield): def save_handler(*args, **kwargs): app = get_app() contents = app.layout.get_buffer_by_name( - 'details_buffer').document.text + 'details_buffer' + ).document.text app.file_io.write_file_contents(textfield.text, contents) app.save_details_visible = False current_control = get_ui_control_by_buffer_name( - app.layout, app.traverser.get_current_prompt()) + app.layout, app.traverser.get_current_prompt() + ) app.layout.focus(current_control) return Button(text='Save', handler=save_handler) @@ -228,6 +264,7 @@ def cancel_handler(*args, **kwargs): app = get_app() app.save_details_visible = False app.layout.focus('details_buffer') + return Button(text='Cancel', handler=cancel_handler) def _create_key_bindings(self, save_button, cancel_button): @@ -252,29 +289,29 @@ def __init__(self): def create_window(self, help_text): text_control = FormattedTextControl(text=lambda: help_text) text_control.name = 'toolbar_panel' - return HSplit([ - HorizontalLine(), - Window( - content=text_control, - wrap_lines=True, - **self.DIMENSIONS - ) - ]) + return HSplit( + [ + HorizontalLine(), + Window( + content=text_control, wrap_lines=True, **self.DIMENSIONS + ), + ] + ) def help_text(self): app = get_app() output = [] if prompt_has_details(): title = getattr(app, 'details_title', 'Details panel') - output.extend([ - f'{self.STYLE}[F2] Switch to {title}', - f'{self.STYLE}[F3] Show/Hide {title}', - f'{self.STYLE}[CTRL+S] Save {title}', 
- ]) - if error_bar_enabled(): - output.append( - f'{self.STYLE}[F4] Show/Hide error message' + output.extend( + [ + f'{self.STYLE}[F2] Switch to {title}', + f'{self.STYLE}[F3] Show/Hide {title}', + f'{self.STYLE}[CTRL+S] Save {title}', + ] ) + if error_bar_enabled(): + output.append(f'{self.STYLE}[F4] Show/Hide error message') return to_formatted_text(HTML(f'{self.SPACING}'.join(output))) @@ -367,7 +404,7 @@ def _create_default_buttons(self): def _create_dialog_frame(self): frame_body = Box( body=self._create_buttons_container(), - height=Dimension(min=1, max=3, preferred=3) + height=Dimension(min=1, max=3, preferred=3), ) return Shadow( body=Frame( @@ -405,8 +442,7 @@ def __init__(self): def display_error(self, exception): self.current_error = exception self._error_bar_buffer.text = ( - 'Encountered following error in wizard:\n\n' - f'{exception}' + 'Encountered following error in wizard:\n\n' f'{exception}' ) get_app().error_bar_visible = True @@ -420,19 +456,20 @@ def _get_error_bar_buffer(self): def _get_container(self): return ConditionalContainer( - HSplit([ - TitleLine('Wizard exception'), - Window( - content=BufferControl( - buffer=self._error_bar_buffer, - focusable=False + HSplit( + [ + TitleLine('Wizard exception'), + Window( + content=BufferControl( + buffer=self._error_bar_buffer, focusable=False + ), + style='class:wizard.error', + dont_extend_height=True, + wrap_lines=True, ), - style='class:wizard.error', - dont_extend_height=True, - wrap_lines=True, - ), - ]), - Condition(self._is_visible) + ] + ), + Condition(self._is_visible), ) def _is_visible(self): diff --git a/awscli/customizations/wizard/ui/prompt.py b/awscli/customizations/wizard/ui/prompt.py index 6373762ed2d7..e63164b81306 100644 --- a/awscli/customizations/wizard/ui/prompt.py +++ b/awscli/customizations/wizard/ui/prompt.py @@ -12,21 +12,26 @@ # language governing permissions and limitations under the License. 
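The toolbar above builds its key hints as prompt_toolkit `HTML` markup and converts them with `to_formatted_text`. A minimal standalone sketch of that approach (the fg color and hint labels are illustrative, not the widget's exact values):

    from prompt_toolkit.formatted_text import HTML, to_formatted_text

    def toolbar_fragments(details_title='Details panel', errors_enabled=True):
        style = '<style fg="#777777">'
        hints = [f'{style}[F3] Show/Hide {details_title}</style>']
        if errors_enabled:
            hints.append(f'{style}[F4] Show/Hide error message</style>')
        return to_formatted_text(HTML('  '.join(hints)))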
from prompt_toolkit.application import get_app from prompt_toolkit.buffer import Buffer +from prompt_toolkit.completion import PathCompleter from prompt_toolkit.document import Document from prompt_toolkit.filters import Condition from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.keys import Keys from prompt_toolkit.layout.containers import ( - Window, VSplit, Dimension, ConditionalContainer, FloatContainer, Float + ConditionalContainer, + Dimension, + Float, + FloatContainer, + ScrollOffsets, + VSplit, + Window, ) from prompt_toolkit.layout.controls import BufferControl from prompt_toolkit.layout.margins import ScrollbarMargin -from prompt_toolkit.layout.containers import ScrollOffsets from prompt_toolkit.layout.menus import CompletionsMenu -from prompt_toolkit.completion import PathCompleter from awscli.customizations.wizard.ui.selectmenu import ( - CollapsableSelectionMenuControl + CollapsableSelectionMenuControl, ) from awscli.customizations.wizard.ui.utils import FullyExtendedWidthWindow @@ -46,7 +51,7 @@ def _get_container(self): answer = WizardPromptCompletionAnswer( self._value_name, default_value=self._value_definition.get('default_value'), - completer=self._value_definition['completer'] + completer=self._value_definition['completer'], ) else: answer = WizardPromptAnswer( @@ -57,13 +62,12 @@ def _get_container(self): VSplit( [ WizardPromptDescription( - self._value_name, - self._value_definition['description'] + self._value_name, self._value_definition['description'] ), - answer + answer, ], ), - Condition(self._is_visible) + Condition(self._is_visible), ) def _is_visible(self): @@ -81,14 +85,9 @@ def __init__(self, value_name, value_description): def _get_container(self): content = f'{self._value_description}:' - buffer = Buffer( - document=Document(content), - read_only=True - ) + buffer = Buffer(document=Document(content), read_only=True) return Window( - content=BufferControl( - buffer=buffer, focusable=False - ), + content=BufferControl(buffer=buffer, focusable=False), style=self._get_style, width=Dimension.exact(len(content) + 1), dont_extend_height=True, @@ -114,14 +113,13 @@ def __init__(self, value_name, default_value=None): self.container = self._get_answer_container() def _get_answer_buffer(self): - return Buffer(name=self._value_name, - document=Document(text=self._default_value)) + return Buffer( + name=self._value_name, document=Document(text=self._default_value) + ) def _get_answer_container(self): return FullyExtendedWidthWindow( - content=BufferControl( - buffer=self._buffer - ), + content=BufferControl(buffer=self._buffer), style=self._get_style, dont_extend_height=True, ) @@ -144,20 +142,22 @@ def __init__(self, value_name, default_value=None, completer=None): super().__init__(value_name, default_value) def _get_completer(self, completer): - return { - 'file_completer': PathCompleter(expanduser=True) - }[completer] + return {'file_completer': PathCompleter(expanduser=True)}[completer] def _get_answer_buffer(self): - return Buffer(name=self._value_name, - completer=self._completer, - complete_while_typing=True, - document=Document(text=self._default_value)) + return Buffer( + name=self._value_name, + completer=self._completer, + complete_while_typing=True, + document=Document(text=self._default_value), + ) def _get_menu_height(self): if self._buffer.complete_state: - return min(len(self._buffer.complete_state.completions), - self.COMPLETION_MENU_MAX_HEIGHT) + return min( + len(self._buffer.complete_state.completions), + 
self.COMPLETION_MENU_MAX_HEIGHT, + ) return 0 def _get_answer_container(self): @@ -165,16 +165,18 @@ def _get_answer_container(self): FullyExtendedWidthWindow( content=BufferControl(buffer=self._buffer), style=self._get_style, - wrap_lines=True + wrap_lines=True, ), floats=[ Float( - xcursor=True, ycursor=True, top=1, + xcursor=True, + ycursor=True, + top=1, height=self._get_menu_height, content=CompletionsMenu(), ) ], - key_bindings=self._get_key_bindings() + key_bindings=self._get_key_bindings(), ) def _get_key_bindings(self): @@ -205,7 +207,7 @@ def _get_answer_container(self): content=CollapsableSelectionMenuControl( items=self._get_choices, selection_capture_buffer=self._buffer, - on_toggle=self._show_details + on_toggle=self._show_details, ), style=self._get_style, always_hide_cursor=True, @@ -218,8 +220,7 @@ def _get_choices(self): def _show_details(self, choice): app = get_app() - details_buffer = app.layout.get_buffer_by_name( - 'details_buffer') + details_buffer = app.layout.get_buffer_by_name('details_buffer') details, title = self._get_details(choice) app.details_title = title details_buffer.reset() diff --git a/awscli/customizations/wizard/ui/section.py b/awscli/customizations/wizard/ui/section.py index fdee3ed5a87b..493ac77e0a6e 100644 --- a/awscli/customizations/wizard/ui/section.py +++ b/awscli/customizations/wizard/ui/section.py @@ -15,7 +15,10 @@ from prompt_toolkit.document import Document from prompt_toolkit.filters import Condition from prompt_toolkit.layout.containers import ( - Window, HSplit, Dimension, ConditionalContainer + ConditionalContainer, + Dimension, + HSplit, + Window, ) from prompt_toolkit.layout.controls import BufferControl from prompt_toolkit.widgets import Box @@ -31,14 +34,9 @@ def __init__(self, section_name, section_definition): def _get_container(self): content = f" {self._definition['shortname']}" - buffer = Buffer( - document=Document(content), - read_only=True - ) + buffer = Buffer(document=Document(content), read_only=True) return Window( - content=BufferControl( - buffer=buffer, focusable=False - ), + content=BufferControl(buffer=buffer, focusable=False), style=self._get_style, width=Dimension.exact(len(content) + 1), dont_extend_height=True, @@ -66,12 +64,12 @@ def _get_container(self): return ConditionalContainer( Box( HSplit( - self._create_prompts_from_section_definition(), - padding=1 + self._create_prompts_from_section_definition(), padding=1 ), - padding_left=2, padding_top=1 + padding_left=2, + padding_top=1, ), - Condition(self._is_current_section) + Condition(self._is_current_section), ) def _is_current_section(self): diff --git a/awscli/customizations/wizard/ui/selectmenu.py b/awscli/customizations/wizard/ui/selectmenu.py index 8a257d54357d..3c9e1c150780 100644 --- a/awscli/customizations/wizard/ui/selectmenu.py +++ b/awscli/customizations/wizard/ui/selectmenu.py @@ -11,21 +11,22 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
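The completion-enabled answer above attaches a `PathCompleter` to its buffer so suggestions appear while typing, and sizes the floating menu to the smaller of the completion count and a fixed cap. A condensed sketch of both pieces:

    from prompt_toolkit.buffer import Buffer
    from prompt_toolkit.completion import PathCompleter
    from prompt_toolkit.document import Document

    COMPLETION_MENU_MAX_HEIGHT = 10

    def make_answer_buffer(name, default_value=''):
        return Buffer(
            name=name,
            completer=PathCompleter(expanduser=True),
            complete_while_typing=True,
            document=Document(text=default_value),
        )

    def menu_height(buffer):
        # Only reserve rows while completions are actually available.
        if buffer.complete_state:
            return min(
                len(buffer.complete_state.completions),
                COMPLETION_MENU_MAX_HEIGHT,
            )
        return 0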
from __future__ import unicode_literals + from prompt_toolkit import Application from prompt_toolkit.application import get_app from prompt_toolkit.buffer import Buffer -from prompt_toolkit.utils import get_cwidth -from prompt_toolkit.layout import Layout, FloatContainer, Float -from prompt_toolkit.layout.controls import UIControl, UIContent -from prompt_toolkit.layout.screen import Point -from prompt_toolkit.layout.dimension import Dimension from prompt_toolkit.key_binding.key_bindings import KeyBindings -from prompt_toolkit.layout.margins import ScrollbarMargin +from prompt_toolkit.layout import Float, FloatContainer, Layout from prompt_toolkit.layout.containers import ScrollOffsets, Window +from prompt_toolkit.layout.controls import UIContent, UIControl +from prompt_toolkit.layout.dimension import Dimension +from prompt_toolkit.layout.margins import ScrollbarMargin +from prompt_toolkit.layout.screen import Point +from prompt_toolkit.utils import get_cwidth def select_menu(items, display_format=None, max_height=10): - """ Presents a list of options and allows the user to select one. + """Presents a list of options and allows the user to select one. This presents a static list of options and prompts the user to select one. This is similar to a completion menu but is different in that it does not @@ -64,7 +65,8 @@ def exit_app(event): # limit the height and width of the window. content = FloatContainer( Window(height=Dimension(min=min_height, max=min_height)), - [Float(menu_window, top=0, left=0)]) + [Float(menu_window, top=0, left=0)], + ) app = Application( layout=Layout(content), key_bindings=app_bindings, @@ -84,7 +86,7 @@ def _trim_text(text, max_width): if width > max_width: # When there are no double width characters, just use slice operation. if len(text) == width: - trimmed_text = (text[:max(1, max_width - 3)] + '...')[:max_width] + trimmed_text = (text[: max(1, max_width - 3)] + '...')[:max_width] return trimmed_text, len(trimmed_text) # Otherwise, loop until we have the desired width. 
(Rather @@ -151,13 +153,13 @@ def _menu_item_fragment(self, item, is_selected, menu_width): def create_content(self, width, height): def get_line(i): item = self._get_items()[i] - is_selected = (i == self._selection) + is_selected = i == self._selection return self._menu_item_fragment(item, is_selected, width) return UIContent( get_line=get_line, cursor_position=Point(x=0, y=self._selection or 0), - line_count=len(self._get_items()) + line_count=len(self._get_items()), ) def _move_cursor(self, delta): @@ -190,8 +192,15 @@ def app_result(event): class CollapsableSelectionMenuControl(SelectionMenuControl): """Menu that collapses to text with selection when loses focus""" - def __init__(self, items, display_format=None, cursor='>', - selection_capture_buffer=None, on_toggle=None): + + def __init__( + self, + items, + display_format=None, + cursor='>', + selection_capture_buffer=None, + on_toggle=None, + ): super().__init__(items, display_format=display_format, cursor=cursor) if not selection_capture_buffer: selection_capture_buffer = Buffer() @@ -204,6 +213,7 @@ def create_content(self, width, height): self._has_ever_entered_select_menu = True return super().create_content(width, height) else: + def get_line(i): content = '' if self._has_ever_entered_select_menu: @@ -212,11 +222,11 @@ def get_line(i): return UIContent(get_line=get_line, line_count=1) - def preferred_height(self, width, max_height, wrap_lines, - get_line_prefix): + def preferred_height(self, width, max_height, wrap_lines, get_line_prefix): if get_app().layout.has_focus(self): return super().preferred_height( - width, max_height, wrap_lines, get_line_prefix) + width, max_height, wrap_lines, get_line_prefix + ) else: return 1 diff --git a/awscli/customizations/wizard/ui/style.py b/awscli/customizations/wizard/ui/style.py index c59a6ea744aa..7bd7435bf558 100644 --- a/awscli/customizations/wizard/ui/style.py +++ b/awscli/customizations/wizard/ui/style.py @@ -16,26 +16,25 @@ def get_default_style(): basic_styles = [ - # Wizard-specific classes - ('wizard', ''), - ('wizard.title', 'underline bold'), - ('wizard.prompt.description', 'bold'), - ('wizard.prompt.description.current', 'white'), - ('wizard.prompt.answer', 'bg:#aaaaaa black'), - ('wizard.prompt.answer.current', 'white'), - ('wizard.section.tab.current', 'white'), - ('wizard.section.tab.unvisited', '#777777'), - ('wizard.section.tab.visited', ''), - ('wizard.dialog', ''), - ('wizard.dialog frame.label', 'white bold'), - ('wizard.dialog.save frame.label', 'black bold'), - ('wizard.dialog.body', 'bg:#aaaaaa black'), - ('wizard.error', 'bg:#550000 #ffffff'), - - # Prompt-toolkit built-in classes - ('button.focused', 'bg:#777777 white'), - ('completion-menu.completion', 'underline'), - ] + # Wizard-specific classes + ('wizard', ''), + ('wizard.title', 'underline bold'), + ('wizard.prompt.description', 'bold'), + ('wizard.prompt.description.current', 'white'), + ('wizard.prompt.answer', 'bg:#aaaaaa black'), + ('wizard.prompt.answer.current', 'white'), + ('wizard.section.tab.current', 'white'), + ('wizard.section.tab.unvisited', '#777777'), + ('wizard.section.tab.visited', ''), + ('wizard.dialog', ''), + ('wizard.dialog frame.label', 'white bold'), + ('wizard.dialog.save frame.label', 'black bold'), + ('wizard.dialog.body', 'bg:#aaaaaa black'), + ('wizard.error', 'bg:#550000 #ffffff'), + # Prompt-toolkit built-in classes + ('button.focused', 'bg:#777777 white'), + ('completion-menu.completion', 'underline'), + ] if is_windows(): os_related_styles = [ ('wizard.section.tab', 'bold 
black'), diff --git a/awscli/customizations/wizard/ui/utils.py b/awscli/customizations/wizard/ui/utils.py index 0aedb8d049ea..68cc9af6b740 100644 --- a/awscli/customizations/wizard/ui/utils.py +++ b/awscli/customizations/wizard/ui/utils.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. from prompt_toolkit.buffer import Buffer from prompt_toolkit.document import Document -from prompt_toolkit.layout.containers import Window, Dimension +from prompt_toolkit.layout.containers import Dimension, Window from prompt_toolkit.layout.controls import BufferControl @@ -28,7 +28,8 @@ def get_ui_control_by_buffer_name(layout, buffer_name): if hasattr(control, 'buffer') and control.buffer.name == buffer_name: return control raise ValueError( - f"Couldn't find buffer in the current layout: {buffer_name}") + f"Couldn't find buffer in the current layout: {buffer_name}" + ) def move_to_previous_prompt(app): @@ -37,13 +38,15 @@ def move_to_previous_prompt(app): show_details_if_visible_by_default(app, previous_prompt) refresh_details_view(app, previous_prompt) previous_control = get_ui_control_by_buffer_name( - app.layout, previous_prompt) + app.layout, previous_prompt + ) app.layout.focus(previous_control) def show_details_if_visible_by_default(app, prompt): - app.details_visible = \ - app.traverser.is_prompt_details_visible_by_default(prompt) + app.details_visible = app.traverser.is_prompt_details_visible_by_default( + prompt + ) def refresh_details_view(app, prompt): @@ -59,19 +62,13 @@ class Spacer: element such as expanding tab column in the wizard app with the color gray. """ + def __init__(self): self.container = self._get_container() def _get_container(self): - buffer = Buffer( - document=Document(''), - read_only=True - ) - return Window( - content=BufferControl( - buffer=buffer, focusable=False - ) - ) + buffer = Buffer(document=Document(''), read_only=True) + return Window(content=BufferControl(buffer=buffer, focusable=False)) def __pt_container__(self): return self.container @@ -79,5 +76,6 @@ def __pt_container__(self): class FullyExtendedWidthWindow(Window): """Window that fully extends its available width""" + def preferred_width(self, max_available_width): return Dimension(preferred=max_available_width) diff --git a/awscli/customizations/wizard/wizards/lambda/new-function.yml b/awscli/customizations/wizard/wizards/lambda/new-function.yml index 85aa318f8860..7f755e3d54b3 100644 --- a/awscli/customizations/wizard/wizards/lambda/new-function.yml +++ b/awscli/customizations/wizard/wizards/lambda/new-function.yml @@ -132,7 +132,7 @@ __OUTPUT__: Wizard successfully created Lambda Function: Function name: {function_name} Function ARN: {function_arn} - + {% if {preview_type} == preview_cli_command %} Steps to create function is equivalent to running the following sample AWS CLI commands: diff --git a/awscli/data/metadata.json b/awscli/data/metadata.json index ed95c3da54fa..d2763dbe39ee 100644 --- a/awscli/data/metadata.json +++ b/awscli/data/metadata.json @@ -1,3 +1,3 @@ { "distribution_source": "source" -} \ No newline at end of file +} diff --git a/awscli/errorhandler.py b/awscli/errorhandler.py index 198f908ad23e..7ac60221191a 100644 --- a/awscli/errorhandler.py +++ b/awscli/errorhandler.py @@ -13,24 +13,25 @@ import logging import signal -from botocore.exceptions import ( - NoRegionError, NoCredentialsError, ClientError, - ParamValidationError as BotocoreParamValidationError, -) - +from awscli.argparser import USAGE, ArgParseException from awscli.argprocess 
import ParamError, ParamSyntaxError from awscli.arguments import UnknownArgumentError -from awscli.argparser import ArgParseException, USAGE +from awscli.autoprompt.factory import PrompterKeyboardInterrupt from awscli.constants import ( - PARAM_VALIDATION_ERROR_RC, CONFIGURATION_ERROR_RC, CLIENT_ERROR_RC, - GENERAL_ERROR_RC + CLIENT_ERROR_RC, + CONFIGURATION_ERROR_RC, + GENERAL_ERROR_RC, + PARAM_VALIDATION_ERROR_RC, ) -from awscli.utils import PagerInitializationException -from awscli.autoprompt.factory import PrompterKeyboardInterrupt from awscli.customizations.exceptions import ( - ParamValidationError, ConfigurationError + ConfigurationError, + ParamValidationError, +) +from awscli.utils import PagerInitializationException +from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError +from botocore.exceptions import ( + ParamValidationError as BotocoreParamValidationError, ) - LOG = logging.getLogger(__name__) @@ -40,7 +41,7 @@ def construct_entry_point_handlers_chain(): ParamValidationErrorsHandler(), PrompterInterruptExceptionHandler(), InterruptExceptionHandler(), - GeneralExceptionHandler() + GeneralExceptionHandler(), ] return ChainedExceptionHandler(exception_handlers=handlers) @@ -55,7 +56,7 @@ def construct_cli_error_handlers_chain(): PagerErrorHandler(), InterruptExceptionHandler(), ClientErrorHandler(), - GeneralExceptionHandler() + GeneralExceptionHandler(), ] return ChainedExceptionHandler(exception_handlers=handlers) @@ -84,8 +85,11 @@ def _do_handle_exception(self, exception, stdout, stderr): class ParamValidationErrorsHandler(FilteredExceptionHandler): EXCEPTIONS_TO_HANDLE = ( - ParamError, ParamSyntaxError, ArgParseException, - ParamValidationError, BotocoreParamValidationError + ParamError, + ParamSyntaxError, + ArgParseException, + ParamValidationError, + BotocoreParamValidationError, ) RC = PARAM_VALIDATION_ERROR_RC @@ -108,7 +112,9 @@ class ConfigurationErrorHandler(FilteredExceptionHandler): class NoRegionErrorHandler(FilteredExceptionHandler): EXCEPTIONS_TO_HANDLE = NoRegionError RC = CONFIGURATION_ERROR_RC - MESSAGE = '%s You can also configure your region by running "aws configure".' + MESSAGE = ( + '%s You can also configure your region by running "aws configure".' 
+ ) class NoCredentialsErrorHandler(FilteredExceptionHandler): diff --git a/awscli/formatter.py b/awscli/formatter.py index 2fbd9a20cbec..44cf017852ec 100644 --- a/awscli/formatter.py +++ b/awscli/formatter.py @@ -13,16 +13,14 @@ import logging from datetime import datetime -from botocore.compat import json -from botocore.utils import set_value_from_jmespath -from botocore.paginate import PageIterator from ruamel.yaml import YAML -from awscli.table import MultiTable, Styler, ColorizedStyler -from awscli import text -from awscli import compat +from awscli import compat, text +from awscli.table import ColorizedStyler, MultiTable, Styler from awscli.utils import json_encoder - +from botocore.compat import json +from botocore.paginate import PageIterator +from botocore.utils import set_value_from_jmespath LOG = logging.getLogger(__name__) @@ -78,7 +76,8 @@ def __call__(self, command_name, response, stream=None): else: response_data = response response_data = self._get_transformed_response_for_output( - response_data) + response_data + ) try: self._format_response(command_name, response_data, stream) except IOError as e: @@ -92,15 +91,19 @@ def __call__(self, command_name, response, stream=None): class JSONFormatter(FullyBufferedFormatter): - def _format_response(self, command_name, response, stream): # For operations that have no response body (e.g. s3 put-object) # the response will be an empty string. We don't want to print # that out to the user but other "falsey" values like an empty # dictionary should be printed. if response != {}: - json.dump(response, stream, indent=4, default=json_encoder, - ensure_ascii=False) + json.dump( + response, + stream, + indent=4, + default=json_encoder, + ensure_ascii=False, + ) stream.write('\n') @@ -178,7 +181,8 @@ def __call__(self, command_name, response, stream=None): def _get_response_stream(self, response): if is_response_paginated(response): return compat.imap( - self._get_transformed_response_for_output, response) + self._get_transformed_response_for_output, response + ) else: output = self._get_transformed_response_for_output(response) if output == {}: @@ -196,19 +200,23 @@ class TableFormatter(FullyBufferedFormatter): using the output definition from the model. 
""" + def __init__(self, args, table=None): super(TableFormatter, self).__init__(args) if args.color == 'auto': - self.table = MultiTable(initial_section=False, - column_separator='|') + self.table = MultiTable( + initial_section=False, column_separator='|' + ) elif args.color == 'off': styler = Styler() - self.table = MultiTable(initial_section=False, - column_separator='|', styler=styler) + self.table = MultiTable( + initial_section=False, column_separator='|', styler=styler + ) elif args.color == 'on': styler = ColorizedStyler() - self.table = MultiTable(initial_section=False, - column_separator='|', styler=styler) + self.table = MultiTable( + initial_section=False, column_separator='|', styler=styler + ) else: raise ValueError("Unknown color option: %s" % args.color) @@ -257,8 +265,9 @@ def _build_sub_table_from_dict(self, current, indent_level): self.table.add_row_header(headers) self.table.add_row([current[k] for k in headers]) for remaining in more: - self._build_table(remaining, current[remaining], - indent_level=indent_level + 1) + self._build_table( + remaining, current[remaining], indent_level=indent_level + 1 + ) def _build_sub_table_from_list(self, current, indent_level, title): headers, more = self._group_scalar_keys_from_list(current) @@ -266,8 +275,7 @@ def _build_sub_table_from_list(self, current, indent_level, title): first = True for element in current: if not first and more: - self.table.new_section(title, - indent_level=indent_level) + self.table.new_section(title, indent_level=indent_level) self.table.add_row_header(headers) first = False # Use .get() to account for the fact that sometimes an element @@ -278,8 +286,11 @@ def _build_sub_table_from_list(self, current, indent_level, title): # be in every single element of the list, so we need to # check this condition before recursing. if remaining in element: - self._build_table(remaining, element[remaining], - indent_level=indent_level + 1) + self._build_table( + remaining, + element[remaining], + indent_level=indent_level + 1, + ) def _scalar_type(self, element): return not isinstance(element, (list, dict)) @@ -315,7 +326,6 @@ def _group_scalar_keys(self, current): class TextFormatter(Formatter): - def __call__(self, command_name, response, stream=None): if stream is None: stream = self._get_default_stream() @@ -331,9 +341,7 @@ def __call__(self, command_name, response, stream=None): for result_key in result_keys: data = result_key.search(page) set_value_from_jmespath( - current, - result_key.expression, - data + current, result_key.expression, data ) self._format_response(current, stream) if response.resume_token: @@ -341,7 +349,8 @@ def __call__(self, command_name, response, stream=None): # if they want. self._format_response( {'NextToken': {'NextToken': response.resume_token}}, - stream) + stream, + ) else: self._remove_request_id(response) self._format_response(response, stream) diff --git a/awscli/handlers.py b/awscli/handlers.py index 4cbaa6ea6472..ef3abe07fc3a 100644 --- a/awscli/handlers.py +++ b/awscli/handlers.py @@ -16,93 +16,121 @@ registered with the event system. 
""" + +from awscli.alias import register_alias_commands from awscli.argprocess import ParamShorthandParser -from awscli.customizations.ec2instanceconnect import register_ec2_instance_connect_commands -from awscli.paramfile import register_uri_param_handler from awscli.clidriver import no_pager_handler from awscli.customizations import datapipeline from awscli.customizations.addexamples import add_examples from awscli.customizations.argrename import register_arg_renames from awscli.customizations.assumerole import register_assume_role_provider from awscli.customizations.awslambda import register_lambda_create_function +from awscli.customizations.binaryformat import add_binary_formatter from awscli.customizations.cliinput import register_cli_input_args -from awscli.customizations.cloudformation import initialize as cloudformation_init +from awscli.customizations.cloudformation import ( + initialize as cloudformation_init, +) from awscli.customizations.cloudfront import register as register_cloudfront from awscli.customizations.cloudsearch import initialize as cloudsearch_init from awscli.customizations.cloudsearchdomain import register_cloudsearchdomain from awscli.customizations.cloudtrail import initialize as cloudtrail_init from awscli.customizations.codeartifact import register_codeartifact_commands from awscli.customizations.codecommit import initialize as codecommit_init -from awscli.customizations.codedeploy.codedeploy import initialize as \ - codedeploy_init +from awscli.customizations.codedeploy.codedeploy import ( + initialize as codedeploy_init, +) from awscli.customizations.configservice.getstatus import register_get_status -from awscli.customizations.configservice.putconfigurationrecorder import \ - register_modify_put_configuration_recorder -from awscli.customizations.configservice.rename_cmd import \ - register_rename_config +from awscli.customizations.configservice.putconfigurationrecorder import ( + register_modify_put_configuration_recorder, +) +from awscli.customizations.configservice.rename_cmd import ( + register_rename_config, +) from awscli.customizations.configservice.subscribe import register_subscribe from awscli.customizations.configure.configure import register_configure_cmd +from awscli.customizations.devcommands import register_dev_commands +from awscli.customizations.dlm.dlm import dlm_initialize +from awscli.customizations.dsql import register_dsql_customizations from awscli.customizations.dynamodb.ddb import register_ddb -from awscli.customizations.dynamodb.paginatorfix import \ - register_dynamodb_paginator_fix -from awscli.customizations.history import register_history_mode -from awscli.customizations.history import register_history_commands +from awscli.customizations.dynamodb.paginatorfix import ( + register_dynamodb_paginator_fix, +) from awscli.customizations.ec2.addcount import register_count_events from awscli.customizations.ec2.bundleinstance import register_bundleinstance from awscli.customizations.ec2.decryptpassword import ec2_add_priv_launch_key +from awscli.customizations.ec2.paginate import register_ec2_page_size_injector from awscli.customizations.ec2.protocolarg import register_protocol_args from awscli.customizations.ec2.runinstances import register_runinstances from awscli.customizations.ec2.secgroupsimplify import register_secgroup -from awscli.customizations.ec2.paginate import register_ec2_page_size_injector +from awscli.customizations.ec2instanceconnect import ( + register_ec2_instance_connect_commands, +) from awscli.customizations.ecr 
import register_ecr_commands from awscli.customizations.ecr_public import register_ecr_public_commands -from awscli.customizations.emr.emr import emr_initialize -from awscli.customizations.emrcontainers import \ - initialize as emrcontainers_initialize -from awscli.customizations.eks import initialize as eks_initialize from awscli.customizations.ecs import initialize as ecs_initialize +from awscli.customizations.eks import initialize as eks_initialize +from awscli.customizations.emr.emr import emr_initialize +from awscli.customizations.emrcontainers import ( + initialize as emrcontainers_initialize, +) from awscli.customizations.gamelift import register_gamelift_commands -from awscli.customizations.generatecliskeleton import \ - register_generate_cli_skeleton +from awscli.customizations.generatecliskeleton import ( + register_generate_cli_skeleton, +) from awscli.customizations.globalargs import register_parse_global_args +from awscli.customizations.history import ( + register_history_commands, + register_history_mode, +) from awscli.customizations.iamvirtmfa import IAMVMFAWrapper -from awscli.customizations.iot import register_create_keys_and_cert_arguments -from awscli.customizations.iot import register_create_keys_from_csr_arguments +from awscli.customizations.iot import ( + register_create_keys_and_cert_arguments, + register_create_keys_from_csr_arguments, +) from awscli.customizations.iot_data import register_custom_endpoint_note +from awscli.customizations.kinesis import ( + register_kinesis_list_streams_pagination_backcompat, +) from awscli.customizations.kms import register_fix_kms_create_grant_docs -from awscli.customizations.dlm.dlm import dlm_initialize +from awscli.customizations.lightsail import initialize as lightsail_initialize +from awscli.customizations.logs import register_logs_commands from awscli.customizations.opsworks import initialize as opsworks_init +from awscli.customizations.opsworkscm import register_alias_opsworks_cm from awscli.customizations.paginate import register_pagination from awscli.customizations.putmetricdata import register_put_metric_data -from awscli.customizations.rds import register_rds_modify_split -from awscli.customizations.rds import register_add_generate_db_auth_token -from awscli.customizations.dsql import register_dsql_customizations -from awscli.customizations.rekognition import register_rekognition_detect_labels +from awscli.customizations.quicksight import ( + register_quicksight_asset_bundle_customizations, +) +from awscli.customizations.rds import ( + register_add_generate_db_auth_token, + register_rds_modify_split, +) +from awscli.customizations.rekognition import ( + register_rekognition_detect_labels, +) from awscli.customizations.removals import register_removals from awscli.customizations.route53 import register_create_hosted_zone_doc_fix from awscli.customizations.s3.s3 import s3_plugin_initialize from awscli.customizations.s3errormsg import register_s3_error_msg -from awscli.customizations.timestampformat import register_timestamp_format +from awscli.customizations.s3events import ( + register_document_expires_string, + register_event_stream_arg, +) +from awscli.customizations.servicecatalog import ( + register_servicecatalog_commands, +) from awscli.customizations.sessendemail import register_ses_send_email +from awscli.customizations.sessionmanager import register_ssm_session from awscli.customizations.sso import register_sso_commands from awscli.customizations.streamingoutputarg import add_streaming_output_arg -from 
awscli.customizations.translate import register_translate_import_terminology +from awscli.customizations.timestampformat import register_timestamp_format from awscli.customizations.toplevelbool import register_bool_params +from awscli.customizations.translate import ( + register_translate_import_terminology, +) from awscli.customizations.waiters import register_add_waiters -from awscli.customizations.opsworkscm import register_alias_opsworks_cm -from awscli.customizations.servicecatalog import register_servicecatalog_commands -from awscli.customizations.s3events import register_event_stream_arg, register_document_expires_string -from awscli.customizations.sessionmanager import register_ssm_session -from awscli.customizations.logs import register_logs_commands -from awscli.customizations.devcommands import register_dev_commands from awscli.customizations.wizard.commands import register_wizard_commands -from awscli.customizations.binaryformat import add_binary_formatter -from awscli.customizations.lightsail import initialize as lightsail_initialize -from awscli.alias import register_alias_commands -from awscli.customizations.kinesis import \ - register_kinesis_list_streams_pagination_backcompat -from awscli.customizations.quicksight import \ - register_quicksight_asset_bundle_customizations +from awscli.paramfile import register_uri_param_handler def awscli_initialize(event_handlers): @@ -114,23 +142,25 @@ def awscli_initialize(event_handlers): # The s3 error mesage needs to registered before the # generic error handler. register_s3_error_msg(event_handlers) -# # The following will get fired for every option we are -# # documenting. It will attempt to add an example_fn on to -# # the parameter object if the parameter supports shorthand -# # syntax. The documentation event handlers will then use -# # the examplefn to generate the sample shorthand syntax -# # in the docs. Registering here should ensure that this -# # handler gets called first but it still feels a bit brittle. -# event_handlers.register('doc-option-example.*.*.*', -# param_shorthand.add_example_fn) - event_handlers.register('doc-examples.*.*', - add_examples) + # # The following will get fired for every option we are + # # documenting. It will attempt to add an example_fn on to + # # the parameter object if the parameter supports shorthand + # # syntax. The documentation event handlers will then use + # # the examplefn to generate the sample shorthand syntax + # # in the docs. Registering here should ensure that this + # # handler gets called first but it still feels a bit brittle. 
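All of these registrations go through botocore's hierarchical event system: a handler is attached to a dotted event name, and a '*' segment matches any service or operation. A minimal sketch of the pattern, with an illustrative handler rather than the CLI's real wiring:

from botocore.hooks import HierarchicalEmitter

def on_doc_examples(help_command, **kwargs):
    # Handlers receive the kwargs passed to emit(), plus event_name.
    print('doc-examples fired for', help_command)

emitter = HierarchicalEmitter()
# The '*' segments match any service/operation, as in the registrations above.
emitter.register('doc-examples.*.*', on_doc_examples)
emitter.emit('doc-examples.ec2.describe-instances', help_command='demo')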
+ # event_handlers.register('doc-option-example.*.*.*', + # param_shorthand.add_example_fn) + event_handlers.register('doc-examples.*.*', add_examples) register_cli_input_args(event_handlers) - event_handlers.register('building-argument-table.*', - add_streaming_output_arg) + event_handlers.register( + 'building-argument-table.*', add_streaming_output_arg + ) register_count_events(event_handlers) - event_handlers.register('building-argument-table.ec2.get-password-data', - ec2_add_priv_launch_key) + event_handlers.register( + 'building-argument-table.ec2.get-password-data', + ec2_add_priv_launch_key, + ) register_parse_global_args(event_handlers) register_pagination(event_handlers) register_secgroup(event_handlers) @@ -179,10 +209,12 @@ def awscli_initialize(event_handlers): register_custom_endpoint_note(event_handlers) event_handlers.register( 'building-argument-table.iot.create-keys-and-certificate', - register_create_keys_and_cert_arguments) + register_create_keys_and_cert_arguments, + ) event_handlers.register( 'building-argument-table.iot.create-certificate-from-csr', - register_create_keys_from_csr_arguments) + register_create_keys_from_csr_arguments, + ) register_cloudfront(event_handlers) register_gamelift_commands(event_handlers) register_ec2_page_size_injector(event_handlers) diff --git a/awscli/help.py b/awscli/help.py index 1ce8571f3aec..b1f429f0334e 100644 --- a/awscli/help.py +++ b/awscli/help.py @@ -12,35 +12,37 @@ # language governing permissions and limitations under the License. import logging import os -import sys import platform import shlex -from subprocess import Popen, PIPE +import sys +from subprocess import PIPE, Popen from docutils.core import publish_string from docutils.writers import manpage -from awscli.clidocs import ProviderDocumentEventHandler -from awscli.clidocs import ServiceDocumentEventHandler -from awscli.clidocs import OperationDocumentEventHandler -from awscli.clidocs import TopicListerDocumentEventHandler -from awscli.clidocs import TopicDocumentEventHandler +from awscli.argparser import ArgTableArgParser +from awscli.argprocess import ParamShorthandParser from awscli.bcdoc import docevents from awscli.bcdoc.restdoc import ReSTDocument from awscli.bcdoc.textwriter import TextWriter -from awscli.argprocess import ParamShorthandParser -from awscli.argparser import ArgTableArgParser +from awscli.clidocs import ( + OperationDocumentEventHandler, + ProviderDocumentEventHandler, + ServiceDocumentEventHandler, + TopicDocumentEventHandler, + TopicListerDocumentEventHandler, +) from awscli.topictags import TopicTagDB from awscli.utils import ignore_ctrl_c - LOG = logging.getLogger('awscli.help') class ExecutableNotFoundError(Exception): def __init__(self, executable_name): super(ExecutableNotFoundError, self).__init__( - 'Could not find executable named "%s"' % executable_name) + 'Could not find executable named "%s"' % executable_name + ) def get_renderer(): @@ -62,6 +64,7 @@ class PagingHelpRenderer(object): a particular platform. 
""" + def __init__(self, output_stream=sys.stdout): self.output_stream = output_stream @@ -118,7 +121,8 @@ def _convert_doc_content(self, contents): settings_overrides = self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES.copy() settings_overrides["report_level"] = 3 man_contents = publish_string( - contents, writer=manpage.Writer(), + contents, + writer=manpage.Writer(), settings_overrides=self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES, ) if self._exists_on_path('groff'): @@ -135,8 +139,9 @@ def _convert_doc_content(self, contents): def _send_output_to_pager(self, output): cmdline = self.get_pager_cmdline() if not self._exists_on_path(cmdline[0]): - LOG.debug("Pager '%s' not found in PATH, printing raw help." % - cmdline[0]) + LOG.debug( + "Pager '%s' not found in PATH, printing raw help." % cmdline[0] + ) self.output_stream.write(output.decode('utf-8') + "\n") self.output_stream.flush() return @@ -159,8 +164,12 @@ def _send_output_to_pager(self, output): def _exists_on_path(self, name): # Since we're only dealing with POSIX systems, we can # ignore things like PATHEXT. - return any([os.path.exists(os.path.join(p, name)) - for p in os.environ.get('PATH', '').split(os.pathsep)]) + return any( + [ + os.path.exists(os.path.join(p, name)) + for p in os.environ.get('PATH', '').split(os.pathsep) + ] + ) class WindowsHelpRenderer(PagingHelpRenderer): @@ -170,7 +179,8 @@ class WindowsHelpRenderer(PagingHelpRenderer): def _convert_doc_content(self, contents): text_output = publish_string( - contents, writer=TextWriter(), + contents, + writer=TextWriter(), settings_overrides=self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES, ) return text_output @@ -280,8 +290,9 @@ def __call__(self, args, parsed_globals): subcommand_parser = ArgTableArgParser({}, self.subcommand_table) parsed, remaining = subcommand_parser.parse_known_args(args) if getattr(parsed, 'subcommand', None) is not None: - return self.subcommand_table[parsed.subcommand](remaining, - parsed_globals) + return self.subcommand_table[parsed.subcommand]( + remaining, parsed_globals + ) # Create an event handler for a Provider Document instance = self.EventHandlerClass(self) @@ -299,12 +310,13 @@ class ProviderHelpCommand(HelpCommand): This is what is called when ``aws help`` is run. """ + EventHandlerClass = ProviderDocumentEventHandler - def __init__(self, session, command_table, arg_table, - description, synopsis, usage): - HelpCommand.__init__(self, session, None, - command_table, arg_table) + def __init__( + self, session, command_table, arg_table, description, synopsis, usage + ): + HelpCommand.__init__(self, session, None, command_table, arg_table) self.description = description self.synopsis = synopsis self.help_usage = usage @@ -353,10 +365,12 @@ class ServiceHelpCommand(HelpCommand): EventHandlerClass = ServiceDocumentEventHandler - def __init__(self, session, obj, command_table, arg_table, name, - event_class): - super(ServiceHelpCommand, self).__init__(session, obj, command_table, - arg_table) + def __init__( + self, session, obj, command_table, arg_table, name, event_class + ): + super(ServiceHelpCommand, self).__init__( + session, obj, command_table, arg_table + ) self._name = name self._event_class = event_class @@ -376,10 +390,10 @@ class OperationHelpCommand(HelpCommand): e.g. ``aws ec2 describe-instances help``. 
""" + EventHandlerClass = OperationDocumentEventHandler - def __init__(self, session, operation_model, arg_table, name, - event_class): + def __init__(self, session, operation_model, arg_table, name, event_class): HelpCommand.__init__(self, session, operation_model, None, arg_table) self.param_shorthand = ParamShorthandParser() self._name = name diff --git a/awscli/logger.py b/awscli/logger.py index 38b16fe7bbe3..e0f818b0d009 100644 --- a/awscli/logger.py +++ b/awscli/logger.py @@ -17,8 +17,7 @@ LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' -def set_stream_logger(logger_name, log_level, stream=None, - format_string=None): +def set_stream_logger(logger_name, log_level, stream=None, format_string=None): """ Convenience method to configure a stream logger. diff --git a/awscli/paramfile.py b/awscli/paramfile.py index da5307f214f6..eaf2a84a922b 100644 --- a/awscli/paramfile.py +++ b/awscli/paramfile.py @@ -10,12 +10,12 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import copy import logging import os -import copy -from awscli.compat import compat_open from awscli import argprocess +from awscli.compat import compat_open logger = logging.getLogger(__name__) @@ -77,7 +77,7 @@ def get_paramfile(path, cases): def get_file(prefix, path, mode): - file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):])) + file_path = os.path.expandvars(os.path.expanduser(path[len(prefix) :])) try: with compat_open(file_path, mode) as f: return f.read() @@ -85,13 +85,15 @@ def get_file(prefix, path, mode): raise ResourceLoadingError( 'Unable to load paramfile (%s), text contents could ' 'not be decoded. If this is a binary file, please use the ' - 'fileb:// prefix instead of the file:// prefix.' % file_path) + 'fileb:// prefix instead of the file:// prefix.' % file_path + ) except (OSError, IOError) as e: - raise ResourceLoadingError('Unable to load paramfile %s: %s' % ( - path, e)) + raise ResourceLoadingError( + 'Unable to load paramfile %s: %s' % (path, e) + ) LOCAL_PREFIX_MAP = { 'file://': (get_file, {'mode': 'r'}), 'fileb://': (get_file, {'mode': 'rb'}), -} \ No newline at end of file +} diff --git a/awscli/plugin.py b/awscli/plugin.py index 1c2331ae1cbe..46a26a4fc1a7 100644 --- a/awscli/plugin.py +++ b/awscli/plugin.py @@ -10,9 +10,9 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-import sys -import os import logging +import os +import sys from botocore.hooks import HierarchicalEmitter @@ -80,6 +80,9 @@ def _import_plugins(plugin_mapping): def _add_plugin_path_to_sys_path(plugin_path): for dirname in plugin_path.split(os.pathsep): - log.debug("Adding additional path from cli_legacy_plugin_path " - "configuration: %s", dirname) + log.debug( + "Adding additional path from cli_legacy_plugin_path " + "configuration: %s", + dirname, + ) sys.path.append(dirname) diff --git a/awscli/schema.py b/awscli/schema.py index 17ec6ba416cd..4c3a60cff67d 100644 --- a/awscli/schema.py +++ b/awscli/schema.py @@ -63,6 +63,7 @@ class SchemaTransformer(object): $ aws foo bar --baz arg1=Value1,arg2=5 arg1=Value2 """ + JSON_SCHEMA_TO_AWS_TYPES = { 'object': 'structure', 'array': 'list', @@ -116,7 +117,8 @@ def _transform_structure(self, schema, shapes): for key, value in schema['properties'].items(): current_type_name = self._json_schema_to_aws_type(value) current_shape_name = self._shape_namer.new_shape_name( - current_type_name) + current_type_name + ) members[key] = {'shape': current_shape_name} if value.get('required', False): required_members.append(key) diff --git a/awscli/shorthand.py b/awscli/shorthand.py index 7443dba4a141..f1edbcd1f435 100644 --- a/awscli/shorthand.py +++ b/awscli/shorthand.py @@ -38,6 +38,7 @@ ``BackCompatVisitor`` class. """ + import re import string @@ -57,25 +58,24 @@ def match(self, value): class ShorthandParseError(Exception): - def _error_location(self): consumed, remaining, num_spaces = self.value, '', self.index - if '\n' in self.value[:self.index]: + if '\n' in self.value[: self.index]: # If there's newlines in the consumed expression, we want # to make sure we're only counting the spaces # from the last newline: # foo=bar,\n # bar==baz # ^ - last_newline = self.value[:self.index].rindex('\n') + last_newline = self.value[: self.index].rindex('\n') num_spaces = self.index - last_newline - 1 - if '\n' in self.value[self.index:]: + if '\n' in self.value[self.index :]: # If there's newline in the remaining, divide value # into consumed and remainig # foo==bar,\n # ^ # bar=baz - next_newline = self.index + self.value[self.index:].index('\n') + next_newline = self.index + self.value[self.index :].index('\n') consumed = self.value[:next_newline] remaining = self.value[next_newline:] return '%s\n%s%s' % (consumed, (' ' * num_spaces) + '^', remaining) @@ -91,10 +91,11 @@ def __init__(self, value, expected, actual, index): super(ShorthandParseSyntaxError, self).__init__(msg) def _construct_msg(self): - msg = ( - "Expected: '%s', received: '%s' for input:\n" - "%s" - ) % (self.expected, self.actual, self._error_location()) + msg = ("Expected: '%s', received: '%s' for input:\n" "%s") % ( + self.expected, + self.actual, + self._error_location(), + ) return msg @@ -135,20 +136,22 @@ class ShorthandParser(object): _ESCAPED_COMMA = '(\\\\,)' _FIRST_VALUE = _NamedRegex( 'first', - u'({escaped_comma}|[{start_word}])' - u'({escaped_comma}|[{follow_chars}])*'.format( + '({escaped_comma}|[{start_word}])' + '({escaped_comma}|[{follow_chars}])*'.format( escaped_comma=_ESCAPED_COMMA, start_word=_START_WORD, follow_chars=_FIRST_FOLLOW_CHARS, - )) + ), + ) _SECOND_VALUE = _NamedRegex( 'second', - u'({escaped_comma}|[{start_word}])' - u'({escaped_comma}|[{follow_chars}])*'.format( + '({escaped_comma}|[{start_word}])' + '({escaped_comma}|[{follow_chars}])*'.format( escaped_comma=_ESCAPED_COMMA, start_word=_START_WORD, follow_chars=_SECOND_FOLLOW_CHARS, - )) + ), + ) def 
__init__(self): self._tokens = [] @@ -213,7 +216,7 @@ def _key(self): if self._current() not in valid_chars: break self._index += 1 - return self._input_value[start:self._index] + return self._input_value[start : self._index] def _values(self): # values = csv-list / explicit-list / hash-literal @@ -275,11 +278,15 @@ def _csv_value(self): return csv_list def _value(self): - result = self._FIRST_VALUE.match(self._input_value[self._index:]) + result = self._FIRST_VALUE.match(self._input_value[self._index :]) if result is not None: consumed = self._consume_matched_regex(result) processed = consumed.replace('\\,', ',').rstrip() - return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed + return ( + self._resolve_paramfiles(processed) + if self._should_resolve_paramfiles + else processed + ) return '' def _explicit_list(self): @@ -339,7 +346,11 @@ def _single_quoted_value(self): # val-escaped-single = %x20-26 / %x28-7F / escaped-escape / # (escape single-quote) processed = self._consume_quoted(self._SINGLE_QUOTED, escaped_char="'") - return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed + return ( + self._resolve_paramfiles(processed) + if self._should_resolve_paramfiles + else processed + ) def _consume_quoted(self, regex, escaped_char=None): value = self._must_consume_regex(regex)[1:-1] @@ -350,7 +361,11 @@ def _consume_quoted(self, regex, escaped_char=None): def _double_quoted_value(self): processed = self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='"') - return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed + return ( + self._resolve_paramfiles(processed) + if self._should_resolve_paramfiles + else processed + ) def _second_value(self): if self._current() == "'": @@ -360,7 +375,11 @@ def _second_value(self): else: consumed = self._must_consume_regex(self._SECOND_VALUE) processed = consumed.replace('\\,', ',').rstrip() - return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed + return ( + self._resolve_paramfiles(processed) + if self._should_resolve_paramfiles + else processed + ) def _resolve_paramfiles(self, val): if (paramfile := get_paramfile(val, LOCAL_PREFIX_MAP)) is not None: @@ -371,27 +390,30 @@ def _expect(self, char, consume_whitespace=False): if consume_whitespace: self._consume_whitespace() if self._index >= len(self._input_value): - raise ShorthandParseSyntaxError(self._input_value, char, - 'EOF', self._index) + raise ShorthandParseSyntaxError( + self._input_value, char, 'EOF', self._index + ) actual = self._input_value[self._index] if actual != char: - raise ShorthandParseSyntaxError(self._input_value, char, - actual, self._index) + raise ShorthandParseSyntaxError( + self._input_value, char, actual, self._index + ) self._index += 1 if consume_whitespace: self._consume_whitespace() def _must_consume_regex(self, regex): - result = regex.match(self._input_value[self._index:]) + result = regex.match(self._input_value[self._index :]) if result is not None: return self._consume_matched_regex(result) - raise ShorthandParseSyntaxError(self._input_value, '<%s>' % regex.name, - '', self._index) + raise ShorthandParseSyntaxError( + self._input_value, '<%s>' % regex.name, '', self._index + ) def _consume_matched_regex(self, result): start, end = result.span() - v = self._input_value[self._index+start:self._index+end] - self._index += (end - start) + v = self._input_value[self._index + start : self._index + end] + self._index += end - 
start return v def _current(self): @@ -418,16 +440,18 @@ def visit(self, params, model): self._visit({}, model, '', params) def _visit(self, parent, shape, name, value): - method = getattr(self, '_visit_%s' % shape.type_name, - self._visit_scalar) + method = getattr( + self, '_visit_%s' % shape.type_name, self._visit_scalar + ) method(parent, shape, name, value) def _visit_structure(self, parent, shape, name, value): if not isinstance(value, dict): return for member_name, member_shape in shape.members.items(): - self._visit(value, member_shape, member_name, - value.get(member_name)) + self._visit( + value, member_shape, member_name, value.get(member_name) + ) def _visit_list(self, parent, shape, name, value): if not isinstance(value, list): @@ -453,8 +477,9 @@ def _visit_structure(self, parent, shape, name, value): return for member_name, member_shape in shape.members.items(): try: - self._visit(value, member_shape, member_name, - value.get(member_name)) + self._visit( + value, member_shape, member_name, value.get(member_name) + ) except DocumentTypesNotSupportedError: # Catch and propagate the document type error to a better # error message as when the original error is thrown there is @@ -474,7 +499,8 @@ def _visit_list(self, parent, shape, name, value): parent[name] = [value] else: return super(BackCompatVisitor, self)._visit_list( - parent, shape, name, value) + parent, shape, name, value + ) def _visit_scalar(self, parent, shape, name, value): if value is None: diff --git a/awscli/table.py b/awscli/table.py index 8ebfc454d0ed..f971d791a896 100644 --- a/awscli/table.py +++ b/awscli/table.py @@ -6,19 +6,19 @@ # http://aws.amazon.com/apache2.0/ +import struct + # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys -import struct import unicodedata import colorama from awscli.utils import is_a_tty - # `autoreset` allows us to not have to sent reset sequences for every # string. `strip` lets us preserve color when redirecting. COLORAMA_KWARGS = { @@ -35,28 +35,32 @@ def get_text_length(text): # * F(Fullwidth) # * W(Wide) text = str(text) - return sum(2 if unicodedata.east_asian_width(char) in 'WFA' else 1 - for char in text) + return sum( + 2 if unicodedata.east_asian_width(char) in 'WFA' else 1 + for char in text + ) def determine_terminal_width(default_width=80): # If we can't detect the terminal width, the default_width is returned. try: - from termios import TIOCGWINSZ from fcntl import ioctl + from termios import TIOCGWINSZ except ImportError: return default_width try: - height, width = struct.unpack('hhhh', ioctl(sys.stdout, - TIOCGWINSZ, '\000' * 8))[0:2] + height, width = struct.unpack( + 'hhhh', ioctl(sys.stdout, TIOCGWINSZ, '\000' * 8) + )[0:2] except Exception: return default_width else: return width -def center_text(text, length=80, left_edge='|', right_edge='|', - text_length=None): +def center_text( + text, length=80, left_edge='|', right_edge='|', text_length=None +): """Center text with specified edge chars. 
You can pass in the length of the text as an arg, otherwise it is computed @@ -77,15 +81,24 @@ def center_text(text, length=80, left_edge='|', right_edge='|', return final -def align_left(text, length, left_edge='|', right_edge='|', text_length=None, - left_padding=2): +def align_left( + text, + length, + left_edge='|', + right_edge='|', + text_length=None, + left_padding=2, +): """Left align text.""" # postcondition: get_text_length(returned_text) == length if text_length is None: text_length = get_text_length(text) computed_length = ( - text_length + left_padding + \ - get_text_length(left_edge) + get_text_length(right_edge)) + text_length + + left_padding + + get_text_length(left_edge) + + get_text_length(right_edge) + ) if length - computed_length >= 0: padding = left_padding else: @@ -126,8 +139,9 @@ def convert_to_vertical_table(sections): class IndentedStream(object): - def __init__(self, stream, indent_level, left_indent_char='|', - right_indent_char='|'): + def __init__( + self, stream, indent_level, left_indent_char='|', right_indent_char='|' + ): self._stream = stream self._indent_level = indent_level self._left_indent_char = left_indent_char @@ -167,25 +181,39 @@ def __init__(self): def style_title(self, text): # Originally bold + underline return text - #return colorama.Style.BOLD + text + colorama.Style.RESET_ALL + # return colorama.Style.BOLD + text + colorama.Style.RESET_ALL def style_header_column(self, text): # Originally underline return text def style_row_element(self, text): - return (colorama.Style.BRIGHT + colorama.Fore.BLUE + - text + colorama.Style.RESET_ALL) + return ( + colorama.Style.BRIGHT + + colorama.Fore.BLUE + + text + + colorama.Style.RESET_ALL + ) def style_indentation_char(self, text): - return (colorama.Style.DIM + colorama.Fore.YELLOW + - text + colorama.Style.RESET_ALL) + return ( + colorama.Style.DIM + + colorama.Fore.YELLOW + + text + + colorama.Style.RESET_ALL + ) class MultiTable(object): - def __init__(self, terminal_width=None, initial_section=True, - column_separator='|', terminal=None, - styler=None, auto_reformat=True): + def __init__( + self, + terminal_width=None, + initial_section=True, + column_separator='|', + terminal=None, + styler=None, + auto_reformat=True, + ): self._auto_reformat = auto_reformat if initial_section: self._current_section = Section() @@ -238,16 +266,22 @@ def _determine_conversion_needed(self, max_width): return self._auto_reformat def _calculate_max_width(self): - max_width = max(s.total_width(padding=4, with_border=True, - outer_padding=s.indent_level) - for s in self._sections) + max_width = max( + s.total_width( + padding=4, with_border=True, outer_padding=s.indent_level + ) + for s in self._sections + ) return max_width def _render_section(self, section, max_width, stream): - stream = IndentedStream(stream, section.indent_level, - self._styler.style_indentation_char('|'), - self._styler.style_indentation_char('|')) - max_width -= (section.indent_level * 2) + stream = IndentedStream( + stream, + section.indent_level, + self._styler.style_indentation_char('|'), + self._styler.style_indentation_char('|'), + ) + max_width -= section.indent_level * 2 self._render_title(section, max_width, stream) self._render_column_titles(section, max_width, stream) self._render_rows(section, max_width, stream) @@ -258,8 +292,12 @@ def _render_title(self, section, max_width, stream): # bottom_border: ---------------------------- if section.title: title = self._styler.style_title(section.title) - stream.write(center_text(title, 
max_width, '|', '|', - get_text_length(section.title)) + '\n') + stream.write( + center_text( + title, max_width, '|', '|', get_text_length(section.title) + ) + + '\n' + ) if not section.headers and not section.rows: stream.write('+%s+' % ('-' * (max_width - 2)) + '\n') @@ -268,8 +306,9 @@ def _render_column_titles(self, section, max_width, stream): return # In order to render the column titles we need to know # the width of each of the columns. - widths = section.calculate_column_widths(padding=4, - max_width=max_width) + widths = section.calculate_column_widths( + padding=4, max_width=max_width + ) # TODO: Built a list instead of +=, it's more efficient. current = '' length_so_far = 0 @@ -283,9 +322,13 @@ def _render_column_titles(self, section, max_width, stream): first = False else: left_edge = '' - current += center_text(text=stylized_header, length=width, - left_edge=left_edge, right_edge='|', - text_length=get_text_length(header)) + current += center_text( + text=stylized_header, + length=width, + left_edge=left_edge, + right_edge='|', + text_length=get_text_length(header), + ) length_so_far += width self._write_line_break(stream, widths) stream.write(current + '\n') @@ -307,8 +350,9 @@ def _write_line_break(self, stream, widths): def _render_rows(self, section, max_width, stream): if not section.rows: return - widths = section.calculate_column_widths(padding=4, - max_width=max_width) + widths = section.calculate_column_widths( + padding=4, max_width=max_width + ) if not widths: return self._write_line_break(stream, widths) @@ -325,10 +369,13 @@ def _render_rows(self, section, max_width, stream): else: left_edge = '' stylized = self._styler.style_row_element(element) - current += align_left(text=stylized, length=width, - left_edge=left_edge, - right_edge=self._column_separator, - text_length=get_text_length(element)) + current += align_left( + text=stylized, + length=width, + left_edge=left_edge, + right_edge=self._column_separator, + text_length=get_text_length(element), + ) length_so_far += width stream.write(current + '\n') self._write_line_break(stream, widths) @@ -344,8 +391,10 @@ def __init__(self): self._max_widths = [] def __repr__(self): - return ("Section(title=%s, headers=%s, indent_level=%s, num_rows=%s)" % - (self.title, self.headers, self.indent_level, len(self.rows))) + return ( + "Section(title=%s, headers=%s, indent_level=%s, num_rows=%s)" + % (self.title, self.headers, self.indent_level, len(self.rows)) + ) def calculate_column_widths(self, padding=0, max_width=None): # postcondition: sum(widths) == max_width @@ -385,8 +434,13 @@ def total_width(self, padding=0, with_border=False, outer_padding=0): if with_border: total += border_padding total += outer_padding + outer_padding - return max(get_text_length(self.title) + border_padding + outer_padding + - outer_padding, total) + return max( + get_text_length(self.title) + + border_padding + + outer_padding + + outer_padding, + total, + ) def add_title(self, title): self.title = title @@ -404,8 +458,10 @@ def add_row(self, row): if self._num_cols is None: self._num_cols = len(row) if len(row) != self._num_cols: - raise ValueError("Row should have %s elements, instead " - "it has %s" % (self._num_cols, len(row))) + raise ValueError( + "Row should have %s elements, instead " + "it has %s" % (self._num_cols, len(row)) + ) row = self._format_row(row) self.rows.append(row) self._update_max_widths(row) @@ -418,4 +474,6 @@ def _update_max_widths(self, row): self._max_widths = [get_text_length(el) for el in row] else: for i, 
el in enumerate(row): - self._max_widths[i] = max(get_text_length(el), self._max_widths[i]) + self._max_widths[i] = max( + get_text_length(el), self._max_widths[i] + ) diff --git a/awscli/testutils.py b/awscli/testutils.py index 5153fa960559..8893ab30d39a 100644 --- a/awscli/testutils.py +++ b/awscli/testutils.py @@ -19,37 +19,34 @@ advantage of all the testing utilities we provide. """ -import os -import sys + +import binascii +import contextlib import copy -import shutil -import time import json import logging -import tempfile -import platform -import contextlib -import binascii import math +import os +import platform +import shutil +import sys +import tempfile +import time +import unittest from pprint import pformat -from subprocess import Popen, PIPE +from subprocess import PIPE, Popen from unittest import mock -import unittest - -from awscli.compat import BytesIO, StringIO from ruamel.yaml import YAML -from botocore.session import Session -from botocore.exceptions import ClientError -from botocore.exceptions import WaiterError -import botocore.loaders -from botocore.awsrequest import AWSResponse - import awscli.clidriver -from awscli.plugin import load_plugins +import botocore.loaders from awscli.clidriver import CLIDriver - +from awscli.compat import BytesIO, StringIO +from awscli.plugin import load_plugins +from botocore.awsrequest import AWSResponse +from botocore.exceptions import ClientError, WaiterError +from botocore.session import Session _LOADER = botocore.loaders.Loader() INTEG_LOG = logging.getLogger('awscli.tests.integration') @@ -66,9 +63,12 @@ def test_some_non_windows_stuff(self): self.assertEqual(...) """ + def decorator(func): return unittest.skipIf( - platform.system() not in ['Darwin', 'Linux'], reason)(func) + platform.system() not in ['Darwin', 'Linux'], reason + )(func) + return decorator @@ -82,8 +82,10 @@ def test_some_windows_stuff(self): self.assertEqual(...) """ + def decorator(func): return unittest.skipIf(platform.system() != 'Windows', reason)(func) + return decorator @@ -101,6 +103,7 @@ def create_clidriver(): def get_aws_cmd(): global AWS_CMD import awscli + if AWS_CMD is None: # Try /bin/aws repo_root = os.path.dirname(os.path.abspath(awscli.__file__)) @@ -108,10 +111,12 @@ def get_aws_cmd(): if not os.path.isfile(aws_cmd): aws_cmd = _search_path_for_cmd('aws') if aws_cmd is None: - raise ValueError('Could not find "aws" executable. Either ' - 'make sure it is on your PATH, or you can ' - 'explicitly set this value using ' - '"set_aws_cmd()"') + raise ValueError( + 'Could not find "aws" executable. Either ' + 'make sure it is on your PATH, or you can ' + 'explicitly set this value using ' + '"set_aws_cmd()"' + ) AWS_CMD = aws_cmd return AWS_CMD @@ -197,15 +202,12 @@ def create_dir_bucket(session, name=None, location=None): params = { 'Bucket': bucket_name, 'CreateBucketConfiguration': { - 'Location': { - 'Type': 'AvailabilityZone', - 'Name': az - }, + 'Location': {'Type': 'AvailabilityZone', 'Name': az}, 'Bucket': { 'Type': 'Directory', - 'DataRedundancy': 'SingleAvailabilityZone' - } - } + 'DataRedundancy': 'SingleAvailabilityZone', + }, + }, } try: client.create_bucket(**params) @@ -249,6 +251,7 @@ class BaseCLIDriverTest(unittest.TestCase): This will load all the default plugins as well so it will simulate the behavior the user will see. 
""" + def setUp(self): self.environ = { 'AWS_DATA_PATH': os.environ['AWS_DATA_PATH'], @@ -280,23 +283,29 @@ def tearDown(self): def assert_contains(self, contains): if contains not in self.renderer.rendered_contents: - self.fail("The expected contents:\n%s\nwere not in the " - "actual rendered contents:\n%s" % ( - contains, self.renderer.rendered_contents)) + self.fail( + "The expected contents:\n%s\nwere not in the " + "actual rendered contents:\n%s" + % (contains, self.renderer.rendered_contents) + ) def assert_contains_with_count(self, contains, count): r_count = self.renderer.rendered_contents.count(contains) if r_count != count: - self.fail("The expected contents:\n%s\n, with the " - "count:\n%d\nwere not in the actual rendered " - " contents:\n%s\nwith count:\n%d" % ( - contains, count, self.renderer.rendered_contents, r_count)) + self.fail( + "The expected contents:\n%s\n, with the " + "count:\n%d\nwere not in the actual rendered " + " contents:\n%s\nwith count:\n%d" + % (contains, count, self.renderer.rendered_contents, r_count) + ) def assert_not_contains(self, contents): if contents in self.renderer.rendered_contents: - self.fail("The contents:\n%s\nwere not suppose to be in the " - "actual rendered contents:\n%s" % ( - contents, self.renderer.rendered_contents)) + self.fail( + "The contents:\n%s\nwere not suppose to be in the " + "actual rendered contents:\n%s" + % (contents, self.renderer.rendered_contents) + ) def assert_text_order(self, *args, **kwargs): # First we need to find where the SYNOPSIS section starts. @@ -309,11 +318,15 @@ def assert_text_order(self, *args, **kwargs): previous = arg_indices[0] for i, index in enumerate(arg_indices[1:], 1): if index == -1: - self.fail('The string %r was not found in the contents: %s' - % (args[index], contents)) + self.fail( + 'The string %r was not found in the contents: %s' + % (args[index], contents) + ) if index < previous: - self.fail('The string %r came before %r, but was suppose to come ' - 'after it.\n%s' % (args[i], args[i - 1], contents)) + self.fail( + 'The string %r came before %r, but was suppose to come ' + 'after it.\n%s' % (args[i], args[i - 1], contents) + ) previous = index @@ -388,7 +401,9 @@ def setUp(self): self.http_response = AWSResponse(None, 200, {}, None) self.error_http_response = AWSResponse(None, 400, {}, None) self.parsed_response = {} - self.make_request_patch = mock.patch('botocore.endpoint.Endpoint.make_request') + self.make_request_patch = mock.patch( + 'botocore.endpoint.Endpoint.make_request' + ) self.make_request_is_patched = False self.operations_called = [] self.parsed_responses = None @@ -424,7 +439,10 @@ def patch_make_request(self): if self.parsed_responses is not None: make_request_patch.side_effect = self._request_patch_side_effect else: - make_request_patch.return_value = (self.http_response, self.parsed_response) + make_request_patch.return_value = ( + self.http_response, + self.parsed_response, + ) self.make_request_is_patched = True def _request_patch_side_effect(self, *args, **kwargs): @@ -436,8 +454,14 @@ def _request_patch_side_effect(self, *args, **kwargs): http_response = self.error_http_response return http_response, parsed_response - def assert_params_for_cmd(self, cmd, params=None, expected_rc=0, - stderr_contains=None, ignore_params=None): + def assert_params_for_cmd( + self, + cmd, + params=None, + expected_rc=0, + stderr_contains=None, + ignore_params=None, + ): stdout, stderr, rc = self.run_cmd(cmd, expected_rc) if stderr_contains is not None: self.assertIn(stderr_contains, 
stderr) @@ -451,11 +475,12 @@ def assert_params_for_cmd(self, cmd, params=None, expected_rc=0, except KeyError: pass if params != last_kwargs: - self.fail("Actual params did not match expected params.\n" - "Expected:\n\n" - "%s\n" - "Actual:\n\n%s\n" % ( - pformat(params), pformat(last_kwargs))) + self.fail( + "Actual params did not match expected params.\n" + "Expected:\n\n" + "%s\n" + "Actual:\n\n%s\n" % (pformat(params), pformat(last_kwargs)) + ) return stdout, stderr, rc def before_parameter_build(self, params, model, **kwargs): @@ -468,7 +493,8 @@ def run_cmd(self, cmd, expected_rc=0): event_emitter = self.driver.session.get_component('event_emitter') event_emitter.register('before-call', self.before_call) event_emitter.register_first( - 'before-parameter-build.*.*', self.before_parameter_build) + 'before-parameter-build.*.*', self.before_parameter_build + ) if not isinstance(cmd, list): cmdlist = cmd.split() else: @@ -478,10 +504,11 @@ def run_cmd(self, cmd, expected_rc=0): stderr = captured.stderr.getvalue() stdout = captured.stdout.getvalue() self.assertEqual( - rc, expected_rc, + rc, + expected_rc, "Unexpected rc (expected: %s, actual: %s) for command: %s\n" - "stdout:\n%sstderr:\n%s" % ( - expected_rc, rc, cmd, stdout, stderr)) + "stdout:\n%sstderr:\n%s" % (expected_rc, rc, cmd, stdout, stderr), + ) return stdout, stderr, rc @@ -492,7 +519,7 @@ def setUp(self): 'AWS_DEFAULT_REGION': 'us-east-1', 'AWS_ACCESS_KEY_ID': 'access_key', 'AWS_SECRET_ACCESS_KEY': 'secret_key', - 'AWS_CONFIG_FILE': '' + 'AWS_CONFIG_FILE': '', } self.environ_patch = mock.patch('os.environ', self.environ) self.environ_patch.start() @@ -502,7 +529,6 @@ def setUp(self): self.driver = create_clidriver() self.entry_point = awscli.clidriver.AWSCLIEntryPoint(self.driver) - def tearDown(self): self.environ_patch.stop() if self.send_is_patched: @@ -514,9 +540,9 @@ def patch_send(self, status_code=200, headers={}, content=b''): self.send_patch.stop() self.send_is_patched = False send_patch = self.send_patch.start() - send_patch.return_value = mock.Mock(status_code=status_code, - headers=headers, - content=content) + send_patch.return_value = mock.Mock( + status_code=status_code, headers=headers, content=content + ) self.send_is_patched = True def run_cmd(self, cmd, expected_rc=0): @@ -532,10 +558,11 @@ def run_cmd(self, cmd, expected_rc=0): stderr = captured.stderr.getvalue() stdout = captured.stdout.getvalue() self.assertEqual( - rc, expected_rc, + rc, + expected_rc, "Unexpected rc (expected: %s, actual: %s) for command: %s\n" - "stdout:\n%sstderr:\n%s" % ( - expected_rc, rc, cmd, stdout, stderr)) + "stdout:\n%sstderr:\n%s" % (expected_rc, rc, cmd, stdout, stderr), + ) return stdout, stderr, rc @@ -547,8 +574,9 @@ def remove_all(self): if os.path.exists(self.rootdir): shutil.rmtree(self.rootdir) - def create_file(self, filename, contents, mtime=None, mode='w', - encoding=None): + def create_file( + self, filename, contents, mtime=None, mode='w', encoding=None + ): """Creates a file in a tmpdir ``filename`` should be a relative path, e.g. "foo/bar/baz.txt" @@ -638,8 +666,14 @@ def _escape_quotes(command): return command -def aws(command, collect_memory=False, env_vars=None, - wait_for_finish=True, input_data=None, input_file=None): +def aws( + command, + collect_memory=False, + env_vars=None, + wait_for_finish=True, + input_data=None, + input_file=None, +): """Run an aws command. 
This help function abstracts the differences of running the "aws" @@ -687,8 +721,14 @@ def aws(command, collect_memory=False, env_vars=None, env = env_vars if input_file is None: input_file = PIPE - process = Popen(full_command, stdout=PIPE, stderr=PIPE, stdin=input_file, - shell=True, env=env) + process = Popen( + full_command, + stdout=PIPE, + stderr=PIPE, + stdin=input_file, + shell=True, + env=env, + ) if not wait_for_finish: return process memory = None @@ -699,10 +739,12 @@ def aws(command, collect_memory=False, env_vars=None, stdout, stderr = process.communicate(**kwargs) else: stdout, stderr, memory = _wait_and_collect_mem(process) - return Result(process.returncode, - stdout.decode(stdout_encoding), - stderr.decode(stdout_encoding), - memory) + return Result( + process.returncode, + stdout.decode(stdout_encoding), + stderr.decode(stdout_encoding), + memory, + ) def get_stdout_encoding(): @@ -720,8 +762,9 @@ def _wait_and_collect_mem(process): get_memory = _get_memory_with_ps else: raise ValueError( - "Can't collect memory for process on platform %s." % - platform.system()) + "Can't collect memory for process on platform %s." + % platform.system() + ) memory = [] while process.poll() is None: try: @@ -758,6 +801,7 @@ class BaseS3CLICommand(unittest.TestCase): and more streamlined. """ + _PUT_HEAD_SHARED_EXTRAS = [ 'SSECustomerAlgorithm', 'SSECustomerKey', @@ -803,8 +847,10 @@ def assert_key_contents_equal(self, bucket, key, expected_contents): # without necessarily printing the actual contents. self.assertEqual(len(actual_contents), len(expected_contents)) if actual_contents != expected_contents: - self.fail("Contents for %s/%s do not match (but they " - "have the same length)" % (bucket, key)) + self.fail( + "Contents for %s/%s do not match (but they " + "have the same length)" % (bucket, key) + ) def delete_public_access_block(self, bucket_name): client = self.create_client_for_bucket(bucket_name) @@ -825,10 +871,7 @@ def create_bucket(self, name=None, region=None): def put_object(self, bucket_name, key_name, contents='', extra_args=None): client = self.create_client_for_bucket(bucket_name) - call_args = { - 'Bucket': bucket_name, - 'Key': key_name, 'Body': contents - } + call_args = {'Bucket': bucket_name, 'Key': key_name, 'Body': contents} if extra_args is not None: call_args.update(extra_args) response = client.put_object(**call_args) @@ -836,7 +879,8 @@ def put_object(self, bucket_name, key_name, contents='', extra_args=None): extra_head_params = {} if extra_args: extra_head_params = dict( - (k, v) for (k, v) in extra_args.items() + (k, v) + for (k, v) in extra_args.items() if k in self._PUT_HEAD_SHARED_EXTRAS ) self.wait_until_key_exists( @@ -893,7 +937,8 @@ def wait_bucket_exists(self, bucket_name, min_successes=3): client = self.create_client_for_bucket(bucket_name) waiter = client.get_waiter('bucket_exists') consistency_waiter = ConsistencyWaiter( - min_successes=min_successes, delay_initial_poll=True) + min_successes=min_successes, delay_initial_poll=True + ) consistency_waiter.wait( lambda: waiter.wait(Bucket=bucket_name) is None ) @@ -911,7 +956,8 @@ def bucket_not_exists(self, bucket_name): def key_exists(self, bucket_name, key_name, min_successes=3): try: self.wait_until_key_exists( - bucket_name, key_name, min_successes=min_successes) + bucket_name, key_name, min_successes=min_successes + ) return True except (ClientError, WaiterError): return False @@ -919,7 +965,8 @@ def key_exists(self, bucket_name, key_name, min_successes=3): def key_not_exists(self, 
bucket_name, key_name, min_successes=3): try: self.wait_until_key_not_exists( - bucket_name, key_name, min_successes=min_successes) + bucket_name, key_name, min_successes=min_successes + ) return True except (ClientError, WaiterError): return False @@ -937,18 +984,28 @@ def head_object(self, bucket_name, key_name): response = client.head_object(Bucket=bucket_name, Key=key_name) return response - def wait_until_key_exists(self, bucket_name, key_name, extra_params=None, - min_successes=3): - self._wait_for_key(bucket_name, key_name, extra_params, - min_successes, exists=True) + def wait_until_key_exists( + self, bucket_name, key_name, extra_params=None, min_successes=3 + ): + self._wait_for_key( + bucket_name, key_name, extra_params, min_successes, exists=True + ) - def wait_until_key_not_exists(self, bucket_name, key_name, extra_params=None, - min_successes=3): - self._wait_for_key(bucket_name, key_name, extra_params, - min_successes, exists=False) + def wait_until_key_not_exists( + self, bucket_name, key_name, extra_params=None, min_successes=3 + ): + self._wait_for_key( + bucket_name, key_name, extra_params, min_successes, exists=False + ) - def _wait_for_key(self, bucket_name, key_name, extra_params=None, - min_successes=3, exists=True): + def _wait_for_key( + self, + bucket_name, + key_name, + extra_params=None, + min_successes=3, + exists=True, + ): client = self.create_client_for_bucket(bucket_name) if exists: waiter = client.get_waiter('object_exists') @@ -962,8 +1019,10 @@ def _wait_for_key(self, bucket_name, key_name, extra_params=None, def assert_no_errors(self, p): self.assertEqual( - p.rc, 0, - "Non zero rc (%s) received: %s" % (p.rc, p.stdout + p.stderr)) + p.rc, + 0, + "Non zero rc (%s) received: %s" % (p.rc, p.stdout + p.stderr), + ) self.assertNotIn("Error:", p.stderr) self.assertNotIn("failed:", p.stderr) self.assertNotIn("client error", p.stderr) @@ -1010,8 +1069,14 @@ class ConsistencyWaiter(object): :param delay: The number of seconds to delay the next API call after a failed check call. Default of 5 seconds. """ - def __init__(self, min_successes=1, max_attempts=20, delay=5, - delay_initial_poll=False): + + def __init__( + self, + min_successes=1, + max_attempts=20, + delay=5, + delay_initial_poll=False, + ): self.min_successes = min_successes self.max_attempts = max_attempts self.delay = delay diff --git a/awscli/text.py b/awscli/text.py index a5bd0090829e..0ce5af91b4a4 100644 --- a/awscli/text.py +++ b/awscli/text.py @@ -34,15 +34,18 @@ def _format_list(item, identifier, stream): if any(isinstance(el, dict) for el in item): all_keys = _all_scalar_keys(item) for element in item: - _format_text(element, stream=stream, identifier=identifier, - scalar_keys=all_keys) + _format_text( + element, + stream=stream, + identifier=identifier, + scalar_keys=all_keys, + ) elif any(isinstance(el, list) for el in item): scalar_elements, non_scalars = _partition_list(item) if scalar_elements: _format_scalar_list(scalar_elements, identifier, stream) for non_scalar in non_scalars: - _format_text(non_scalar, stream=stream, - identifier=identifier) + _format_text(non_scalar, stream=stream, identifier=identifier) else: _format_scalar_list(item, identifier, stream) @@ -61,8 +64,7 @@ def _partition_list(item): def _format_scalar_list(elements, identifier, stream): if identifier is not None: for item in elements: - stream.write('%s\t%s\n' % (identifier.upper(), - item)) + stream.write('%s\t%s\n' % (identifier.upper(), item)) else: # For a bare list, just print the contents. 
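This recursion produces the CLI's text output: nested structures flatten into tab-separated rows, each prefixed with the upper-cased key, and scalar columns are ordered by sorted key. A small sketch of the observable behavior, assuming format_text remains the module's public entry point:

import io

from awscli.text import format_text

buf = io.StringIO()
format_text({'Users': [{'Name': 'ann', 'Age': 30}]}, stream=buf)
# Each dict in the list becomes one identifier-prefixed row, so this
# prints roughly 'USERS\t30\tann\n'.
print(repr(buf.getvalue()))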
stream.write('\t'.join([str(item) for item in elements])) @@ -77,8 +79,7 @@ def _format_dict(scalar_keys, item, identifier, stream): stream.write('\t'.join(scalars)) stream.write('\n') for new_identifier, non_scalar in non_scalars: - _format_text(item=non_scalar, stream=stream, - identifier=new_identifier) + _format_text(item=non_scalar, stream=stream, identifier=new_identifier) def _all_scalar_keys(list_of_dicts): diff --git a/awscli/topictags.py b/awscli/topictags.py index 93d281b8add2..6372f476c689 100644 --- a/awscli/topictags.py +++ b/awscli/topictags.py @@ -19,8 +19,9 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # -import os import json +import os + import docutils.core @@ -67,19 +68,25 @@ class TopicTagDB(object): that all tag values for a specific tag of a specific topic are unique. """ - VALID_TAGS = ['category', 'description', 'title', 'related topic', - 'related command'] + VALID_TAGS = [ + 'category', + 'description', + 'title', + 'related topic', + 'related command', + ] # The default directory to look for topics. TOPIC_DIR = os.path.join( - os.path.dirname( - os.path.abspath(__file__)), 'topics') + os.path.dirname(os.path.abspath(__file__)), 'topics' + ) # The default JSON index to load. JSON_INDEX = os.path.join(TOPIC_DIR, 'topic-tags.json') - def __init__(self, tag_dictionary=None, index_file=JSON_INDEX, - topic_dir=TOPIC_DIR): + def __init__( + self, tag_dictionary=None, index_file=JSON_INDEX, topic_dir=TOPIC_DIR + ): """ :param index_file: The path to a specific JSON index to load. If nothing is specified it will default to the default JSON @@ -164,7 +171,8 @@ def scan(self, topic_files): topic_content = f.read() # Record the tags and the values self._add_tag_and_values_from_content( - topic_name, topic_content) + topic_name, topic_content + ) def _find_topic_name(self, topic_src_file): # Get the name of each of these files @@ -259,9 +267,9 @@ def query(self, tag, values=None): # no value constraints are provided or if the tag value # falls in the allowed tag values. if values is None or tag_value in values: - self._add_key_values(query_dict, - key=tag_value, - values=[topic_name]) + self._add_key_values( + query_dict, key=tag_value, values=[topic_name] + ) return query_dict def get_tag_value(self, topic_name, tag, default_value=None): diff --git a/awscli/utils.py b/awscli/utils.py index c8424bba997b..98d25a123a9f 100644 --- a/awscli/utils.py +++ b/awscli/utils.py @@ -10,24 +10,28 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
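Several helpers near the bottom of this module decorate the session's user agent with md/<name>#<value> components. A short sketch of the command-lineage helper (the exact rendered string is an assumption based on how UserAgentComponent serializes):

from botocore.session import Session

from awscli.utils import add_command_lineage_to_user_agent_extra

session = Session()
add_command_lineage_to_user_agent_extra(session, ['s3', 'cp'])
# user_agent_extra should now carry something like 'md/command#s3.cp';
# a second call is a no-op because the md/command# pattern is present.
print(session.user_agent_extra)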
+import contextlib import csv -import signal import datetime -import contextlib +import logging import os import re +import signal import sys -from subprocess import Popen, PIPE -import logging +from subprocess import PIPE, Popen -from awscli.compat import get_stdout_text_writer -from awscli.compat import get_popen_kwargs_for_pager_cmd -from awscli.compat import StringIO -from botocore.useragent import UserAgentComponent -from botocore.utils import resolve_imds_endpoint_mode -from botocore.utils import IMDSFetcher -from botocore.utils import BadIMDSRequestError +from awscli.compat import ( + StringIO, + get_popen_kwargs_for_pager_cmd, + get_stdout_text_writer, +) from botocore.configprovider import BaseProvider +from botocore.useragent import UserAgentComponent +from botocore.utils import ( + BadIMDSRequestError, + IMDSFetcher, + resolve_imds_endpoint_mode, +) logger = logging.getLogger(__name__) @@ -128,12 +132,15 @@ def _get_fetcher(self): def _create_fetcher(self): metadata_timeout = self._session.get_config_variable( - 'metadata_service_timeout') + 'metadata_service_timeout' + ) metadata_num_attempts = self._session.get_config_variable( - 'metadata_service_num_attempts') + 'metadata_service_num_attempts' + ) imds_config = { 'ec2_metadata_service_endpoint': self._session.get_config_variable( - 'ec2_metadata_service_endpoint'), + 'ec2_metadata_service_endpoint' + ), 'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode( self._session ), @@ -175,13 +182,14 @@ def retrieve_region(self): logger.debug( "Max number of attempts exceeded (%s) when " "attempting to retrieve data from metadata service.", - self._num_attempts + self._num_attempts, ) except BadIMDSRequestError as e: logger.debug( "Failed to retrieve a region from IMDS. " "Region detection may not be supported from this endpoint: " - "%s", e.request.url + "%s", + e.request.url, ) return None @@ -190,7 +198,7 @@ def _get_region(self): response = self._get_request( url_path=self._URL_PATH, retry_func=self._default_retry, - token=token + token=token, ) availability_zone = response.text region = availability_zone[:-1] @@ -229,16 +237,19 @@ def _split_with_quotes(value): # Find an opening list bracket list_start = part.find('=[') - if list_start >= 0 and value.find(']') != -1 and \ - (quote_char is None or part.find(quote_char) > list_start): + if ( + list_start >= 0 + and value.find(']') != -1 + and (quote_char is None or part.find(quote_char) > list_start) + ): # This is a list, eat all the items until the end if ']' in part: # Short circuit for only one item new_chunk = part else: new_chunk = _eat_items(value, iter_parts, part, ']') - list_items = _split_with_quotes(new_chunk[list_start + 2:-1]) - new_chunk = new_chunk[:list_start + 1] + ','.join(list_items) + list_items = _split_with_quotes(new_chunk[list_start + 2 : -1]) + new_chunk = new_chunk[: list_start + 1] + ','.join(list_items) new_parts.append(new_chunk) continue elif quote_char is None: @@ -334,8 +345,11 @@ def is_document_type_container(shape): def is_streaming_blob_type(shape): """Check if the shape is a streaming blob type.""" - return (shape and shape.type_name == 'blob' and - shape.serialization.get('streaming', False)) + return ( + shape + and shape.type_name == 'blob' + and shape.serialization.get('streaming', False) + ) def is_tagged_union_type(shape): @@ -373,8 +387,7 @@ def ignore_ctrl_c(): def emit_top_level_args_parsed_event(session, args): - session.emit( - 'top-level-args-parsed', parsed_args=args, session=session) + 
session.emit('top-level-args-parsed', parsed_args=args, session=session) def is_a_tty(): @@ -392,8 +405,9 @@ def is_stdin_a_tty(): class OutputStreamFactory(object): - def __init__(self, session, popen=None, environ=None, - default_less_flags='FRX'): + def __init__( + self, session, popen=None, environ=None, default_less_flags='FRX' + ): self._session = session self._popen = popen if popen is None: @@ -537,12 +551,14 @@ def _do_shape_visit(self, shape, visitor): class BaseShapeVisitor(object): """Visit shape encountered by ShapeWalker""" + def visit_shape(self, shape): pass class ShapeRecordingVisitor(BaseShapeVisitor): """Record shapes visited by ShapeWalker""" + def __init__(self): self.visited = [] @@ -558,12 +574,13 @@ def add_component_to_user_agent_extra(session, component): def add_metadata_component_to_user_agent_extra(session, name, value=None): add_component_to_user_agent_extra( - session, - UserAgentComponent("md", name, value) + session, UserAgentComponent("md", name, value) ) def add_command_lineage_to_user_agent_extra(session, lineage): # Only add a command lineage if one is not already present in the user agent extra. if not re.search(r'md\/command#[\w\.]*', session.user_agent_extra): - add_metadata_component_to_user_agent_extra(session, "command", ".".join(lineage)) + add_metadata_component_to_user_agent_extra( + session, "command", ".".join(lineage) + ) diff --git a/backends/build_system/__main__.py b/backends/build_system/__main__.py index 2e8f8ceddbb9..bb3285d3fe59 100644 --- a/backends/build_system/__main__.py +++ b/backends/build_system/__main__.py @@ -13,19 +13,14 @@ import argparse import os import shutil + from awscli_venv import AwsCliVenv -from constants import ( - ArtifactType, - BUILD_DIR, - INSTALL_DIRNAME, -) from exe import ExeBuilder -from install import ( - Installer, - Uninstaller, -) +from install import Installer, Uninstaller from validate_env import validate_env +from constants import BUILD_DIR, INSTALL_DIRNAME, ArtifactType + def create_exe(aws_venv, build_dir): exe_workspace = os.path.join(build_dir, "exe") diff --git a/backends/build_system/awscli_venv.py b/backends/build_system/awscli_venv.py index 7997043153d6..be995bf8116e 100644 --- a/backends/build_system/awscli_venv.py +++ b/backends/build_system/awscli_venv.py @@ -10,24 +10,24 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import os import json -import subprocess +import os +import pathlib import site +import subprocess import sys -import pathlib from constants import ( - ArtifactType, - DOWNLOAD_DEPS_BOOTSTRAP_LOCK, - PORTABLE_EXE_REQUIREMENTS_LOCK, - SYSTEM_SANDBOX_REQUIREMENTS_LOCK, - ROOT_DIR, - IS_WINDOWS, BIN_DIRNAME, - PYTHON_EXE_NAME, CLI_SCRIPTS, DISTRIBUTION_SOURCE_SANDBOX, + DOWNLOAD_DEPS_BOOTSTRAP_LOCK, + IS_WINDOWS, + PORTABLE_EXE_REQUIREMENTS_LOCK, + PYTHON_EXE_NAME, + ROOT_DIR, + SYSTEM_SANDBOX_REQUIREMENTS_LOCK, + ArtifactType, ) from utils import Utils @@ -138,15 +138,19 @@ def _site_packages(self) -> str: # On windows the getsitepackages can return the root venv dir. # So instead of just taking the first entry, we need to take the # first entry that contains the string "site-packages" in the path. 
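The probe below also works standalone; a minimal sketch of the same trick run against the current interpreter instead of the venv's python_exe:

import json
import subprocess
import sys

out = subprocess.check_output(
    [
        sys.executable,
        "-c",
        "import site, json; print(json.dumps(site.getsitepackages()))",
    ]
)
# Keep the first entry that is really a site-packages directory, since
# on Windows getsitepackages() can also return the environment root.
site_packages = [
    p for p in json.loads(out.decode().strip()) if "site-packages" in p
][0]
print(site_packages)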
- site_path = [path for path in json.loads( - subprocess.check_output( - [ - self.python_exe, - "-c", - "import site, json; print(json.dumps(site.getsitepackages()))", - ] + site_path = [ + path + for path in json.loads( + subprocess.check_output( + [ + self.python_exe, + "-c", + "import site, json; print(json.dumps(site.getsitepackages()))", + ] + ) + .decode() + .strip() ) - .decode() - .strip() - ) if "site-packages" in path][0] + if "site-packages" in path + ][0] return site_path diff --git a/backends/build_system/constants.py b/backends/build_system/constants.py index 7fb8bb35cc99..382027450073 100644 --- a/backends/build_system/constants.py +++ b/backends/build_system/constants.py @@ -14,7 +14,6 @@ from enum import Enum from pathlib import Path - ROOT_DIR = Path(__file__).parents[2] BUILD_DIR = ROOT_DIR / "build" @@ -34,10 +33,16 @@ REQUIREMENTS_DIR = ROOT_DIR / "requirements" BOOTSTRAP_REQUIREMENTS = REQUIREMENTS_DIR / "bootstrap.txt" DOWNLOAD_DEPS_BOOTSTRAP = REQUIREMENTS_DIR / "download-deps" / "bootstrap.txt" -DOWNLOAD_DEPS_BOOTSTRAP_LOCK = REQUIREMENTS_DIR / "download-deps" / f"bootstrap-{LOCK_SUFFIX}" +DOWNLOAD_DEPS_BOOTSTRAP_LOCK = ( + REQUIREMENTS_DIR / "download-deps" / f"bootstrap-{LOCK_SUFFIX}" +) PORTABLE_EXE_REQUIREMENTS = REQUIREMENTS_DIR / "portable-exe-extras.txt" -PORTABLE_EXE_REQUIREMENTS_LOCK = REQUIREMENTS_DIR / "download-deps" / f"portable-exe-{LOCK_SUFFIX}" -SYSTEM_SANDBOX_REQUIREMENTS_LOCK = REQUIREMENTS_DIR / "download-deps" / f"system-sandbox-{LOCK_SUFFIX}" +PORTABLE_EXE_REQUIREMENTS_LOCK = ( + REQUIREMENTS_DIR / "download-deps" / f"portable-exe-{LOCK_SUFFIX}" +) +SYSTEM_SANDBOX_REQUIREMENTS_LOCK = ( + REQUIREMENTS_DIR / "download-deps" / f"system-sandbox-{LOCK_SUFFIX}" +) # Auto-complete index AC_INDEX = ROOT_DIR / "awscli" / "data" / "ac.index" diff --git a/backends/build_system/exe.py b/backends/build_system/exe.py index 60a3d9253789..60bdb4348873 100644 --- a/backends/build_system/exe.py +++ b/backends/build_system/exe.py @@ -13,10 +13,16 @@ import os from dataclasses import dataclass, field -from constants import EXE_ASSETS_DIR, PYINSTALLER_DIR, DISTRIBUTION_SOURCE_EXE, PYINSTALLER_EXE_NAME -from utils import Utils from awscli_venv import AwsCliVenv +from constants import ( + DISTRIBUTION_SOURCE_EXE, + EXE_ASSETS_DIR, + PYINSTALLER_DIR, + PYINSTALLER_EXE_NAME, +) +from utils import Utils + @dataclass class ExeBuilder: @@ -52,12 +58,10 @@ def _update_metadata(self): distribution_source=DISTRIBUTION_SOURCE_EXE, ) for distinfo in self._utils.glob( - '**/*.dist-info', - root=self._final_dist_dir + '**/*.dist-info', root=self._final_dist_dir ): self._utils.rmtree(os.path.join(self._final_dist_dir, distinfo)) - def _ensure_no_existing_build_dir(self): if self._utils.isdir(self._dist_dir): self._utils.rmtree(self._dist_dir) diff --git a/backends/build_system/install.py b/backends/build_system/install.py index 189455d30145..ed230e2ca5f0 100644 --- a/backends/build_system/install.py +++ b/backends/build_system/install.py @@ -10,21 +10,23 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
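On Windows the installer writes a small .cmd shim instead of a symlink; the template below renders to a two-line batch file in which %* forwards every argument to the real executable. A sketch of the rendered contents (the install path is illustrative):

WINDOWS_CMD_TEMPLATE = """@echo off
{path} %*
"""

print(WINDOWS_CMD_TEMPLATE.format(path=r"C:\Program Files\AWSCLI\aws.exe"))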
-import os import functools +import os -from constants import CLI_SCRIPTS -from constants import IS_WINDOWS -from constants import BIN_DIRNAME -from constants import PYTHON_EXE_NAME -from constants import ArtifactType +from constants import ( + BIN_DIRNAME, + CLI_SCRIPTS, + IS_WINDOWS, + PYTHON_EXE_NAME, + ArtifactType, +) from utils import Utils - WINDOWS_CMD_TEMPLATE = """@echo off {path} %* """ + class Uninstaller: def __init__(self, utils: Utils = None): if utils is None: @@ -36,7 +38,9 @@ def uninstall(self, install_dir: str, bin_dir: str): self._utils.rmtree(install_dir) for exe in CLI_SCRIPTS: exe_path = os.path.join(bin_dir, exe) - if self._utils.islink(exe_path) or self._utils.path_exists(exe_path): + if self._utils.islink(exe_path) or self._utils.path_exists( + exe_path + ): self._utils.remove(exe_path) @@ -78,7 +82,9 @@ def _install_executables(self, install_dir, bin_dir): def _install_executables_on_windows(self, install_dir, bin_dir): filepath = os.path.join(bin_dir, "aws.cmd") - content = WINDOWS_CMD_TEMPLATE.format(path=os.path.join(install_dir, "aws.exe")) + content = WINDOWS_CMD_TEMPLATE.format( + path=os.path.join(install_dir, "aws.exe") + ) self._utils.write_file(filepath, content) def _symlink_executables(self, install_dir, bin_dir): diff --git a/backends/build_system/utils.py b/backends/build_system/utils.py index 230307857988..b41846050add 100644 --- a/backends/build_system/utils.py +++ b/backends/build_system/utils.py @@ -10,22 +10,19 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import contextlib +import glob +import json import os import re -import sys import shlex -import glob -import json import shutil import subprocess +import sys import venv -import contextlib -from typing import List, Dict, Any, Optional, Callable - -from constants import ROOT_DIR -from constants import IS_WINDOWS -from constants import BOOTSTRAP_REQUIREMENTS +from typing import Any, Callable, Dict, List, Optional +from constants import BOOTSTRAP_REQUIREMENTS, IS_WINDOWS, ROOT_DIR PACKAGE_NAME = re.compile(r"(?P[A-Za-z][A-Za-z0-9_\.\-]+)(?P.+)") CONSTRAINT = re.compile(r"(?P[=\<\>]+)(?P.+)") @@ -47,7 +44,9 @@ def __init__(self, unmet_deps, in_venv, reason=None): f"{package} (required: {required.constraints}) " f"(version installed: {actual_version})\n" ) - pip_install_command_args.append(f'{package}{required.string_constraints()}') + pip_install_command_args.append( + f'{package}{required.string_constraints()}' + ) if reason: msg += f"\n{reason}\n" @@ -100,7 +99,9 @@ def _meets_constraint(self, version, constraint) -> bool: if not match: raise RuntimeError(f"Unknown version specifier {constraint}") comparison, constraint_version = match.group('comparison', 'version') - version, constraint_version = self._normalize(version, constraint_version) + version, constraint_version = self._normalize( + version, constraint_version + ) compare_fn = COMPARISONS.get(comparison) if not compare_fn: @@ -120,7 +121,9 @@ def _normalize(self, v1: str, v2: str): def __eq__(self, other): if other is None: return False - return (self.name == other.name and self.constraints == other.constraints) + return ( + self.name == other.name and self.constraints == other.constraints + ) def string_constraints(self): return ','.join(self.constraints) @@ -138,7 +141,7 @@ def parse_requirements(lines_list): if line.startswith('#'): continue if ' #' in line: - line 
= line[:line.find(' #')] + line = line[: line.find(' #')] if line.endswith('\\'): line = line[:-2].strip() try: @@ -184,17 +187,14 @@ def get_install_requires(): def get_flit_core_unmet_exception(): in_venv = sys.prefix != sys.base_prefix with open(BOOTSTRAP_REQUIREMENTS, 'r') as f: - flit_core_req = [ - l for l in f.read().split('\n') - if 'flit_core' in l - ] + flit_core_req = [l for l in f.read().split('\n') if 'flit_core' in l] return UnmetDependenciesException( [('flit_core', None, list(parse_requirements(flit_core_req))[0])], in_venv, reason=( 'flit_core is needed ahead of time in order to parse the ' 'rest of the requirements.' - ) + ), ) @@ -248,7 +248,9 @@ def copy_directory(self, src: str, dst: str): def update_metadata(self, dirname, **kwargs): print("Update metadata values %s" % kwargs) - metadata_file = os.path.join(dirname, "awscli", "data", "metadata.json") + metadata_file = os.path.join( + dirname, "awscli", "data", "metadata.json" + ) with open(metadata_file) as f: metadata = json.load(f) for key, value in kwargs.items(): @@ -261,5 +263,7 @@ def create_venv(self, name: str, with_pip: bool = True): def get_script_header(self, python_exe_path: str) -> str: if IS_WINDOWS: - return f'@echo off & "{python_exe_path}" -x "%~f0" %* & goto :eof\n' + return ( + f'@echo off & "{python_exe_path}" -x "%~f0" %* & goto :eof\n' + ) return f"#!{python_exe_path}\n" diff --git a/backends/build_system/validate_env.py b/backends/build_system/validate_env.py index 6a7b4110dc77..98e0883d7073 100644 --- a/backends/build_system/validate_env.py +++ b/backends/build_system/validate_env.py @@ -10,24 +10,21 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import importlib.metadata import re import sys from pathlib import Path -import importlib.metadata -from constants import ( - BOOTSTRAP_REQUIREMENTS, - PORTABLE_EXE_REQUIREMENTS, +from constants import BOOTSTRAP_REQUIREMENTS, PORTABLE_EXE_REQUIREMENTS +from utils import ( + UnmetDependenciesException, + get_install_requires, + parse_requirements, ) -from utils import get_install_requires, parse_requirements -from utils import UnmetDependenciesException - ROOT = Path(__file__).parents[2] PYPROJECT = ROOT / "pyproject.toml" -BUILD_REQS_RE = re.compile( - r"requires = \[([\s\S]+?)\]\s", re.MULTILINE -) +BUILD_REQS_RE = re.compile(r"requires = \[([\s\S]+?)\]\s", re.MULTILINE) EXTRACT_DEPENDENCIES_RE = re.compile(r'"(.+)"') diff --git a/backends/pep517.py b/backends/pep517.py index 4e4818007e84..b567673312d1 100644 --- a/backends/pep517.py +++ b/backends/pep517.py @@ -23,15 +23,16 @@ is that it builds the auto-complete index and injects it into the wheel built by flit prior to returning. """ -import re + +import base64 import contextlib +import glob import hashlib -import base64 import os -import glob -import tarfile +import re import shutil import sys +import tarfile import zipfile from pathlib import Path @@ -150,6 +151,7 @@ def _should_copy(path): return False return True + def read_sdist_extras(): with open(ROOT_DIR / "pyproject.toml", "r") as f: data = f.read() diff --git a/bin/aws b/bin/aws index b462ced92128..8d28af14f384 100755 --- a/bin/aws +++ b/bin/aws @@ -7,12 +7,13 @@ # http://aws.amazon.com/apache2.0/ +import os + # or in the "license" file accompanying this file. 
This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import sys -import os if os.environ.get('LC_CTYPE', '') == 'UTF-8': os.environ['LC_CTYPE'] = 'en_US.UTF-8' diff --git a/bin/aws_completer b/bin/aws_completer index a7f2b1e2af41..3c480dfdc544 100755 --- a/bin/aws_completer +++ b/bin/aws_completer @@ -12,6 +12,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os + if os.environ.get('LC_CTYPE', '') == 'UTF-8': os.environ['LC_CTYPE'] = 'en_US.UTF-8' from awscli.autocomplete.main import autocomplete diff --git a/doc/source/bootstrapdocs.py b/doc/source/bootstrapdocs.py index 830071a3cf9e..39b50d8d863b 100644 --- a/doc/source/bootstrapdocs.py +++ b/doc/source/bootstrapdocs.py @@ -4,10 +4,9 @@ import sys RST_GENERATION_SCRIPT = 'htmlgen' -script_path = os.path.join(os.path.dirname(__file__), - RST_GENERATION_SCRIPT) +script_path = os.path.join(os.path.dirname(__file__), RST_GENERATION_SCRIPT) os.environ['PATH'] += ':.' -rc = subprocess.call("python "+ script_path, shell=True, env=os.environ) +rc = subprocess.call("python " + script_path, shell=True, env=os.environ) if rc != 0: sys.stderr.write("Failed to generate documentation!\n") sys.exit(2) diff --git a/doc/source/conf.py b/doc/source/conf.py index 16be235bb810..4505f306d6b9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,7 +11,8 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys, os +import os +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -23,23 +24,24 @@ # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['notfound.extension',] +extensions = [ + 'notfound.extension', +] notfound_context = { 'title': 'Page not found', 'body': '
<h1>Page not found</h1>
    \n\n' - 'Sorry, the page you requested could not be found.' + 'Sorry, the page you requested could not be found.', } notfound_pagename = '_404' # notfound.extension changes all the relative links to links like # "/en/latest/_static/**" and we use "notfound_default_language" key # to change “en” to our path prefix notfound_default_language = os.environ.get( - 'DOCS_STATIC_PATH', - 'v2/documentation/api' + 'DOCS_STATIC_PATH', 'v2/documentation/api' ) # For local 404.html testing uncomment lines below and put in local path @@ -55,14 +57,14 @@ source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. -project = u'AWS CLI Command Reference' -copyright = u'2018, Amazon Web Services' +project = 'AWS CLI Command Reference' +copyright = '2018, Amazon Web Services' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -75,45 +77,45 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['examples'] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'guzzle_sphinx_theme.GuzzleStyle' -#pygments_style = 'sphinx' +# pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -#html_theme = 'pyramid' +# html_theme = 'pyramid' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -123,23 +125,23 @@ # } # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = ['.'] +# html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = "AWS CLI %s Command Reference" % release # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -148,50 +150,52 @@ # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { - '**': ['sidebarlogo.html', - 'localtoc.html', - 'searchbox.html', - 'feedback.html', - 'userguide.html'] + '**': [ + 'sidebarlogo.html', + 'localtoc.html', + 'searchbox.html', + 'feedback.html', + 'userguide.html', + ] } # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'aws-clidoc' @@ -219,46 +223,48 @@ } - # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'aws-cli.tex', u'AWS CLI Documentation', - u'Amazon Web Services', 'manual'), + ( + 'index', + 'aws-cli.tex', + 'AWS CLI Documentation', + 'Amazon Web Services', + 'manual', + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. 
-#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output -------------------------------------------- @@ -266,63 +272,122 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [('reference/index', 'aws', 'The main command', '', 1), - ('reference/autoscaling/index', 'aws-autoscaling', - 'The autoscaling service', '', 1), - ('reference/cloudformation/index', 'aws-cloudformation', - 'AWS CloudFormation', '', 1), - ('reference/cloudwatch/index', 'aws-cloudwatch', - 'Amazon CloudWatch', '', 1), - ('reference/datapipeline/index', 'aws-datapipeline', - 'AWS Data Pipeline', '', 1), - ('reference/directconnect/index', 'aws-directconnect', - 'AWS Direct Connect', '', 1), - ('reference/dynamodb/index', 'aws-dynamodb', - 'Amazon DynamoDB', '', 1), - ('reference/ec2/index', 'aws-ec2', - 'Amazon Elastic Compute Cloud', '', 1), - ('reference/elasticache/index', 'aws-elasticache', - 'Amazon ElastiCache', '', 1), - ('reference/elasticbeanstalk/index', 'aws-elasticbeanstalk', - 'AWS Elastic Beanstalk', '', 1), - ('reference/elastictranscoder/index', 'aws-elastictranscoder', - 'Amazon Elastic Transcoder', '', 1), - ('reference/elb/index', 'aws-elb', - 'Elastic Load Balancing', '', 1), - ('reference/emr/index', 'aws-emr', - 'Amazon Elastic MapReduce', '', 1), - ('reference/iam/index', 'aws-iam', - 'AWS Identity and Access Management', '', 1), - ('reference/importexport/index', 'aws-importexport', - 'AWS Import/Export', '', 1), - ('reference/opsworks/index', 'aws-opsworks', - 'AWS OpsWorks', '', 1), - ('reference/rds/index', 'aws-rds', - 'Amazon Relational Database Service', '', 1), - ('reference/redshift/index', 'aws-redshift', - 'Amazon Redshift', '', 1), - ('reference/route53/index', 'aws-route53', - 'Amazon Route 53', '', 1), - ('reference/s3/index', 'aws-s3', - 'Amazon Simple Storage Service', '', 1), - ('reference/ses/index', 'aws-ses', - 'Amazon Simple Email Service', '', 1), - ('reference/sns/index', 'aws-sns', - 'Amazon Simple Notification Service', '', 1), - ('reference/sqs/index', 'aws-sqs', - 'Amazon Simple Queue Service', '', 1), - ('reference/storagegateway/index', 'aws-storagegateway', - 'AWS Storage Gateway', '', 1), - ('reference/sts/index', 'aws-sts', - 'AWS Security Token Service', '', 1), - ('reference/support/index', 'aws-support', - 'AWS Support', '', 1), - ('reference/swf/index', 'aws-swf', - 'Amazon Simple Workflow Service', '', 1), - ] +man_pages = [ + ('reference/index', 'aws', 'The main command', '', 1), + ( + 'reference/autoscaling/index', + 'aws-autoscaling', + 'The autoscaling service', + '', + 1, + ), + ( + 'reference/cloudformation/index', + 'aws-cloudformation', + 'AWS CloudFormation', + '', + 1, + ), + ( + 'reference/cloudwatch/index', + 'aws-cloudwatch', + 'Amazon CloudWatch', + '', + 1, + ), + ( + 'reference/datapipeline/index', + 'aws-datapipeline', + 'AWS Data Pipeline', + '', + 1, + ), + ( + 'reference/directconnect/index', + 'aws-directconnect', + 'AWS Direct Connect', + '', + 1, + ), + ('reference/dynamodb/index', 'aws-dynamodb', 'Amazon DynamoDB', '', 1), + ('reference/ec2/index', 'aws-ec2', 'Amazon Elastic Compute Cloud', '', 1), + ( + 
'reference/elasticache/index', + 'aws-elasticache', + 'Amazon ElastiCache', + '', + 1, + ), + ( + 'reference/elasticbeanstalk/index', + 'aws-elasticbeanstalk', + 'AWS Elastic Beanstalk', + '', + 1, + ), + ( + 'reference/elastictranscoder/index', + 'aws-elastictranscoder', + 'Amazon Elastic Transcoder', + '', + 1, + ), + ('reference/elb/index', 'aws-elb', 'Elastic Load Balancing', '', 1), + ('reference/emr/index', 'aws-emr', 'Amazon Elastic MapReduce', '', 1), + ( + 'reference/iam/index', + 'aws-iam', + 'AWS Identity and Access Management', + '', + 1, + ), + ( + 'reference/importexport/index', + 'aws-importexport', + 'AWS Import/Export', + '', + 1, + ), + ('reference/opsworks/index', 'aws-opsworks', 'AWS OpsWorks', '', 1), + ( + 'reference/rds/index', + 'aws-rds', + 'Amazon Relational Database Service', + '', + 1, + ), + ('reference/redshift/index', 'aws-redshift', 'Amazon Redshift', '', 1), + ('reference/route53/index', 'aws-route53', 'Amazon Route 53', '', 1), + ('reference/s3/index', 'aws-s3', 'Amazon Simple Storage Service', '', 1), + ('reference/ses/index', 'aws-ses', 'Amazon Simple Email Service', '', 1), + ( + 'reference/sns/index', + 'aws-sns', + 'Amazon Simple Notification Service', + '', + 1, + ), + ('reference/sqs/index', 'aws-sqs', 'Amazon Simple Queue Service', '', 1), + ( + 'reference/storagegateway/index', + 'aws-storagegateway', + 'AWS Storage Gateway', + '', + 1, + ), + ('reference/sts/index', 'aws-sts', 'AWS Security Token Service', '', 1), + ('reference/support/index', 'aws-support', 'AWS Support', '', 1), + ( + 'reference/swf/index', + 'aws-swf', + 'Amazon Simple Workflow Service', + '', + 1, + ), +] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ @@ -334,10 +399,10 @@ # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' diff --git a/doc/source/htmlgen b/doc/source/htmlgen index e2f1a63bf6ef..80a96bc78dea 100755 --- a/doc/source/htmlgen +++ b/doc/source/htmlgen @@ -1,8 +1,9 @@ #!/usr/bin/env python +import argparse +import json import os import sys -import json -import argparse + import awscli.clidriver from awscli.help import PagingHelpRenderer @@ -12,7 +13,6 @@ TOPIC_PATH = 'topic' class FileRenderer(PagingHelpRenderer): - def __init__(self, file_path): self._file_path = file_path @@ -23,8 +23,7 @@ class FileRenderer(PagingHelpRenderer): def do_operation(driver, service_path, operation_name, operation_command): - file_path = os.path.join(service_path, - operation_name + '.rst') + file_path = os.path.join(service_path, operation_name + '.rst') help_command = operation_command.create_help_command() if help_command is None: # Do not document anything that does not have a help command. 
@@ -34,8 +33,9 @@ def do_operation(driver, service_path, operation_name, operation_command): help_command(None, None) -def do_service(driver, ref_path, service_name, service_command, - is_top_level_service=True): +def do_service( + driver, ref_path, service_name, service_command, is_top_level_service=True +): if is_top_level_service: print('...%s' % service_name) service_path = os.path.join(ref_path, service_name) @@ -57,21 +57,24 @@ def do_service(driver, ref_path, service_name, service_command, # If the operation command has a subcommand table with commands # in it, treat it as a service command as opposed to an operation # command. - if (len(subcommand_table) > 0): - do_service(driver, service_path, operation_name, - operation_command, False) + if len(subcommand_table) > 0: + do_service( + driver, service_path, operation_name, operation_command, False + ) else: - do_operation(driver, service_path, operation_name, - operation_command) + do_operation( + driver, service_path, operation_name, operation_command + ) + def do_topic(driver, topic_path, topic_help_command): print('...%s' % topic_help_command.name) - file_path = os.path.join(topic_path, - topic_help_command.name + '.rst') + file_path = os.path.join(topic_path, topic_help_command.name + '.rst') topic_help_command.doc.target = 'html' topic_help_command.renderer = FileRenderer(file_path) topic_help_command(None, None) + def do_provider(driver): help_command = driver.create_help_command() help_command.doc.target = 'html' @@ -79,8 +82,9 @@ def do_provider(driver): help_command(None, None) topic_help_command = help_command.subcommand_table['topics'] - topic_help_command.renderer = FileRenderer(os.path.join(TOPIC_PATH, - 'index.rst')) + topic_help_command.renderer = FileRenderer( + os.path.join(TOPIC_PATH, 'index.rst') + ) topic_help_command.doc.target = 'html' help_command(['topics'], None) topics = help_command.subcommand_table @@ -118,20 +122,25 @@ def build_service_list(tut_path, ref_path, driver): for full_name, service_name in l: service_ref_path = os.path.join(ref_path, service_name) service_ref_path = os.path.join(service_ref_path, 'index') - fp.write('* :doc:`%s <..%s%s>`\n' % (full_name, - os.path.sep, - service_ref_path)) + fp.write( + '* :doc:`%s <..%s%s>`\n' + % (full_name, os.path.sep, service_ref_path) + ) fp.write('\n') fp.close() if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('-s', '--service', - help='Name of service, or else all services') - parser.add_argument('-o', '--operations', - help='Name of operations, or else all operations', - nargs='*') + parser.add_argument( + '-s', '--service', help='Name of service, or else all services' + ) + parser.add_argument( + '-o', + '--operations', + help='Name of operations, or else all operations', + nargs='*', + ) args = parser.parse_args() driver = awscli.clidriver.create_clidriver() if not os.path.isdir(REF_PATH): diff --git a/exe/pyinstaller/hook-awscli.py b/exe/pyinstaller/hook-awscli.py index 617b71d927ed..5e53e45b233a 100644 --- a/exe/pyinstaller/hook-awscli.py +++ b/exe/pyinstaller/hook-awscli.py @@ -1,6 +1,5 @@ from PyInstaller.utils import hooks - hiddenimports = [ 'docutils', 'urllib', @@ -14,19 +13,17 @@ # NOTE: This can be removed once this hidden import issue related to # setuptools and PyInstaller is resolved: # https://github.com/pypa/setuptools/issues/1963 - 'pkg_resources.py2_warn' + 'pkg_resources.py2_warn', ] -imports_for_legacy_plugins = ( - hooks.collect_submodules('http') + - hooks.collect_submodules('logging') -) 
+imports_for_legacy_plugins = hooks.collect_submodules( + 'http' +) + hooks.collect_submodules('logging') hiddenimports += imports_for_legacy_plugins -alias_packages_plugins = ( - hooks.collect_submodules('awscli.botocore') + - hooks.collect_submodules('awscli.s3transfer') -) +alias_packages_plugins = hooks.collect_submodules( + 'awscli.botocore' +) + hooks.collect_submodules('awscli.s3transfer') hiddenimports += alias_packages_plugins datas = hooks.collect_data_files('awscli') diff --git a/exe/tests/README.rst b/exe/tests/README.rst index 6d0f38d5efb0..2c35693f570b 100644 --- a/exe/tests/README.rst +++ b/exe/tests/README.rst @@ -33,4 +33,3 @@ test file:: $ shellcheck ../assets/install $ shellcheck install.bats - diff --git a/proposals/assets/contribution-guide/contribution-guide-flowchart.xml b/proposals/assets/contribution-guide/contribution-guide-flowchart.xml index e5b23af4802f..218ed54a949d 100644 --- a/proposals/assets/contribution-guide/contribution-guide-flowchart.xml +++ b/proposals/assets/contribution-guide/contribution-guide-flowchart.xml @@ -1,2 +1,2 @@ -5V1bd5s4EP41ebQPIC7mMU2TNG3S9XGa7WZf9shGsakxcgHbcX79ChBXyTK2uThpzmkKg7hpvhl9mhmRC3C1eL314HL2gC3kXCiS9XoBPl8oCgDygPwXSraxRDZ1EEumnm1RWSZ4tN8QFUpUurIt5BcaBhg7gb0sCifYddEkKMig5+FNsdkLdop3XcIpYgSPE+iw0p+2Fcxi6UCTMvkXZE9nyZ1liR5ZwKQxFfgzaOFNTgSuL8CVh3EQby1er5AT9l7SL/F5NzuOpg/mITeocsIP9+vd0+jfx9727/m3H89o7Hyf9tKHC7bJGyOLdADdxV4ww1PsQuc6k37y8Mq1UHhZiexlbe4xXhKhTIS/UBBsqTbhKsBENAsWDj2KXu3gn/D0vkb3nunFwu3Pr/mdbbLjBt42d1K4+5w/lp0W7RXOGyLPXqAAeVTI9h3tzvDlcwLak7cIk7O9LWngIQcG9rqIDkhBNk3bpacOsU1uoUjUInogQc62LEgu4uOVN0H0vEyfZCP3IJko0jJf4xv1H3A3BZJyj+/mcH5z9fbrskcfeQ2dFX0JBgH+xl440A1V7QfQC6gegUkElu0RM7OxSwRuqHpRb66RF6BXYX8m/SJLaqljZEmnkk1meHriF2Z5oxtoe/UwIk8N3Sl5xeyeg7IuZMk0mVsanDsqoHhD6BBsuTBAn0LT8E/U28vE/fT76+8vs0fFv3U/vb0tle89dcBRnO4EkU7WZHMabt65AZyjSJ2IeEjagNww14Zz2gQvlg6xjwtws+ucMkZmcBluejO8GK/8on1b0J+l/mEzswPkL+EkbL4hwwSRLXP2SK+QM1EOoIRQrowyoOoFhasao21T4+BLknbjq6BngVKHb6P18/D2Yf756a+bb2/af5vRvxxjvMKLxcq1g/CpoWtFAwnxBOQf6Rkyjtr+ZOX7of2VNZJ5ZTnp9cdir+c0BMc+dlYBuvQm1LojabanRsbv4Xk67CkNKkY2jX2KSZXQjmZUwKjmO+5krDx23DtijD1MvfFAVcFlkTFkioL9ZtD04KvKRX+vNDbyct9y0C18CuDJsLQDPsdDQaThPBKEkGkaCqDENswSg4gftDEoyLzBvBoLU6VWWdigMgkr89h3TsL4ejN3krAXHOErU6D+e4WTAz0/Ut4laSAPlq/ZwTInG6dELiRkC6JKGKk5I2Xj8ilEFt97B1Mjug+KPiEe16+wg70IPxHGXmzHKYmgY09DhE3IY0S8LESSTebGl/TAwrasyE3x2EbRdXmYvgn43DP5DkZsKftBTAGks9xB5aGnBubAd2G7MZIj1Pe2O0dWFM2ozNJv7eDLahze3/dX58vVT9VjygnNviZlP6A4dndN3RXWhV8RM/TsMRmDQ6qOl8j1wzuH3bOKLuih3yvkB4yCuqHuNekJqKCgJ3mfntpm8t3yrnTnWN4lVNNe4qVwOLjQbTXNvJQkWEMBYrTLvBR2XnemzEsDFZlXSo4+MvNS1LaY1witbbT5cxlXaiGVGZfMcfKtUi6ZUcH79/CJGvIe/vrXz4fRtX37BOTrqTQcft08DXpKRQevtOTflaJX0Rvz76LeEJPvhzSk6kcvHBu8FMxQZS4+HKWOw+P4BxEdPyO2d4CtvxO2p7Hj+zPyO/UPB0be+IHbJG1aNXBb2aeIfEXep/DTx3o7TgUAsVOpL3LL7Q5d7RZAXUT+KwOICwxgsggS+euaAXTpeXCba7AMkeEL8CUV8aXlUcS01nVBa7IR3/1YBAr7M+fWnvwoI1hOEoYj2gZ7cxpaklZLixBzsjEc9T/WUFSavrQ69gi9YU5JD4jgP6EWUkgaqMJStcS7Ewf7WbsovvfBtCUmDnKrGV+ukz+XhG9XYzmoPQB0kjpYIn/nFw2pULRSNpU01L3HTBrDuyIZ/RzcJaWAd8DCfcBB+6ApmgzYqNc7q/tLGVLlaTYnV801BL0q+e3KXrbrn5c/l8rN+tv94/PTM7p8vdV7GmMvnFlrNBWqNEntDA2HTnQqF6NwGCm/YSOMlK0ENaRiILynlzPoO0Il7KUGJe/SM0BjEySRyeTAd+mHDHQVc9SIiIb5sIjvJARnB/mJeCpbltwN+Tk8kG+a5QoKjq9vOSgC2DrDjorZEpPPu//U/PcMACfHWU9xFSLYN+4qmEyNUa54bjgAYnAGlxJ6ziNpZkgVk2ay/rGSZly17S4ZrzlnNvRQz/tz8mZCG6kOX13pl3LhLHhlnVPV0lgiTe9mWnAkxa+HQIqUuXeuIHMmCy2yykPjnHKpAjop2tzFLBVgiNo3E+uUWTYZropA0YOFtZDx9J9Y8mR2ka50kVyU1tNJY5SLDxApeRDJgeNocWLKPScT7Fk28e/bcwm11eNXSiOUynEq7cZFZbZ2o+O1FAdSyWNo6/GORuQ/9lJSua2MnKn1ZVUZaEb8G6hF0Jn95Ej4WzeK1686s5VLWG5uraSo03O4zROd1PM0
FIisxxuU45CA26EtxCG5r6OxXfwAo+QVjEszoBW+y0tUUZt0/Afy1eKsCDD7Zv6nRUfO15bEaOudFVc075WFMG/cK5f8pVktoliXv0ziTOcxfTiuTqKx+DNnpsBPNRntYKUHktleEsowlcaiSvw3Zd1JU6up6LL4PyA2cUwUTS9lIkzD6HPW1zRU0itKronTWd8jGpauqpHWNqxcwEmal9ZTnXP1Zg15CFmS20xEcJOU3YwP1M8n2wclHU7mF1WzzkrF4aH2tfKnLa4yOtHoiaW1hyDhhBG/6ucP2qqi7emlMsee2VwhrfBNd0z34BraDhxHCZd4yjdJllSezxdPDnfFGpszatUV8+ur3vnnKKp+eUKpHKFvywrVvVbY8DRN5QXY83YmjWjc5S4kRuwEP1lQPtk6NsGDt9/6xjFw7sepAE7m0whOf60CcpXEIOuwN6Cz9gY4hJaXpTWN3fo8bb04m0MvrhenNDYqcgxzGLSIG5eLZt6pEwRGaZJxBnUxCu9TDe07xROcm1GVYtT+XZ3T8szHf/8GGDUBUlUYL8H51o3Mm/Uy32ysr2Oa+sBM3sH3aFj9oMhH1XBHDtfhZanWFDXZpxeWRYEQJmTC6lsMq8ooqLrsQG0spK59OBomjIDvD5W3RcO0Mg3TmvsIGD/JcPynKEBdozLjBHkFdFwfqDZmEkZTn3oYcbOJglXcH8UvGgevXlFLZK1rN6mzU5a96/p95IRfBC+sGeKp/iwDvs3oUTY7jztorCaTWWa1dH+rk8+a1FCakKpyX82l9QftTU35xgWYTv7waVuhYvezmapBJb12NnOaprsN17e2gkPn6Kedzw/sUA/Zzf7SQ0wgsz+YAa7/Bw== \ No newline at end of file +5V1bd5s4EP41ebQPIC7mMU2TNG3S9XGa7WZf9shGsakxcgHbcX79ChBXyTK2uThpzmkKg7hpvhl9mhmRC3C1eL314HL2gC3kXCiS9XoBPl8oCgDygPwXSraxRDZ1EEumnm1RWSZ4tN8QFUpUurIt5BcaBhg7gb0sCifYddEkKMig5+FNsdkLdop3XcIpYgSPE+iw0p+2Fcxi6UCTMvkXZE9nyZ1liR5ZwKQxFfgzaOFNTgSuL8CVh3EQby1er5AT9l7SL/F5NzuOpg/mITeocsIP9+vd0+jfx9727/m3H89o7Hyf9tKHC7bJGyOLdADdxV4ww1PsQuc6k37y8Mq1UHhZiexlbe4xXhKhTIS/UBBsqTbhKsBENAsWDj2KXu3gn/D0vkb3nunFwu3Pr/mdbbLjBt42d1K4+5w/lp0W7RXOGyLPXqAAeVTI9h3tzvDlcwLak7cIk7O9LWngIQcG9rqIDkhBNk3bpacOsU1uoUjUInogQc62LEgu4uOVN0H0vEyfZCP3IJko0jJf4xv1H3A3BZJyj+/mcH5z9fbrskcfeQ2dFX0JBgH+xl440A1V7QfQC6gegUkElu0RM7OxSwRuqHpRb66RF6BXYX8m/SJLaqljZEmnkk1meHriF2Z5oxtoe/UwIk8N3Sl5xeyeg7IuZMk0mVsanDsqoHhD6BBsuTBAn0LT8E/U28vE/fT76+8vs0fFv3U/vb0tle89dcBRnO4EkU7WZHMabt65AZyjSJ2IeEjagNww14Zz2gQvlg6xjwtws+ucMkZmcBluejO8GK/8on1b0J+l/mEzswPkL+EkbL4hwwSRLXP2SK+QM1EOoIRQrowyoOoFhasao21T4+BLknbjq6BngVKHb6P18/D2Yf756a+bb2/af5vRvxxjvMKLxcq1g/CpoWtFAwnxBOQf6Rkyjtr+ZOX7of2VNZJ5ZTnp9cdir+c0BMc+dlYBuvQm1LojabanRsbv4Xk67CkNKkY2jX2KSZXQjmZUwKjmO+5krDx23DtijD1MvfFAVcFlkTFkioL9ZtD04KvKRX+vNDbyct9y0C18CuDJsLQDPsdDQaThPBKEkGkaCqDENswSg4gftDEoyLzBvBoLU6VWWdigMgkr89h3TsL4ejN3krAXHOErU6D+e4WTAz0/Ut4laSAPlq/ZwTInG6dELiRkC6JKGKk5I2Xj8ilEFt97B1Mjug+KPiEe16+wg70IPxHGXmzHKYmgY09DhE3IY0S8LESSTebGl/TAwrasyE3x2EbRdXmYvgn43DP5DkZsKftBTAGks9xB5aGnBubAd2G7MZIj1Pe2O0dWFM2ozNJv7eDLahze3/dX58vVT9VjygnNviZlP6A4dndN3RXWhV8RM/TsMRmDQ6qOl8j1wzuH3bOKLuih3yvkB4yCuqHuNekJqKCgJ3mfntpm8t3yrnTnWN4lVNNe4qVwOLjQbTXNvJQkWEMBYrTLvBR2XnemzEsDFZlXSo4+MvNS1LaY1witbbT5cxlXaiGVGZfMcfKtUi6ZUcH79/CJGvIe/vrXz4fRtX37BOTrqTQcft08DXpKRQevtOTflaJX0Rvz76LeEJPvhzSk6kcvHBu8FMxQZS4+HKWOw+P4BxEdPyO2d4CtvxO2p7Hj+zPyO/UPB0be+IHbJG1aNXBb2aeIfEXep/DTx3o7TgUAsVOpL3LL7Q5d7RZAXUT+KwOICwxgsggS+euaAXTpeXCba7AMkeEL8CUV8aXlUcS01nVBa7IR3/1YBAr7M+fWnvwoI1hOEoYj2gZ7cxpaklZLixBzsjEc9T/WUFSavrQ69gi9YU5JD4jgP6EWUkgaqMJStcS7Ewf7WbsovvfBtCUmDnKrGV+ukz+XhG9XYzmoPQB0kjpYIn/nFw2pULRSNpU01L3HTBrDuyIZ/RzcJaWAd8DCfcBB+6ApmgzYqNc7q/tLGVLlaTYnV801BL0q+e3KXrbrn5c/l8rN+tv94/PTM7p8vdV7GmMvnFlrNBWqNEntDA2HTnQqF6NwGCm/YSOMlK0ENaRiILynlzPoO0Il7KUGJe/SM0BjEySRyeTAd+mHDHQVc9SIiIb5sIjvJARnB/mJeCpbltwN+Tk8kG+a5QoKjq9vOSgC2DrDjorZEpPPu//U/PcMACfHWU9xFSLYN+4qmEyNUa54bjgAYnAGlxJ6ziNpZkgVk2ay/rGSZly17S4ZrzlnNvRQz/tz8mZCG6kOX13pl3LhLHhlnVPV0lgiTe9mWnAkxa+HQIqUuXeuIHMmCy2yykPjnHKpAjop2tzFLBVgiNo3E+uUWTYZropA0YOFtZDx9J9Y8mR2ka50kVyU1tNJY5SLDxApeRDJgeNocWLKPScT7Fk28e/bcwm11eNXSiOUynEq7cZFZbZ2o+O1FAdSyWNo6/GORuQ/9lJSua2MnKn1ZVUZaEb8G6hF0Jn95Ej4WzeK1686s5VLWG5uraSo03O4zROd1PM0FIisxxuU45CA26EtxCG5r6OxXfwAo+QVjEszoBW+y0tUUZt0/Afy1eKsCDD7Zv6nRUfO15bEaOudFVc075WFMG/cK5f8pVktoliXv0ziTOcxfTiuTqKx+DNnpsBPNRntYKUHktleEsowlcaiSvw3Zd1JU6up6LL4PyA2cUwUTS9lIkzD6HPW1zRU0
itKronTWd8jGpauqpHWNqxcwEmal9ZTnXP1Zg15CFmS20xEcJOU3YwP1M8n2wclHU7mF1WzzkrF4aH2tfKnLa4yOtHoiaW1hyDhhBG/6ucP2qqi7emlMsee2VwhrfBNd0z34BraDhxHCZd4yjdJllSezxdPDnfFGpszatUV8+ur3vnnKKp+eUKpHKFvywrVvVbY8DRN5QXY83YmjWjc5S4kRuwEP1lQPtk6NsGDt9/6xjFw7sepAE7m0whOf60CcpXEIOuwN6Cz9gY4hJaXpTWN3fo8bb04m0MvrhenNDYqcgxzGLSIG5eLZt6pEwRGaZJxBnUxCu9TDe07xROcm1GVYtT+XZ3T8szHf/8GGDUBUlUYL8H51o3Mm/Uy32ysr2Oa+sBM3sH3aFj9oMhH1XBHDtfhZanWFDXZpxeWRYEQJmTC6lsMq8ooqLrsQG0spK59OBomjIDvD5W3RcO0Mg3TmvsIGD/JcPynKEBdozLjBHkFdFwfqDZmEkZTn3oYcbOJglXcH8UvGgevXlFLZK1rN6mzU5a96/p95IRfBC+sGeKp/iwDvs3oUTY7jztorCaTWWa1dH+rk8+a1FCakKpyX82l9QftTU35xgWYTv7waVuhYvezmapBJb12NnOaprsN17e2gkPn6Kedzw/sUA/Zzf7SQ0wgsz+YAa7/Bw== diff --git a/proposals/contribution-guide.md b/proposals/contribution-guide.md index abc5437300de..ee1a53759fca 100644 --- a/proposals/contribution-guide.md +++ b/proposals/contribution-guide.md @@ -48,7 +48,7 @@ GitHub issues are triaged regularly to determine that they are correctly categorized and express a real and relevant problem or request. An answer is provided as soon as possible to acknowledge or resolve the issue. Feature requests are reviewed for general suitability and uniqueness. Users can vote for -features via "reactions" on the issue. +features via "reactions" on the issue. #### Implementation stage @@ -62,7 +62,7 @@ is provided to the contributor to improve the proposed change. Pull requests are selected for review opportunistically when the maintainers have decided that a change or feature should be incorporated. When a feature is selected for review it is added to an internal queue. This queue manages the -prioritization of these features but is not visible to the community. +prioritization of these features but is not visible to the community. Pull requests are subjected to automated tests and checks which provide preliminary feedback. Once a pull request passes all automated tests and checks, @@ -252,7 +252,7 @@ following criteria in descending order of importance: A maintainer must select the issue from the queue with the current highest priority. The prioritization of issues in the queue is reviewed on a regular cadence and is ultimately decided based on the maintainers' discretion. See the -[rationale](#rationale-reprioritize) for further discussion. +[rationale](#rationale-reprioritize) for further discussion. ### Review stage @@ -305,7 +305,7 @@ contribution. The ready for review lane is the prioritization queue, and issues will be ordered in decreasing priority from top to bottom. [Figure 2](#figure-2) demonstrates an example project with contributions in -various stages of completion. +various stages of completion. ## Managing the existing backlog @@ -361,7 +361,7 @@ Pull requests are often made for problems that only affect the contributor or a small portion of the user base. An issue provides a mechanism to gather quantitative feedback in the form of "upvotes" through GitHub reactions to estimate the impact of the issue on the community. - + We intend for issues to be a way to improve contributor confidence in both their contributions and the overall process. Draft pull requests can be used to demonstrate a potential implementation and get community feedback or interest @@ -478,7 +478,7 @@ include (but are not limited to): also affect the behavior in other SDKs and must thus be made in coordination with internal teams. 
-### Preliminary review criteria +### Preliminary review criteria The maintainers will use a set of criteria to move a pull request from the implementation to the ready for review stage, which may include (but are not diff --git a/pyproject.toml b/pyproject.toml index 671e41b2af59..bff2f7b73bdf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -134,3 +134,70 @@ markers = [ [tool.black] line-length = 80 + +[tool.isort] +profile = "black" +line_length = 79 +honor_noqa = true +src_paths = ["awscli", "tests"] + +[tool.ruff] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", +] + +# Format same as Black. +line-length = 79 +indent-width = 4 + +target-version = "py38" + +[tool.ruff.lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. +select = ["E4", "E7", "E9", "F", "UP"] +ignore = ["F401"] + +# Allow fix for all enabled rules (when `--fix`) is provided. +fixable = ["ALL"] +unfixable = [] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[tool.ruff.format] +# Like Black, use double quotes for strings, spaces for indents +# and trailing commas. +quote-style = "preserve" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" + +docstring-code-format = false +docstring-code-line-length = "dynamic" diff --git a/requirements-build.txt b/requirements-build.txt index 0ef53f656530..6bf6d5b21336 100644 --- a/requirements-build.txt +++ b/requirements-build.txt @@ -1,4 +1,4 @@ # Requirements we need to run our build jobs for the installers. # We create the separation for cases where we're doing installation # from a local dependency directory instead of requirements.txt. -PyInstaller==5.13.2 \ No newline at end of file +PyInstaller==5.13.2 diff --git a/requirements.txt b/requirements.txt index dfd867f42c69..60285a2d7faa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ --r requirements-dev.txt \ No newline at end of file +-r requirements-dev.txt diff --git a/requirements/bootstrap.txt b/requirements/bootstrap.txt index fcc7ca5345f4..31ab5e26f67c 100644 --- a/requirements/bootstrap.txt +++ b/requirements/bootstrap.txt @@ -1,2 +1,2 @@ pip>=22.0.0,<25.0.0 -flit_core>=3.7.1,<3.9.1 \ No newline at end of file +flit_core>=3.7.1,<3.9.1 diff --git a/scripts/ci/install b/scripts/ci/install index 0d2e41906fa2..5f1627d7762e 100755 --- a/scripts/ci/install +++ b/scripts/ci/install @@ -1,9 +1,9 @@ #!/usr/bin/env python +import glob import os +import shutil import sys -import glob from subprocess import check_call -import shutil _dname = os.path.dirname @@ -14,6 +14,7 @@ os.chdir(REPO_ROOT) def run(command): return check_call(command, shell=True) + if sys.version_info[:2] >= (3, 12): # Python 3.12+ no longer includes setuptools by default. diff --git a/scripts/ci/install-benchmark b/scripts/ci/install-benchmark index 70c82f26d074..5ec17234fea2 100755 --- a/scripts/ci/install-benchmark +++ b/scripts/ci/install-benchmark @@ -10,33 +10,37 @@ perf testing. 
* Install dependencies """ + import os import shutil from subprocess import check_call - GIT_OWNER = os.environ.get('GIT_OWNER', 'boto') # Using PERF_BRANCH instead of GIT_BRANCH because that value # is set by jenkins to the branch that's been checked out via # git. PERF_BRANCH = os.environ.get('PERF_BRANCH', 'develop') REPO_ROOT = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir')) def clone_s3_transfer_repo(): if os.path.isdir('s3transfer'): shutil.rmtree('s3transfer') - check_call('git clone https://github.com/%s/s3transfer.git' % GIT_OWNER, - shell=True) + check_call( + 'git clone https://github.com/%s/s3transfer.git' % GIT_OWNER, + shell=True, + ) check_call('cd s3transfer && git checkout %s' % PERF_BRANCH, shell=True) def pip_install_s3transfer_and_deps(): check_call('cd s3transfer && pip install -e .', shell=True) - check_call('cd s3transfer && pip install -r requirements-dev.txt', - shell=True) + check_call( + 'cd s3transfer && pip install -r requirements-dev.txt', shell=True + ) check_call('pip install "caf>=0.1.0,<1.0.0"', shell=True) check_call('cd %s && pip install -e .' % REPO_ROOT, shell=True) diff --git a/scripts/ci/install-build-system b/scripts/ci/install-build-system index 8aa9cba1dba5..a54fddf47742 100755 --- a/scripts/ci/install-build-system +++ b/scripts/ci/install-build-system @@ -1,11 +1,10 @@ #!/usr/bin/env python3 import argparse -import tarfile -import tempfile -import os import glob +import os import shutil - +import tarfile +import tempfile from pathlib import Path from subprocess import check_call diff --git a/scripts/ci/run-benchmark b/scripts/ci/run-benchmark index bee911dbcff8..fc5148418f3a 100755 --- a/scripts/ci/run-benchmark +++ b/scripts/ci/run-benchmark @@ -4,23 +4,24 @@ As of now this benchmarks `cp` and `rm` with test cases for multiple 4kb files (default 10000 files) and a single large file (default 10gb, `cp` only). 
""" -import os -import json -from subprocess import check_call, Popen, PIPE -from datetime import datetime -import random + import argparse import inspect -import shutil +import json +import os import platform +import random +import shutil +from datetime import datetime +from subprocess import PIPE, Popen, check_call import awscli import s3transfer - TEST_BUCKET = os.environ.get('PERF_TEST_BUCKET') REPO_ROOT = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir')) MANY_FILES_DIR = 'many' LARGE_FILE_DIR = 'large' @@ -44,14 +45,18 @@ def initialize_files(num_files, file_size): many_files_dir = os.path.join(WORKDIR, MANY_FILES_DIR) if not os.path.exists(many_files_dir): os.makedirs(many_files_dir) - run('caf gen --file-size 4kb --max-files %s --directory %s' % - (num_files, many_files_dir)) + run( + 'caf gen --file-size 4kb --max-files %s --directory %s' + % (num_files, many_files_dir) + ) large_file_dir = os.path.join(WORKDIR, LARGE_FILE_DIR) if not os.path.exists(large_file_dir): os.makedirs(large_file_dir) - run('caf gen --file-size %s --max-files 1 --directory %s' % - (file_size, large_file_dir)) + run( + 'caf gen --file-size %s --max-files 1 --directory %s' + % (file_size, large_file_dir) + ) def write_metadata_file(filename): @@ -81,12 +86,21 @@ def _inject_package_info(package, metadata): def _get_git_version(package): dname = os.path.dirname(inspect.getfile(package)) - git_sha = Popen( - 'git rev-parse HEAD', - cwd=dname, shell=True, stdout=PIPE).communicate()[0].strip() - git_branch = Popen( - 'git rev-parse --abbrev-ref HEAD', - cwd=dname, shell=True, stdout=PIPE).communicate()[0].strip() + git_sha = ( + Popen('git rev-parse HEAD', cwd=dname, shell=True, stdout=PIPE) + .communicate()[0] + .strip() + ) + git_branch = ( + Popen( + 'git rev-parse --abbrev-ref HEAD', + cwd=dname, + shell=True, + stdout=PIPE, + ) + .communicate()[0] + .strip() + ) return '%s (%s)' % (git_sha, git_branch) @@ -115,24 +129,30 @@ def benchmark(bucket, results_dir, num_iterations=1): results = os.path.join(results_dir, 'upload-10k-small') os.makedirs(results) benchmark_cp = os.path.join(perf_dir, 'benchmark-cp') - run(benchmark_cp + ' --recursive --num-iterations %s ' - '--source %s --dest %s --result-dir %s --no-cleanup' % ( - num_iterations, local_dir, s3_location, results)) + run( + benchmark_cp + ' --recursive --num-iterations %s ' + '--source %s --dest %s --result-dir %s --no-cleanup' + % (num_iterations, local_dir, s3_location, results) + ) # 10k download results = os.path.join(results_dir, 'download-10k-small') os.makedirs(results) - run(benchmark_cp + ' --recursive --num-iterations %s ' - '--source %s --dest %s --result-dir %s' % ( - num_iterations, s3_location, local_dir, results)) + run( + benchmark_cp + ' --recursive --num-iterations %s ' + '--source %s --dest %s --result-dir %s' + % (num_iterations, s3_location, local_dir, results) + ) # 10k rm results = os.path.join(results_dir, 'delete-10k-small') os.makedirs(results) benchmark_rm = os.path.join(perf_dir, 'benchmark-rm') - run(benchmark_rm + ' --recursive --num-iterations %s ' - '--target %s --result-dir %s' % ( - num_iterations, s3_location, results)) + run( + benchmark_rm + ' --recursive --num-iterations %s ' + '--target %s --result-dir %s' + % (num_iterations, s3_location, results) + ) finally: # Note that the delete-10k-small benchmark restores # the files it's deleted once the 
script is finished. @@ -146,16 +166,20 @@ def benchmark(bucket, results_dir, num_iterations=1): # 10gb upload results = os.path.join(results_dir, 'upload-10gb') os.makedirs(results) - run(benchmark_cp + ' --recursive --num-iterations %s ' - '--source %s --dest %s --result-dir %s --no-cleanup' % ( - num_iterations, local_dir, s3_location, results)) + run( + benchmark_cp + ' --recursive --num-iterations %s ' + '--source %s --dest %s --result-dir %s --no-cleanup' + % (num_iterations, local_dir, s3_location, results) + ) # 10gb download results = os.path.join(results_dir, 'download-10gb') os.makedirs(results) - run(benchmark_cp + ' --recursive --num-iterations %s ' - '--source %s --dest %s --result-dir %s' % ( - num_iterations, s3_location, local_dir, results)) + run( + benchmark_cp + ' --recursive --num-iterations %s ' + '--source %s --dest %s --result-dir %s' + % (num_iterations, s3_location, local_dir, results) + ) finally: # Not benchmarking a single rm call since it's just a single call run('aws s3 rm --recursive ' + s3_location) @@ -170,23 +194,32 @@ def s3_uri(value): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - '-n', '--num-iterations', type=int, default=10, - help='The number of times to run each test.' + '-n', + '--num-iterations', + type=int, + default=10, + help='The number of times to run each test.', ) parser.add_argument( - '-b', '--bucket', default=TEST_BUCKET, type=s3_uri, + '-b', + '--bucket', + default=TEST_BUCKET, + type=s3_uri, required=TEST_BUCKET is None, help='The bucket to use for testing as an s3 uri. This can also be ' - 'set by the environment variable PERF_TEST_BUCKET. If the ' - 'environment variable is not set, then this argument is required.' + 'set by the environment variable PERF_TEST_BUCKET. If the ' + 'environment variable is not set, then this argument is required.', ) parser.add_argument( - '--num-files', default=10000, type=int, - help='The number of files to use for the multiple file case.' + '--num-files', + default=10000, + type=int, + help='The number of files to use for the multiple file case.', ) parser.add_argument( - '--large-file-size', default='10gb', + '--large-file-size', + default='10gb', help='The file size for the large file case. This can be in the form ' - '10gb, 4kb, etc.' + '10gb, 4kb, etc.', ) main(parser.parse_args()) diff --git a/scripts/ci/run-build-system-tests b/scripts/ci/run-build-system-tests index 3fae903657e0..8614df8c9942 100755 --- a/scripts/ci/run-build-system-tests +++ b/scripts/ci/run-build-system-tests @@ -2,7 +2,6 @@ import argparse import os import sys - from contextlib import contextmanager from pathlib import Path from subprocess import check_call diff --git a/scripts/ci/run-tests b/scripts/ci/run-tests index 663b7f1a506e..670006a1630b 100755 --- a/scripts/ci/run-tests +++ b/scripts/ci/run-tests @@ -84,23 +84,19 @@ if __name__ == "__main__": "running tests. This allows you to run the tests against the " "current repository without have to install the package as a " "distribution." - ) + ), ) parser.add_argument( "--ignore", nargs='+', default=[], - help=( - "Ignore a test subdirectory. Can be specified multiple times." - ) + help=("Ignore a test subdirectory. Can be specified multiple times."), ) parser.add_argument( "--tests-path", default=None, type=os.path.abspath, - help=( - "Optional path to an alternate test directory to use." 
- ) + help=("Optional path to an alternate test directory to use."), ) raw_args = parser.parse_args() test_runner, test_args, test_dirs = process_args(raw_args) diff --git a/scripts/ci/upload-benchmark b/scripts/ci/upload-benchmark index 33f7496ffa49..bf7d689a488f 100755 --- a/scripts/ci/upload-benchmark +++ b/scripts/ci/upload-benchmark @@ -1,13 +1,14 @@ #!/usr/bin/env python """Script to upload benchmark results to an s3 location.""" -import os + import argparse +import os from datetime import datetime from subprocess import check_call - REPO_ROOT = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir')) DEFAULT_BUCKET = os.environ.get('PERF_RESULTS_BUCKET') DATE_FORMAT = "%Y-%m-%d-%H-%M-%S-" @@ -18,8 +19,8 @@ def main(args): run_id = source.split(os.sep)[-1] destination = '%s/%s' % (args.bucket, run_id) check_call( - 'aws s3 cp --recursive %s %s' % (source, destination), - shell=True) + 'aws s3 cp --recursive %s %s' % (source, destination), shell=True + ) def s3_uri(value): @@ -54,15 +55,22 @@ def _is_result_dir_format(directory): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - '-d', '--directory', default=os.path.join(WORKDIR, 'results'), + '-d', + '--directory', + default=os.path.join(WORKDIR, 'results'), help='A directory containing multiple test runs or a single test ' - 'run directory. If this is a directory with multiple test runs, ' - 'the latest will be uploaded.') + 'run directory. If this is a directory with multiple test runs, ' + 'the latest will be uploaded.', + ) parser.add_argument( - '-b', '--bucket', default=DEFAULT_BUCKET, type=s3_uri, + '-b', + '--bucket', + default=DEFAULT_BUCKET, + type=s3_uri, required=DEFAULT_BUCKET is None, help='An s3uri to upload the results to. This can also be set with ' - 'the environment variable PERF_RESULTS_BUCKET. If the ' - 'environment variable is not set, then this argument is ' - 'required.') + 'the environment variable PERF_RESULTS_BUCKET. If the ' + 'environment variable is not set, then this argument is ' + 'required.', + ) main(parser.parse_args()) diff --git a/scripts/gen-ac-index b/scripts/gen-ac-index index a4c16c1bc657..4f4df9174663 100755 --- a/scripts/gen-ac-index +++ b/scripts/gen-ac-index @@ -1,23 +1,26 @@ #!/usr/bin/env python -"""Generate the index used for the new auto-completion. +"""Generate the index used for the new auto-completion.""" -""" -import os import argparse +import os -from awscli.autocomplete import db -from awscli.autocomplete import generator +from awscli.autocomplete import db, generator def main(): parser = argparse.ArgumentParser() - parser.add_argument('--include-builtin-index', action='store_true', - help=("Also generate builtin index as well as the " - "INDEX_LOCATION.")) - parser.add_argument('--index-location', default=db.INDEX_FILE, - help=( - 'Location to write the index file. ' - 'Defaults to ' + db.INDEX_FILE)) + parser.add_argument( + '--include-builtin-index', + action='store_true', + help=("Also generate builtin index as well as the " "INDEX_LOCATION."), + ) + parser.add_argument( + '--index-location', + default=db.INDEX_FILE, + help=( + 'Location to write the index file. 
' 'Defaults to ' + db.INDEX_FILE + ), + ) args = parser.parse_args() index_dir = os.path.dirname(os.path.abspath(args.index_location)) if not os.path.isdir(index_dir): diff --git a/scripts/gen-server-completions b/scripts/gen-server-completions index 335e685cec75..938e9d130e26 100755 --- a/scripts/gen-server-completions +++ b/scripts/gen-server-completions @@ -13,16 +13,18 @@ if you want to see the generated completion data without modifying existing files. """ + import argparse import json -import sys import os import re +import sys import botocore.session - -from awscli.autocomplete.autogen import ServerCompletionHeuristic -from awscli.autocomplete.autogen import BasicSingularize +from awscli.autocomplete.autogen import ( + BasicSingularize, + ServerCompletionHeuristic, +) # The awscli/__init__.py file sets the AWS_DATA_PATH env var, so as long # as we import from awscli we're ensured this env var exists. @@ -37,8 +39,10 @@ def generate_completion_data(args): model = session.get_service_model(service_name) completion_data = gen.generate_completion_descriptions(model) out_filename = os.path.join( - BOTOCORE_DATA_PATH, service_name, - model.api_version, 'completions-1.json' + BOTOCORE_DATA_PATH, + service_name, + model.api_version, + 'completions-1.json', ) to_json = _pretty_json_dump(completion_data) if args.only_print: diff --git a/scripts/install b/scripts/install index 8f825eb62ac7..12f23e6344a1 100755 --- a/scripts/install +++ b/scripts/install @@ -13,19 +13,18 @@ import subprocess import sys import tarfile import tempfile - from contextlib import contextmanager PACKAGES_DIR = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'packages') -INSTALL_DIR = os.path.expanduser(os.path.join( - '~', '.local', 'lib', 'aws')) + os.path.dirname(os.path.abspath(__file__)), 'packages' +) +INSTALL_DIR = os.path.expanduser(os.path.join('~', '.local', 'lib', 'aws')) GTE_PY37 = sys.version_info[:2] >= (3, 7) UNSUPPORTED_PYTHON = ( - (2,6), - (3,3), - (3,4), - (3,5), + (2, 6), + (3, 3), + (3, 4), + (3, 5), ) INSTALL_ARGS = ( '--no-binary :all: --no-build-isolation --no-cache-dir --no-index ' @@ -45,6 +44,7 @@ class PythonDeprecationWarning(Warning): Python version being used is scheduled to become unsupported in an future release. See warning for specifics. """ + pass @@ -52,12 +52,10 @@ def _build_deprecations(): py_27_params = { 'date': 'July 15, 2021', 'blog_link': 'https://aws.amazon.com/blogs/developer/announcing-end-' - 'of-support-for-python-2-7-in-aws-sdk-for-python-and-' - 'aws-cli-v1/' - } - return { - (2,7): py_27_params + 'of-support-for-python-2-7-in-aws-sdk-for-python-and-' + 'aws-cli-v1/', } + return {(2, 7): py_27_params} DEPRECATED_PYTHON = _build_deprecations() @@ -75,13 +73,15 @@ def cd(dirname): def run(cmd): sys.stdout.write("Running cmd: %s\n" % cmd) - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) stdout, stderr = p.communicate() if p.returncode != 0: output = (stdout + stderr).decode("utf-8") - raise BadRCError("Bad rc (%s) for cmd '%s': %s" % ( - p.returncode, cmd, output)) + raise BadRCError( + "Bad rc (%s) for cmd '%s': %s" % (p.returncode, cmd, output) + ) return stdout @@ -123,15 +123,16 @@ def _create_virtualenv_external(location, working_dir): # We know that virtualenv is the only dir in this directory # so we can listdir()[0] it. 
with cd(os.listdir('.')[0]): - run(('%s virtualenv.py --no-download ' - '--python %s %s') % (sys.executable, - sys.executable, - location)) + run( + ('%s virtualenv.py --no-download ' '--python %s %s') + % (sys.executable, sys.executable, location) + ) def _get_package_tarball(package_dir, package_prefix): - package_filenames = sorted([p for p in os.listdir(package_dir) - if p.startswith(package_prefix)]) + package_filenames = sorted( + [p for p in os.listdir(package_dir) if p.startswith(package_prefix)] + ) return package_filenames[-1] @@ -145,8 +146,9 @@ def create_working_dir(): def pip_install_packages(install_dir): - cli_tarball = [p for p in os.listdir(PACKAGES_DIR) - if p.startswith('awscli')] + cli_tarball = [ + p for p in os.listdir(PACKAGES_DIR) if p.startswith('awscli') + ] if len(cli_tarball) != 1: message = ( "Multiple versions of the CLI were found in %s. Please clear " @@ -161,8 +163,10 @@ def pip_install_packages(install_dir): _install_setup_deps(pip_script, '.') with cd(PACKAGES_DIR): - run('%s install %s --find-links file://%s %s' % ( - pip_script, INSTALL_ARGS, PACKAGES_DIR, cli_tarball)) + run( + '%s install %s --find-links file://%s %s' + % (pip_script, INSTALL_ARGS, PACKAGES_DIR, cli_tarball) + ) def _install_setup_deps(pip_script, setup_package_dir): @@ -172,15 +176,19 @@ def _install_setup_deps(pip_script, setup_package_dir): # so for now we're explicitly installing the one setup_requires package # we need. This comes from python-dateutils. setuptools_scm_tarball = _get_package_tarball( - setup_package_dir, 'setuptools_scm') - run('%s install --no-binary :all: --no-cache-dir --no-index ' - '--find-links file://%s %s' % ( - pip_script, setup_package_dir, setuptools_scm_tarball)) - wheel_tarball = _get_package_tarball( - setup_package_dir, 'wheel') - run('%s install --no-binary :all: --no-cache-dir --no-index ' - '--find-links file://%s %s' % ( - pip_script, setup_package_dir, wheel_tarball)) + setup_package_dir, 'setuptools_scm' + ) + run( + '%s install --no-binary :all: --no-cache-dir --no-index ' + '--find-links file://%s %s' + % (pip_script, setup_package_dir, setuptools_scm_tarball) + ) + wheel_tarball = _get_package_tarball(setup_package_dir, 'wheel') + run( + '%s install --no-binary :all: --no-cache-dir --no-index ' + '--find-links file://%s %s' + % (pip_script, setup_package_dir, wheel_tarball) + ) def create_symlink(real_location, symlink_name): @@ -197,17 +205,25 @@ def create_symlink(real_location, symlink_name): def main(): parser = optparse.OptionParser() - parser.add_option('-i', '--install-dir', help="The location to install " - "the AWS CLI. The default value is ~/.local/lib/aws", - default=INSTALL_DIR) - parser.add_option('-b', '--bin-location', help="If this argument is " - "provided, then a symlink will be created at this " - "location that points to the aws executable. " - "This argument is useful if you want to put the aws " - "executable somewhere already on your path, e.g. " - "-b /usr/local/bin/aws. This is an optional argument. " - "If you do not provide this argument you will have to " - "add INSTALL_DIR/bin to your PATH.") + parser.add_option( + '-i', + '--install-dir', + help="The location to install " + "the AWS CLI. The default value is ~/.local/lib/aws", + default=INSTALL_DIR, + ) + parser.add_option( + '-b', + '--bin-location', + help="If this argument is " + "provided, then a symlink will be created at this " + "location that points to the aws executable. 
" + "This argument is useful if you want to put the aws " + "executable somewhere already on your path, e.g. " + "-b /usr/local/bin/aws. This is an optional argument. " + "If you do not provide this argument you will have to " + "add INSTALL_DIR/bin to your PATH.", + ) py_version = sys.version_info[:2] if py_version in UNSUPPORTED_PYTHON: unsupported_python_msg = ( @@ -240,8 +256,9 @@ def main(): create_install_structure(working_dir, opts.install_dir) pip_install_packages(opts.install_dir) real_location = os.path.join(opts.install_dir, bin_path(), 'aws') - if opts.bin_location and create_symlink(real_location, - opts.bin_location): + if opts.bin_location and create_symlink( + real_location, opts.bin_location + ): print("You can now run: %s --version" % opts.bin_location) else: print("You can now run: %s --version" % real_location) diff --git a/scripts/install_deps.py b/scripts/install_deps.py index 39db22085a35..2a142d0fe4c6 100644 --- a/scripts/install_deps.py +++ b/scripts/install_deps.py @@ -1,8 +1,10 @@ import os -from utils import cd, bin_path, run, virtualenv_enabled +from utils import bin_path, cd, run, virtualenv_enabled -INSTALL_ARGS = "--no-build-isolation --no-cache-dir --no-index --prefer-binary " +INSTALL_ARGS = ( + "--no-build-isolation --no-cache-dir --no-index --prefer-binary " +) PINNED_PIP_VERSION = '24.0' SETUP_DEPS = ("setuptools-", "setuptools_scm", "wheel", "hatchling") @@ -17,7 +19,8 @@ def get_package_tarball(package_dir, package_prefix): ) if len(package_filenames) == 0: raise InstallationError( - "Unable to find local package starting with %s prefix." % package_prefix + "Unable to find local package starting with %s prefix." + % package_prefix ) # We only expect a single package from the downloader return package_filenames[0] @@ -41,7 +44,9 @@ def pip_install_packages(package_dir): # Setup pip to support modern setuptools calls pip_script = os.path.join(os.environ["VIRTUAL_ENV"], bin_path(), "pip") - local_python = os.path.join(os.environ["VIRTUAL_ENV"], bin_path(), "python") + local_python = os.path.join( + os.environ["VIRTUAL_ENV"], bin_path(), "python" + ) # Windows can't replace a running pip.exe, so we need to work around run("%s -m pip install pip==%s" % (local_python, PINNED_PIP_VERSION)) diff --git a/scripts/installers/make-docker b/scripts/installers/make-docker index c9293ba02a8a..12d606da755a 100755 --- a/scripts/installers/make-docker +++ b/scripts/installers/make-docker @@ -1,19 +1,27 @@ #!/usr/bin/env python """Script to build a Docker image of the AWS CLI""" + import argparse import os -import sys import shutil +import sys from distutils.dir_util import copy_tree sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from utils import run, tmp_dir, cd, BadRCError, \ - extract_zip, update_metadata, save_to_zip - - -ROOT = os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) +from utils import ( + BadRCError, + cd, + extract_zip, + run, + save_to_zip, + tmp_dir, + update_metadata, +) + +ROOT = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) DOCKER_DIR = os.path.join(ROOT, 'docker') DIST_DIR = os.path.join(ROOT, 'dist') DEFAULT_EXE_ZIP = os.path.join(DIST_DIR, 'awscli-exe.zip') @@ -52,8 +60,10 @@ def _make_build_context(build_context_dir, exe): def _update_exe_metadata(exe): with tmp_dir() as tmp: extract_zip(exe, tmp) - update_metadata(os.path.join(tmp, 'aws', 'dist'), - distribution_source=DISTRIBUTION_SOURCE) + update_metadata( + os.path.join(tmp, 'aws', 'dist'), + 
distribution_source=DISTRIBUTION_SOURCE, + ) save_to_zip(tmp, exe) @@ -63,15 +73,19 @@ def _copy_docker_dir_to_build_context(build_context_dir): def _copy_exe_to_build_context(build_context_dir, exe): build_context_exe_path = os.path.join( - build_context_dir, os.path.basename(exe)) + build_context_dir, os.path.basename(exe) + ) shutil.copy(exe, build_context_exe_path) def _docker_build(build_context_dir, tags, exe_filename): with cd(build_context_dir): docker_build_cmd = [ - 'docker', 'build', '--build-arg', - f'EXE_FILENAME={exe_filename}', '.' + 'docker', + 'build', + '--build-arg', + f'EXE_FILENAME={exe_filename}', + '.', ] for tag in tags: docker_build_cmd.extend(['-t', tag]) @@ -94,7 +108,7 @@ def main(): help=( 'The name of the exe zip to build into the Docker image. By ' 'default the exe located at: %s' % DEFAULT_EXE_ZIP - ) + ), ) parser.add_argument( '--output', @@ -102,7 +116,7 @@ def main(): help=( 'The name of the file to save the Docker image. By default, ' 'this will be saved at: %s' % DEFAULT_DOCKER_OUTPUT - ) + ), ) parser.add_argument( '--tags', diff --git a/scripts/installers/make-exe b/scripts/installers/make-exe index 75a3634fcc18..58c872fb37a1 100755 --- a/scripts/installers/make-exe +++ b/scripts/installers/make-exe @@ -4,21 +4,23 @@ This exe can then be wrapped in a platform specific installer for each supported platform. """ + import argparse import json import os -import sys import shutil +import sys from distutils.dir_util import copy_tree sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from utils import run, tmp_dir, update_metadata, save_to_zip, remove_dist_info from install_deps import install_packages +from utils import remove_dist_info, run, save_to_zip, tmp_dir, update_metadata -ROOT = os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) +ROOT = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) EXE_DIR = os.path.join(ROOT, 'exe') PYINSTALLER_DIR = os.path.join(EXE_DIR, 'pyinstaller') ASSETS_DIR = os.path.join(EXE_DIR, 'assets') @@ -42,12 +44,14 @@ def do_make_exe(workdir, exe_zipfile, ac_index): aws_exe_build = pyinstaller('aws.spec') if ac_index: full_internal_ac_index_path = os.path.join( - aws_exe_build, AC_INDEX_INTERNAL_PATH) + aws_exe_build, AC_INDEX_INTERNAL_PATH + ) copy_file(ac_index, full_internal_ac_index_path) copy_directory(aws_exe_build, output_exe_dist_dir) aws_complete_exe_build = pyinstaller('aws_completer.spec') - update_metadata(aws_complete_exe_build, - distribution_source=DISTRIBUTION_SOURCE) + update_metadata( + aws_complete_exe_build, distribution_source=DISTRIBUTION_SOURCE + ) copy_directory_contents_into(aws_complete_exe_build, output_exe_dist_dir) copy_directory_contents_into(ASSETS_DIR, exe_dir) remove_dist_info(workdir) @@ -63,8 +67,7 @@ def delete_existing_exe_build(): def pyinstaller(specfile): aws_spec_path = os.path.join(PYINSTALLER_DIR, specfile) print(run('pyinstaller %s' % (aws_spec_path), cwd=PYINSTALLER_DIR)) - return os.path.join( - PYINSTALLER_DIR, 'dist', os.path.splitext(specfile)[0]) + return os.path.join(PYINSTALLER_DIR, 'dist', os.path.splitext(specfile)[0]) def copy_directory(src, dst): @@ -101,7 +104,7 @@ def main(): 'The name of the file to save the exe zip. By default, ' 'this will be saved in "dist/%s" directory in the root of the ' 'awscli.' 
% DEFAULT_OUTPUT_ZIP - ) + ), ) parser.add_argument( '--no-cleanup', @@ -124,16 +127,15 @@ def main(): parser.add_argument( '--ac-index-path', default=None, - help=( - 'Path to ac.index file to include in the exe.' - ) + help=('Path to ac.index file to include in the exe.'), ) args = parser.parse_args() output = os.path.abspath(args.output) if args.src_dir: print( - 'Installing dependencies from local directory: %s' % args.src_dir) + 'Installing dependencies from local directory: %s' % args.src_dir + ) install_packages(args.src_dir) else: run('pip install -r requirements-dev-lock.txt') diff --git a/scripts/installers/make-macpkg b/scripts/installers/make-macpkg index 5b6f7e1e95f4..51e8b6336676 100755 --- a/scripts/installers/make-macpkg +++ b/scripts/installers/make-macpkg @@ -4,18 +4,19 @@ This script assumes that an executable has been produced previously by the sibling script make-pyinstaller. """ + import argparse import os -import sys import shutil +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from utils import run, tmp_dir, extract_zip - +from utils import extract_zip, run, tmp_dir -ROOT = os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) +ROOT = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) PKG_DIR = os.path.join(ROOT, 'macpkg') SCRIPTS_DIR = os.path.join(PKG_DIR, 'scripts') RESOURCES_DIR = os.path.join(PKG_DIR, 'resources') @@ -45,27 +46,30 @@ def stage_files(workdir): def do_make_pkg(workdir, pkg_name): version = get_version(workdir) - print(run( - ( - 'pkgbuild --identifier com.amazon.aws.cli2 ' - '--root ./stage ' - '--scripts %s ' - '--version %s ' - '%s' - ) % (SCRIPTS_DIR, version, TEMP_PKG_NAME), - cwd=workdir, - )) - with tmp_dir() as formatted_resource_dir: - render_resources( - formatted_resource_dir, RESOURCES_DIR, {'version': version}) - print(run( + print( + run( ( - 'productbuild --distribution %s ' - '--resources %s ' + 'pkgbuild --identifier com.amazon.aws.cli2 ' + '--root ./stage ' + '--scripts %s ' + '--version %s ' '%s' - ) % (DISTRIBUTION_PATH, formatted_resource_dir, PKG_NAME), + ) + % (SCRIPTS_DIR, version, TEMP_PKG_NAME), cwd=workdir, - )) + ) + ) + with tmp_dir() as formatted_resource_dir: + render_resources( + formatted_resource_dir, RESOURCES_DIR, {'version': version} + ) + print( + run( + ('productbuild --distribution %s ' '--resources %s ' '%s') + % (DISTRIBUTION_PATH, formatted_resource_dir, PKG_NAME), + cwd=workdir, + ) + ) shutil.copyfile(os.path.join(workdir, PKG_NAME), pkg_name) @@ -104,7 +108,7 @@ def main(): help=( 'The output PKG name. By default, this will be ' '"dist/%s" in the root of the awscli.' % PKG_NAME - ) + ), ) parser.add_argument( '--src-exe', @@ -112,7 +116,7 @@ def main(): help=( 'The exe used to build the PKG. By default, this will be the' '"dist/%s" zipfile in the root of the awscli.' 
% EXE_ZIP_NAME - ) + ), ) args = parser.parse_args() output = os.path.abspath(args.output) diff --git a/scripts/installers/sign-exe b/scripts/installers/sign-exe index a0f4c14fba3b..01763684a1e1 100755 --- a/scripts/installers/sign-exe +++ b/scripts/installers/sign-exe @@ -1,16 +1,17 @@ #!/usr/bin/env python """Script to sign exe bundle""" + import argparse import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from utils import run, BadRCError - +from utils import BadRCError, run -ROOT = os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) +ROOT = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) EXE_ZIP_NAME = 'awscli-exe.zip' SIGNATURE_FILENAME = EXE_ZIP_NAME + '.sig' @@ -28,9 +29,7 @@ def _verify_gpg_installed(): def _sign_exe_zip(exe_zipfile, signature_filename, key_name): - options = [ - '--yes', '--output', signature_filename - ] + options = ['--yes', '--output', signature_filename] if key_name: options.extend(['--local-user', key_name]) options = ' '.join(options) @@ -45,7 +44,7 @@ def main(): help=( 'The output signature file. By default, this will be ' '"dist/%s" in the root of the awscli.' % SIGNATURE_FILENAME - ) + ), ) parser.add_argument( '--exe', @@ -53,7 +52,7 @@ def main(): help=( 'The exe zip to sign. By default, this will be the' '"dist/%s" zipfile in the root of the awscli.' % EXE_ZIP_NAME - ) + ), ) parser.add_argument( '--key-name', @@ -61,7 +60,7 @@ def main(): 'The name of the key to use for signing. This corresponds to the ' '--local-user option when running gpg. By default, the key used ' 'is your default private key in gpg.' - ) + ), ) args = parser.parse_args() sign_exe(args.exe, args.output, args.key_name) @@ -69,4 +68,3 @@ def main(): if __name__ == "__main__": main() - diff --git a/scripts/installers/test-installer b/scripts/installers/test-installer index 465a1bc5e25d..5f069b55cb78 100755 --- a/scripts/installers/test-installer +++ b/scripts/installers/test-installer @@ -1,23 +1,26 @@ #!/usr/bin/env python """Script to run smoke tests on aws cli packaged installers""" + import argparse -import sys import os import re import shutil +import sys import tempfile SCRIPTS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(SCRIPTS_DIR) -from utils import run, tmp_dir, extract_zip +from utils import extract_zip, run, tmp_dir REPO_ROOT = os.path.dirname(SCRIPTS_DIR) DIST_DIR = os.path.join(REPO_ROOT, 'dist') SMOKE_TEST_PATH = os.path.join( - REPO_ROOT, 'tests', 'integration', 'test_smoke.py') + REPO_ROOT, 'tests', 'integration', 'test_smoke.py' +) UNINSTALL_MAC_PKG_PATH = os.path.join( - SCRIPTS_DIR, 'installers', 'uninstall-mac-pkg') + SCRIPTS_DIR, 'installers', 'uninstall-mac-pkg' +) EXE_NAME = 'aws' @@ -79,9 +82,8 @@ class ExeTester(InstallerTester): extract_zip(self._installer_location, workdir) install_script = os.path.join(workdir, 'aws', 'install') run( - '%s --install-dir %s --bin-dir %s' % ( - install_script, install_dir, bin_dir - ) + '%s --install-dir %s --bin-dir %s' + % (install_script, install_dir, bin_dir) ) def cleanup(self): @@ -92,7 +94,9 @@ class ExeTester(InstallerTester): class PkgTester(InstallerTester): - DEFAULT_INSTALLER_LOCATION = os.path.join(DIST_DIR, 'AWS-CLI-Installer.pkg') + DEFAULT_INSTALLER_LOCATION = os.path.join( + DIST_DIR, 'AWS-CLI-Installer.pkg' + ) _PKG_ID = 'com.amazon.aws.cli2' def get_aws_cmd(self): @@ -109,8 +113,9 @@ class PkgTester(InstallerTester): run('sudo %s %s uninstall' % 
(sys.executable, UNINSTALL_MAC_PKG_PATH)) def __call__(self): - assert os.geteuid() == 0, \ - 'Mac PKG installer must be run as root (with sudo).' + assert ( + os.geteuid() == 0 + ), 'Mac PKG installer must be run as root (with sudo).' super(PkgTester, self).__call__() @@ -121,20 +126,20 @@ def main(): } parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( - '--installer-type', required=True, + '--installer-type', + required=True, choices=installer_to_tester_cls.keys(), - help='The type of installer to test' + help='The type of installer to test', ) parser.add_argument( '--installer-path', help=( 'The path to the installer to test. By default, installers are ' 'used from the dist directory.' - ) + ), ) args = parser.parse_args() - tester = installer_to_tester_cls[args.installer_type]( - args.installer_path) + tester = installer_to_tester_cls[args.installer_type](args.installer_path) return tester() diff --git a/scripts/installers/uninstall-mac-pkg b/scripts/installers/uninstall-mac-pkg index 49bc4f164434..4f2003a3ae53 100755 --- a/scripts/installers/uninstall-mac-pkg +++ b/scripts/installers/uninstall-mac-pkg @@ -1,20 +1,17 @@ #!/usr/bin/env python """Script to uninstall AWS CLI V2 Mac PKG""" + import argparse -import sys import os import re +import sys from datetime import datetime -from subprocess import check_output -from subprocess import CalledProcessError -from subprocess import PIPE +from subprocess import PIPE, CalledProcessError, check_output SCRIPTS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(SCRIPTS_DIR) -from utils import run -from utils import BadRCError - +from utils import BadRCError, run _PKG_ID = 'com.amazon.aws.cli2' _PKGUTIL_PATTERN = re.compile( @@ -24,9 +21,10 @@ _PKGUTIL_PATTERN = re.compile( r'location:\s*(?P.*?)\n' r'install-time:\s*(?P.*?)\n' ), - re.X + re.X, ) + def uninstall(): assert _is_installed(), 'Could not find AWS CLI installation.' assert os.geteuid() == 0, 'Script must be run as root (with sudo).' @@ -58,7 +56,8 @@ def _get_root_dir(): def _get_file_list(root): lines = run( - 'pkgutil --only-files --files %s /' % _PKG_ID, echo=False).split('\n') + 'pkgutil --only-files --files %s /' % _PKG_ID, echo=False + ).split('\n') pkg_file_list = [os.path.join(root, line) for line in lines if line] extra_files = _read_install_metadata(root) return pkg_file_list + extra_files @@ -79,13 +78,14 @@ def _read_install_metadata(root): def _get_dir_list(root): lines = run( - 'pkgutil --only-dirs --files %s /' % _PKG_ID, echo=False).split('\n') + 'pkgutil --only-dirs --files %s /' % _PKG_ID, echo=False + ).split('\n') # Longer directory names are listed first to force them to come before # their parent directories. This ensures that child directories are # deleted before their parents. 
return sorted( [os.path.join(root, line) for line in lines if line], - key=lambda x: -len(x) + key=lambda x: -len(x), ) @@ -114,10 +114,14 @@ def check(): lines = run('pkgutil --pkg-info %s /' % _PKG_ID, echo=False) output = _PKGUTIL_PATTERN.search(lines) root = os.path.join(output.group('volume'), output.group('location')) - print('Found AWS CLI version %s installed at %s' % ( - output.group('version'), root)) - print('Installed on %s' % datetime.fromtimestamp( - int(output.group('install_time')))) + print( + 'Found AWS CLI version %s installed at %s' + % (output.group('version'), root) + ) + print( + 'Installed on %s' + % datetime.fromtimestamp(int(output.group('install_time'))) + ) command = 'sudo %s uninstall' % os.path.abspath(__file__) print('To uninstall run the command:') print(command) @@ -133,11 +137,12 @@ def _is_installed(): def _warn_missing_arg(print_help): - # wrap `parser.print_help()` to return 1 so any callers don't receive # a potentially misleading 0 exit code from a failed call. def missing_arg_warning(): - print('Missing input: script requires at least one positional argument\n') + print( + 'Missing input: script requires at least one positional argument\n' + ) print_help() return 1 @@ -152,12 +157,11 @@ def main(): help=( 'Check if the AWS CLI is currently installed from a PKG ' 'installer.' - ) + ), ) check_parser.set_defaults(func=check) uninstall_parser = subparsers.add_parser( - 'uninstall', - help='Uninstall the AWS CLI installed from the Mac PKG' + 'uninstall', help='Uninstall the AWS CLI installed from the Mac PKG' ) uninstall_parser.set_defaults(func=uninstall) diff --git a/scripts/make-bundle b/scripts/make-bundle index 915e4dea0255..4a9ccc17ad1f 100755 --- a/scripts/make-bundle +++ b/scripts/make-bundle @@ -12,15 +12,15 @@ interface for those not familiar with the python ecosystem. """ + import os -import sys -import subprocess import shutil +import subprocess +import sys import tempfile import zipfile from contextlib import contextmanager - EXTRA_RUNTIME_DEPS = [ # Use an up to date virtualenv/pip/setuptools on > 2.6. ('virtualenv', '16.7.8'), @@ -36,7 +36,8 @@ PIP_DOWNLOAD_ARGS = '--no-binary :all:' # we're distributing a copy that works on all supported platforms. 
CONSTRAINTS_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), - 'assets', 'constraints-bundled.txt' + 'assets', + 'constraints-bundled.txt', ) @@ -56,13 +57,15 @@ def cd(dirname): def run(cmd): sys.stdout.write("Running cmd: %s\n" % cmd) - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) stdout, stderr = p.communicate() rc = p.wait() if p.returncode != 0: - raise BadRCError("Bad rc (%s) for cmd '%s': %s" % ( - rc, cmd, stderr + stdout)) + raise BadRCError( + "Bad rc (%s) for cmd '%s': %s" % (rc, cmd, stderr + stdout) + ) return stdout @@ -80,17 +83,19 @@ def create_scratch_dir(): def download_package_tarballs(dirname, packages): with cd(dirname): for package, package_version in packages: - run('%s -m pip download %s==%s %s' % ( - sys.executable, package, package_version, PIP_DOWNLOAD_ARGS - )) + run( + '%s -m pip download %s==%s %s' + % (sys.executable, package, package_version, PIP_DOWNLOAD_ARGS) + ) def download_cli_deps(scratch_dir): - awscli_dir = os.path.dirname( - os.path.dirname(os.path.abspath(__file__))) + awscli_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) with cd(scratch_dir): - run('pip download -c %s %s %s' % ( - CONSTRAINTS_FILE, PIP_DOWNLOAD_ARGS, awscli_dir)) + run( + 'pip download -c %s %s %s' + % (CONSTRAINTS_FILE, PIP_DOWNLOAD_ARGS, awscli_dir) + ) def _remove_cli_zip(scratch_dir): @@ -100,20 +105,21 @@ def _remove_cli_zip(scratch_dir): def add_cli_sdist(scratch_dir): - awscli_dir = os.path.dirname( - os.path.dirname(os.path.abspath(__file__))) + awscli_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.path.exists(os.path.join(awscli_dir, 'dist')): shutil.rmtree(os.path.join(awscli_dir, 'dist')) with cd(awscli_dir): run('%s setup.py sdist' % sys.executable) filename = os.listdir('dist')[0] - shutil.move(os.path.join('dist', filename), - os.path.join(scratch_dir, filename)) + shutil.move( + os.path.join('dist', filename), os.path.join(scratch_dir, filename) + ) def create_bootstrap_script(scratch_dir): install_script = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'install') + os.path.dirname(os.path.abspath(__file__)), 'install' + ) shutil.copy(install_script, os.path.join(scratch_dir, 'install')) @@ -135,11 +141,13 @@ def zip_dir(scratch_dir): def verify_preconditions(): # The pip version looks like: # 'pip 1.4.1 from ....' - pip_version = run( - '%s -m pip --version' % sys.executable).strip().split()[1] + pip_version = ( + run('%s -m pip --version' % sys.executable).strip().split()[1] + ) # Virtualenv version just has the version string: '1.14.5\n' virtualenv_version = run( - '%s -m virtualenv --version' % sys.executable).strip() + '%s -m virtualenv --version' % sys.executable + ).strip() _min_version_required('9.0.1', pip_version, 'pip') _min_version_required('15.1.0', virtualenv_version, 'virtualenv') @@ -152,8 +160,10 @@ def _min_version_required(min_version, actual_version, name): for min_version_part, actual_version_part in zip(min_split, actual_split): if int(actual_version_part) >= int(min_version_part): return - raise ValueError("%s requires at least version %s, but version %s was " - "found." % (name, min_version, actual_version)) + raise ValueError( + "%s requires at least version %s, but version %s was " + "found." 
% (name, min_version, actual_version) + ) def main(): diff --git a/scripts/make-global-opts-documentation b/scripts/make-global-opts-documentation index 3f8f345df62c..00097504ab5b 100755 --- a/scripts/make-global-opts-documentation +++ b/scripts/make-global-opts-documentation @@ -10,11 +10,13 @@ every subcommand's help docs. import os -from awscli.clidriver import create_clidriver from awscli.clidocs import ( - EXAMPLES_DIR, GLOBAL_OPTIONS_FILE, - GLOBAL_OPTIONS_SYNOPSIS_FILE, GlobalOptionsDocumenter + EXAMPLES_DIR, + GLOBAL_OPTIONS_FILE, + GLOBAL_OPTIONS_SYNOPSIS_FILE, + GlobalOptionsDocumenter, ) +from awscli.clidriver import create_clidriver def main(): diff --git a/scripts/new-change b/scripts/new-change index 8b4905147318..43fc571dad37 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -36,21 +36,20 @@ You can then use the ``scripts/render-change`` to generate the CHANGELOG.rst file. """ + +import argparse +import json import os +import random import re -import sys -import json import string -import random -import tempfile import subprocess -import argparse - +import sys +import tempfile VALID_CHARS = set(string.ascii_letters + string.digits) CHANGES_DIR = os.path.join( - os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - '.changes' + os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.changes' ) TEMPLATE = """\ # Type should be one of: feature, bugfix, enhancement, api-change @@ -90,7 +89,8 @@ def new_changelog_entry(args): parsed_values = get_values_from_editor(args) if has_empty_values(parsed_values): sys.stderr.write( - "Empty changelog values received, skipping entry creation.\n") + "Empty changelog values received, skipping entry creation.\n" + ) return 1 replace_issue_references(parsed_values, args.repo) write_new_change(parsed_values) @@ -98,9 +98,11 @@ def has_empty_values(parsed_values): - return not (parsed_values.get('type') and - parsed_values.get('category') and - parsed_values.get('description')) + return not ( + parsed_values.get('type') + and parsed_values.get('category') + and parsed_values.get('description') + ) def all_values_provided(args): @@ -131,9 +133,11 @@ def replace_issue_references(parsed, repo_name): def linkify(match): number = match.group()[1:] - return ( - '`%s <https://github.com/%s/issues/%s>`__' % ( - match.group(), repo_name, number)) + return '`%s <https://github.com/%s/issues/%s>`__' % ( + match.group(), + repo_name, + number, + ) new_description = re.sub('#\d+', linkify, description) parsed['description'] = new_description @@ -151,13 +155,15 @@ def write_new_change(parsed_values): category = parsed_values['category'] short_summary = ''.join(filter(lambda x: x in VALID_CHARS, category)) filename = '{type_name}-{summary}'.format( - type_name=parsed_values['type'], - summary=short_summary) + type_name=parsed_values['type'], summary=short_summary + ) possible_filename = os.path.join( - dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000)))) + dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))) + ) while os.path.isfile(possible_filename): possible_filename = os.path.join( - dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000)))) + dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))) + ) with open(possible_filename, 'w') as f: f.write(json.dumps(parsed_values, indent=2) + "\n") @@ -198,15 +204,21 @@ def parse_filled_in_contents(contents): def main(): parser = argparse.ArgumentParser() - parser.add_argument('-t', '--type', dest='change_type', - default='', choices=('bugfix', 'feature', - 'enhancement', 
'api-change')) - parser.add_argument('-c', '--category', dest='category', - default='') - parser.add_argument('-d', '--description', dest='description', - default='') - parser.add_argument('-r', '--repo', default='aws/aws-cli', - help='Optional repo name, e.g: aws/aws-cli') + parser.add_argument( + '-t', + '--type', + dest='change_type', + default='', + choices=('bugfix', 'feature', 'enhancement', 'api-change'), + ) + parser.add_argument('-c', '--category', dest='category', default='') + parser.add_argument('-d', '--description', dest='description', default='') + parser.add_argument( + '-r', + '--repo', + default='aws/aws-cli', + help='Optional repo name, e.g: aws/aws-cli', + ) args = parser.parse_args() sys.exit(new_changelog_entry(args)) diff --git a/scripts/performance/benchmark-cp b/scripts/performance/benchmark-cp index e63ae7cd8d56..8a2e58cf1403 100755 --- a/scripts/performance/benchmark-cp +++ b/scripts/performance/benchmark-cp @@ -1,7 +1,12 @@ #!/usr/bin/env python -from benchmark_utils import summarize, clean -from benchmark_utils import get_default_argparser, get_transfer_command -from benchmark_utils import create_random_subfolder, benchmark_command +from benchmark_utils import ( + benchmark_command, + clean, + create_random_subfolder, + get_default_argparser, + get_transfer_command, + summarize, +) def benchmark_cp(args): @@ -16,21 +21,26 @@ def benchmark_cp(args): clean(destination, args.recursive) benchmark_command( - command, args.benchmark_script, args.summarize_script, - args.result_dir, args.num_iterations, args.dry_run, - cleanup=cleanup + command, + args.benchmark_script, + args.summarize_script, + args.result_dir, + args.num_iterations, + args.dry_run, + cleanup=cleanup, ) if __name__ == "__main__": parser = get_default_argparser() parser.add_argument( - '-s', '--source', required=True, - help='A local path or s3 path.' + '-s', '--source', required=True, help='A local path or s3 path.' ) parser.add_argument( - '-d', '--destination', required=True, + '-d', + '--destination', + required=True, help='A local path or s3 path. A directory will be created in this ' - 'location to copy to in the case of a recursive transfer.' + 'location to copy to in the case of a recursive transfer.', ) benchmark_cp(parser.parse_args()) diff --git a/scripts/performance/benchmark-mv b/scripts/performance/benchmark-mv index b6e679425edd..f08a61a3dffc 100755 --- a/scripts/performance/benchmark-mv +++ b/scripts/performance/benchmark-mv @@ -1,7 +1,13 @@ #!/usr/bin/env python -from benchmark_utils import backup, copy, clean, get_default_argparser -from benchmark_utils import create_random_subfolder, benchmark_command -from benchmark_utils import get_transfer_command +from benchmark_utils import ( + backup, + benchmark_command, + clean, + copy, + create_random_subfolder, + get_default_argparser, + get_transfer_command, +) def benchmark_mv(args): @@ -22,22 +28,27 @@ def benchmark_mv(args): copy(backup_path, args.source, args.recursive) benchmark_command( - command, args.benchmark_script, args.summarize_script, - args.result_dir, args.num_iterations, args.dry_run, + command, + args.benchmark_script, + args.summarize_script, + args.result_dir, + args.num_iterations, + args.dry_run, upkeep=upkeep, - cleanup=cleanup + cleanup=cleanup, ) if __name__ == "__main__": parser = get_default_argparser() parser.add_argument( - '-s', '--source', required=True, - help='A local path or s3 path.' + '-s', '--source', required=True, help='A local path or s3 path.' 
) parser.add_argument( - '-d', '--destination', required=True, + '-d', + '--destination', + required=True, help='A local path or s3 path. A directory will be created in this ' - 'location to move to in the case of a recursive transfer.' + 'location to move to in the case of a recursive transfer.', ) benchmark_mv(parser.parse_args()) diff --git a/scripts/performance/benchmark-rm b/scripts/performance/benchmark-rm index 16009c696cda..946830a68de8 100755 --- a/scripts/performance/benchmark-rm +++ b/scripts/performance/benchmark-rm @@ -1,18 +1,29 @@ #!/usr/bin/env python -from benchmark_utils import benchmark_command, get_transfer_command -from benchmark_utils import backup, copy, clean, get_default_argparser +from benchmark_utils import ( + backup, + benchmark_command, + clean, + copy, + get_default_argparser, + get_transfer_command, +) def benchmark_rm(args): command = get_transfer_command( - 'rm %s' % args.target, args.recursive, args.quiet) + 'rm %s' % args.target, args.recursive, args.quiet + ) backup_path = backup(args.target, args.recursive) benchmark_command( - command, args.benchmark_script, args.summarize_script, - args.result_dir, args.num_iterations, args.dry_run, + command, + args.benchmark_script, + args.summarize_script, + args.result_dir, + args.num_iterations, + args.dry_run, upkeep=lambda: copy(backup_path, args.target, args.recursive), - cleanup=lambda: clean(backup_path, args.recursive) + cleanup=lambda: clean(backup_path, args.recursive), ) diff --git a/scripts/performance/benchmark_utils.py b/scripts/performance/benchmark_utils.py index da48ae372d81..9b6ece2114e6 100644 --- a/scripts/performance/benchmark_utils.py +++ b/scripts/performance/benchmark_utils.py @@ -1,10 +1,11 @@ -import s3transfer +import argparse import os -import subprocess -import uuid import shutil -import argparse +import subprocess import tempfile +import uuid + +import s3transfer def summarize(script, result_dir, summary_dir): @@ -145,9 +146,16 @@ def get_transfer_command(command, recursive, quiet): return cli_command -def benchmark_command(command, benchmark_script, summarize_script, - output_dir, num_iterations, dry_run, upkeep=None, - cleanup=None): +def benchmark_command( + command, + benchmark_script, + summarize_script, + output_dir, + num_iterations, + dry_run, + upkeep=None, + cleanup=None, +): """Benchmark several runs of a long-running command. :type command: str @@ -192,7 +200,10 @@ def benchmark_command(command, benchmark_script, summarize_script, out_file = 'performance%s.csv' % i out_file = os.path.join(performance_dir, out_file) benchmark_args = [ - benchmark_script, command, '--output-file', out_file + benchmark_script, + command, + '--output-file', + out_file, ] if not dry_run: subprocess.check_call(benchmark_args) @@ -210,42 +221,61 @@ def get_default_argparser(): """Get an ArgumentParser with all the base benchmark arguments added in.""" parser = argparse.ArgumentParser() parser.add_argument( - '--no-cleanup', action='store_true', default=False, - help='Do not remove the destination after the tests complete.' + '--no-cleanup', + action='store_true', + default=False, + help='Do not remove the destination after the tests complete.', ) parser.add_argument( - '--recursive', action='store_true', default=False, - help='Indicates that this is a recursive transfer.' 
+ '--recursive', + action='store_true', + default=False, + help='Indicates that this is a recursive transfer.', ) benchmark_script = get_benchmark_script() parser.add_argument( - '--benchmark-script', default=benchmark_script, + '--benchmark-script', + default=benchmark_script, required=benchmark_script is None, - help=('The benchmark script to run the commands with. This should be ' - 'from s3transfer.') + help=( + 'The benchmark script to run the commands with. This should be ' + 'from s3transfer.' + ), ) summarize_script = get_summarize_script() parser.add_argument( - '--summarize-script', default=summarize_script, + '--summarize-script', + default=summarize_script, required=summarize_script is None, - help=('The summarize script to run the commands with. This should be ' - 'from s3transfer.') + help=( + 'The summarize script to run the commands with. This should be ' + 'from s3transfer.' + ), ) parser.add_argument( - '-o', '--result-dir', default='results', + '-o', + '--result-dir', + default='results', help='The directory to output performance results to. Existing ' - 'results will be deleted.' + 'results will be deleted.', ) parser.add_argument( - '--dry-run', default=False, action='store_true', - help='If set, commands will only be printed out, not executed.' + '--dry-run', + default=False, + action='store_true', + help='If set, commands will only be printed out, not executed.', ) parser.add_argument( - '--quiet', default=False, action='store_true', - help='If set, output is suppressed.' + '--quiet', + default=False, + action='store_true', + help='If set, output is suppressed.', ) parser.add_argument( - '-n', '--num-iterations', default=1, type=int, - help='The number of times to run the test.' + '-n', + '--num-iterations', + default=1, + type=int, + help='The number of times to run the test.', ) return parser diff --git a/scripts/performance/perfcmp b/scripts/performance/perfcmp index d2b2c8378e87..8e8d93170972 100755 --- a/scripts/performance/perfcmp +++ b/scripts/performance/perfcmp @@ -7,16 +7,16 @@ the run information:: $ ./perfcmp /results/2016-01-01-1111/ /results/2016-01-01-2222/ """ -import os -import json + import argparse +import json +import os from colorama import Fore, Style from tabulate import tabulate class RunComparison(object): - MEMORY_FIELDS = ['average_memory', 'max_memory'] TIME_FIELDS = ['total_time'] # Fields that aren't memory or time fields, they require @@ -66,7 +66,7 @@ class RunComparison(object): def _format(self, field, value): if field.startswith('std_dev_'): - field = field[len('std_dev_'):] + field = field[len('std_dev_') :] if field in self.MEMORY_FIELDS: return self._human_readable_size(value)[0] elif field in self.TIME_FIELDS: @@ -85,14 +85,15 @@ class RunComparison(object): return '%d Bytes' % bytes_int for i, suffix in enumerate(hummanize_suffixes): - unit = base ** (i+2) + unit = base ** (i + 2) if round((bytes_int / unit) * base) < base: return ['%.2f' % (base * bytes_int / unit), suffix] def diff_percent(self, field): diff_percent = ( - (self.new_summary[field] - self.old_summary[field]) / - float(self.old_summary[field])) * 100 + (self.new_summary[field] - self.old_summary[field]) + / float(self.old_summary[field]) + ) * 100 return diff_percent @@ -105,29 +106,39 @@ def compare_runs(old_dir, new_dir): old_summary = get_summary(old_run_dir) new_summary = get_summary(new_run_dir) comp = RunComparison(old_summary, new_summary) - header = [Style.BRIGHT + dirname + Style.RESET_ALL, - Style.BRIGHT + 'old' + Style.RESET_ALL, - # Numeric suffix 
(MiB, GiB, sec). - '', - 'std_dev', - Style.BRIGHT + 'new' + Style.RESET_ALL, - # Numeric suffix (MiB, GiB, sec). - '', - 'std_dev', - Style.BRIGHT + 'delta' + Style.RESET_ALL] + header = [ + Style.BRIGHT + dirname + Style.RESET_ALL, + Style.BRIGHT + 'old' + Style.RESET_ALL, + # Numeric suffix (MiB, GiB, sec). + '', + 'std_dev', + Style.BRIGHT + 'new' + Style.RESET_ALL, + # Numeric suffix (MiB, GiB, sec). + '', + 'std_dev', + Style.BRIGHT + 'delta' + Style.RESET_ALL, + ] rows = [] for field in comp.iter_field_names(): - row = [field, comp.old(field), comp.old_suffix(field), - comp.old_stddev(field), comp.new(field), - comp.new_suffix(field), comp.new_stddev(field)] + row = [ + field, + comp.old(field), + comp.old_suffix(field), + comp.old_stddev(field), + comp.new(field), + comp.new_suffix(field), + comp.new_stddev(field), + ] diff_percent = comp.diff_percent(field) diff_percent_str = '%.2f%%' % diff_percent if diff_percent < 0: diff_percent_str = ( - Fore.GREEN + diff_percent_str + Style.RESET_ALL) + Fore.GREEN + diff_percent_str + Style.RESET_ALL + ) else: diff_percent_str = ( - Fore.RED + diff_percent_str + Style.RESET_ALL) + Fore.RED + diff_percent_str + Style.RESET_ALL + ) row.append(diff_percent_str) rows.append(row) print(tabulate(rows, headers=header, tablefmt='plain')) diff --git a/scripts/regenerate-configure/Dockerfile b/scripts/regenerate-configure/Dockerfile index be4d15e3cc6b..231549955d99 100644 --- a/scripts/regenerate-configure/Dockerfile +++ b/scripts/regenerate-configure/Dockerfile @@ -23,4 +23,4 @@ RUN make RUN make install WORKDIR /build -RUN autoreconf \ No newline at end of file +RUN autoreconf diff --git a/scripts/regenerate-configure/regenerate-configure b/scripts/regenerate-configure/regenerate-configure index d5e8bd9891b4..e9ea173e53f6 100755 --- a/scripts/regenerate-configure/regenerate-configure +++ b/scripts/regenerate-configure/regenerate-configure @@ -4,7 +4,6 @@ import re from pathlib import Path from subprocess import run - ROOT = Path(__file__).parents[2] DOCKERFILE_PATH = ROOT / "scripts" / "regenerate-configure" / "Dockerfile" IMAGE_RE = re.compile(r"sha256:(?P.*?)\s") @@ -84,7 +83,4 @@ if __name__ == "__main__": help="Do not clean up docker image and container. Useful for debugging.", ) args = parser.parse_args() - main( - not args.no_cleanup, - args.dockerfile_path - ) + main(not args.no_cleanup, args.dockerfile_path) diff --git a/scripts/regenerate-lock-files b/scripts/regenerate-lock-files index 56554c1e1542..90921311d4a9 100755 --- a/scripts/regenerate-lock-files +++ b/scripts/regenerate-lock-files @@ -14,15 +14,15 @@ """This script is to programmatically regenerate the requirements/*-lock.txt files. 
In order to run it you need to have pip-tools installed into the currently active virtual environment.""" + import argparse -import sys import os -from typing import List, ClassVar -from pathlib import Path +import sys from dataclasses import dataclass +from pathlib import Path +from typing import ClassVar, List -from utils import run, BadRCError - +from utils import BadRCError, run ROOT = Path(__file__).parents[1] IS_WINDOWS = sys.platform == "win32" @@ -124,10 +124,10 @@ def show_file(path: Path): def main( - build_directory: Path, - should_show_files: bool, - include_sdist: bool, - include_base: bool + build_directory: Path, + should_show_files: bool, + include_sdist: bool, + include_base: bool, ): builder = LockFileBuilder( source_directory=ROOT, @@ -204,13 +204,24 @@ if __name__ == "__main__": help=("Default base directory where output lock files to be written."), ) parser.add_argument('--show-files', action='store_true') - parser.add_argument('--no-show-files', action='store_false', dest='show_files') + parser.add_argument( + '--no-show-files', action='store_false', dest='show_files' + ) parser.set_defaults(show_files=False) parser.add_argument('--include-sdist', action='store_true') - parser.add_argument('--no-include-sdist', action='store_false', dest='include_sdist') + parser.add_argument( + '--no-include-sdist', action='store_false', dest='include_sdist' + ) parser.set_defaults(include_sdist=True) parser.add_argument('--include-base', action='store_true') - parser.add_argument('--no-include-base', action='store_false', dest='include_base') + parser.add_argument( + '--no-include-base', action='store_false', dest='include_base' + ) parser.set_defaults(include_base=False) args = parser.parse_args() - main(args.output_directory, args.show_files, args.include_sdist, args.include_base) + main( + args.output_directory, + args.show_files, + args.include_sdist, + args.include_base, + ) diff --git a/scripts/utils.py b/scripts/utils.py index 3d5bfaa8c722..065c5d12f2f8 100644 --- a/scripts/utils.py +++ b/scripts/utils.py @@ -1,13 +1,14 @@ import contextlib +import glob import json import os import platform import shutil -import sys import subprocess +import sys import tempfile import zipfile -import glob + class BadRCError(Exception): pass @@ -31,8 +32,9 @@ def run(cmd, cwd=None, env=None, echo=True): stdout, stderr = p.communicate() output = stdout.decode('utf-8') + stderr.decode('utf-8') if p.returncode != 0: - raise BadRCError("Bad rc (%s) for cmd '%s': %s" % ( - p.returncode, cmd, output)) + raise BadRCError( + "Bad rc (%s) for cmd '%s': %s" % (p.returncode, cmd, output) + ) return output