diff --git a/.gitignore b/.gitignore
index ba43271401dc..8549cb17e66c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -77,4 +77,4 @@ autom4te.cache
/depcomp
/install-sh
/missing
-/stamp-h1
\ No newline at end of file
+/stamp-h1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..c562438ec94f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+exclude: "\
+ ^(\
+ .changes|\
+ .github|\
+ awscli/examples|\
+ awscli/topics|\
+ awscli/botocore|\
+ awscli/s3transfer|\
+ awscli/doc|\
+ exe/assets|\
+ tests/functional/cloudformation/deploy_templates/booleans/input.yml|\
+ tests/functional/cloudformation/deploy_templates/nested-tag/input.yml|\
+ tests/|\
+ CHANGELOG.rst|\
+ configure\
+ )"
+repos:
+ - repo: 'https://github.com/pre-commit/pre-commit-hooks'
+ rev: v4.5.0
+ hooks:
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - repo: 'https://github.com/PyCQA/isort'
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.4.8
+ hooks:
+ - id: ruff
+ args: [ --fix ]
+ - id: ruff-format
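
The new `exclude` key is one Python regular expression: inside a double-quoted YAML scalar, a trailing backslash escapes the line break, so the fragments concatenate into a single pattern. A minimal sketch of how such a pattern filters paths (the alternation is abridged here; pre-commit matches its patterns with Python's re module):

    import re

    # Abridged form of the exclude pattern above; YAML line continuations
    # behave like Python implicit string concatenation.
    EXCLUDE = (
        "^("
        ".changes|"
        ".github|"
        "awscli/examples|"
        "tests/|"
        "CHANGELOG.rst|"
        "configure"
        ")"
    )

    for path in (
        "awscli/examples/ec2/run-instances.rst",  # excluded from the hooks
        "tests/unit/customizations/test_ecs.py",  # excluded from the hooks
        "awscli/customizations/ecs/deploy.py",    # checked by the hooks
    ):
        print(path, "->", "excluded" if re.search(EXCLUDE, path) else "checked")
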
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 0a89c7b112f9..0d60b0eda675 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -67,7 +67,7 @@ Also, ensure your commit messages match this format::
Describe your changes in the imperative mood, e.g.
"Add foo to bar", "Update foo component for bar",
"Fix race condition for foo".
-
+
The body of the commit message can include:
* an explanation of the problem and what this change
@@ -120,6 +120,28 @@ can run these commands::
When you push to your remote, the output will contain a URL you
can use to open a pull request.
+
+Codestyle
+---------
+This project uses `ruff <https://github.com/astral-sh/ruff>`__ to enforce
+codestyle requirements. We've codified this process using a tool called
+`pre-commit <https://pre-commit.com/>`__. pre-commit allows us to specify a
+config file with all the tools required for code linting, and surfaces
+either a git commit hook or a single command for enforcing these.
+
+To validate your PR prior to publishing, you can use the following
+`installation guide <https://pre-commit.com/#install>`__ to set up
+pre-commit.
+
+If you don't want to use the git commit hook, you can run the below command
+to automatically perform the codestyle validation::
+
+    pre-commit run

[Two garbled hunks are omitted here: a re-wrap of the "To use the following examples, you must have the AWS CLI installed and configured..." examples preamble (the text itself is unchanged), and the matching re-wrap of ZIP_DOCSTRING ("The path to the zip file of the {param_type} you are uploading...") at the top of the Lambda customization below.]

diff --git a/awscli/customizations/awslambda.py b/awscli/customizations/awslambda.py
--- a/awscli/customizations/awslambda.py
+++ b/awscli/customizations/awslambda.py
@@ -31,14 +31,21 @@
def register_lambda_create_function(cli):
- cli.register('building-argument-table.lambda.create-function',
- ZipFileArgumentHoister('Code').hoist)
- cli.register('building-argument-table.lambda.publish-layer-version',
- ZipFileArgumentHoister('Content').hoist)
- cli.register('building-argument-table.lambda.update-function-code',
- _modify_zipfile_docstring)
- cli.register('process-cli-arg.lambda.update-function-code',
- validate_is_zip_file)
+ cli.register(
+ 'building-argument-table.lambda.create-function',
+ ZipFileArgumentHoister('Code').hoist,
+ )
+ cli.register(
+ 'building-argument-table.lambda.publish-layer-version',
+ ZipFileArgumentHoister('Content').hoist,
+ )
+ cli.register(
+ 'building-argument-table.lambda.update-function-code',
+ _modify_zipfile_docstring,
+ )
+ cli.register(
+ 'process-cli-arg.lambda.update-function-code', validate_is_zip_file
+ )
def validate_is_zip_file(cli_argument, value, **kwargs):
@@ -55,6 +62,7 @@ class ZipFileArgumentHoister(object):
ReplacedZipFileArgument to prevent its usage and recommend the new
top-level injected parameter.
"""
+
def __init__(self, serialized_name):
self._serialized_name = serialized_name
self._name = serialized_name.lower()
@@ -62,8 +70,10 @@ def __init__(self, serialized_name):
def hoist(self, session, argument_table, **kwargs):
help_text = ZIP_DOCSTRING.format(param_type=self._name)
argument_table['zip-file'] = ZipFileArgument(
- 'zip-file', help_text=help_text, cli_type_name='blob',
- serialized_name=self._serialized_name
+ 'zip-file',
+ help_text=help_text,
+ cli_type_name='blob',
+ serialized_name=self._serialized_name,
)
argument = argument_table[self._name]
model = copy.deepcopy(argument.argument_model)
@@ -107,6 +117,7 @@ class ZipFileArgument(CustomArgument):
     --zip-file foo.zip winds up being serialized as
     { 'Code': { 'ZipFile': <contents of foo.zip> } }.
     """
+

[Garbled hunks omitted: the same re-wrapping of long help_text literals in the ddb customization's expression parameters (--projection, --filter, --condition, --key-condition, the put items argument, --consistent-read, and --return-consumed-capacity), in the ec2 run-instances instance-count help (the min:max range form), and in the configure credential-format help (output format, defaulting to process). Only line breaks and indentation changed; the help text itself is identical.]

diff --git a/awscli/customizations/ec2/secgroupsimplification.py b/awscli/customizations/ec2/secgroupsimplification.py
--- a/awscli/customizations/ec2/secgroupsimplification.py
+++ b/awscli/customizations/ec2/secgroupsimplification.py
@@ ... @@ def _add_docs(help_command, **kwargs):
-    msg = ('To specify multiple rules in a single command '
-           'use the <code>--ip-permissions</code> option')
+    msg = (
+        'To specify multiple rules in a single command '
+        'use the <code>--ip-permissions</code> option'
+    )
doc.include_doc_string(msg)
doc.style.end_note()
EVENTS = [
- ('building-argument-table.ec2.authorize-security-group-ingress',
- _add_params),
- ('building-argument-table.ec2.authorize-security-group-egress',
- _add_params),
+ (
+ 'building-argument-table.ec2.authorize-security-group-ingress',
+ _add_params,
+ ),
+ (
+ 'building-argument-table.ec2.authorize-security-group-egress',
+ _add_params,
+ ),
('building-argument-table.ec2.revoke-security-group-ingress', _add_params),
('building-argument-table.ec2.revoke-security-group-egress', _add_params),
- ('operation-args-parsed.ec2.authorize-security-group-ingress',
- _check_args),
+ (
+ 'operation-args-parsed.ec2.authorize-security-group-ingress',
+ _check_args,
+ ),
('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),
('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),
('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),
@@ -95,25 +101,31 @@ def _add_docs(help_command, **kwargs):
('doc-description.ec2.revoke-security-group-ingress', _add_docs),
     ('doc-description.ec2.revoke-security-group-egress', _add_docs),
]
-PROTOCOL_DOCS = ('<p>The IP protocol: <code>tcp</code> | '
-                 '<code>udp</code> | <code>icmp</code></p> '
-                 '<p>(VPC only) Use <code>all</code> to specify all protocols.</p>'
-                 '<p>If this argument is provided without also providing the '
-                 '<code>port</code> argument, then it will be applied to all '
-                 'ports for the specified protocol.</p>')
-PORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'
-             ' A single integer or a range (<code>min-max</code>).</p>'
-             '<p>For ICMP: A single integer or a range (<code>type-code</code>)'
-             ' representing the ICMP type'
-             ' number and the ICMP code number respectively.'
-             ' A value of -1 indicates all ICMP codes for'
-             ' all ICMP types. A value of -1 just for <code>type</code>'
-             ' indicates all ICMP codes for the specified ICMP type.</p>')
+PROTOCOL_DOCS = (
+    '<p>The IP protocol: <code>tcp</code> | '
+    '<code>udp</code> | <code>icmp</code></p> '
+    '<p>(VPC only) Use <code>all</code> to specify all protocols.</p>'
+    '<p>If this argument is provided without also providing the '
+    '<code>port</code> argument, then it will be applied to all '
+    'ports for the specified protocol.</p>'
+)
+PORT_DOCS = (
+    '<p>For TCP or UDP: The range of ports to allow.'
+    ' A single integer or a range (<code>min-max</code>).</p>'
+    '<p>For ICMP: A single integer or a range (<code>type-code</code>)'
+    ' representing the ICMP type'
+    ' number and the ICMP code number respectively.'
+    ' A value of -1 indicates all ICMP codes for'
+    ' all ICMP types. A value of -1 just for <code>type</code>'
+    ' indicates all ICMP codes for the specified ICMP type.</p>'
+)
-CIDR_DOCS = ('<p>The IPv4 address range, in CIDR format.</p>')
-SOURCEGROUP_DOCS = ('<p>The name or ID of the source security group.</p>')
-GROUPOWNER_DOCS = ('The AWS account ID that owns the source security '
-                   'group. Cannot be used when specifying a CIDR IP '
-                   'address.</p>')
+CIDR_DOCS = '<p>The IPv4 address range, in CIDR format.</p>'
+SOURCEGROUP_DOCS = '<p>The name or ID of the source security group.</p>'
+GROUPOWNER_DOCS = (
+    'The AWS account ID that owns the source security '
+    'group. Cannot be used when specifying a CIDR IP '
+    'address.</p>'
+)


 def register_secgroup(event_handler):
@@ -137,19 +149,22 @@ def _build_ip_permissions(params, key, value):

 class ProtocolArgument(CustomArgument):
-
     def add_to_params(self, parameters, value):
         if value:
             try:
                 int_value = int(value)
                 if (int_value < 0 or int_value > 255) and int_value != -1:
-                    msg = ('protocol numbers must be in the range 0-255 '
-                           'or -1 to specify all protocols')
+                    msg = (
+                        'protocol numbers must be in the range 0-255 '
+                        'or -1 to specify all protocols'
+                    )
                     raise ParamValidationError(msg)
             except ValueError:
                 if value not in ('tcp', 'udp', 'icmp', 'all'):
-                    msg = ('protocol parameter should be one of: '
-                           'tcp|udp|icmp|all or any valid protocol number.')
+                    msg = (
+                        'protocol parameter should be one of: '
+                        'tcp|udp|icmp|all or any valid protocol number.'
+                    )
                     raise ParamValidationError(msg)
                 if value == 'all':
                     value = '-1'
@@ -157,7 +172,6 @@ def add_to_params(self, parameters, value):

 class PortArgument(CustomArgument):
-
     def add_to_params(self, parameters, value):
         if value:
             try:
@@ -175,13 +189,14 @@ def add_to_params(self, parameters, value):
                 _build_ip_permissions(parameters, 'FromPort', int(fromstr))
                 _build_ip_permissions(parameters, 'ToPort', int(tostr))
             except ValueError:
-                msg = ('port parameter should be of the '
-                       'form <from[-to]> (e.g. 22 or 22-25)')
+                msg = (
+                    'port parameter should be of the '
+                    'form <from[-to]> (e.g. 22 or 22-25)'
+                )
diff --git a/awscli/customizations/ecs/deploy.py b/awscli/customizations/ecs/deploy.py
--- a/awscli/customizations/ecs/deploy.py
+++ b/awscli/customizations/ecs/deploy.py
@@ ... @@ class ECSDeploy(BasicCommand):
 {
 'name': 'codedeploy-appspec',
- 'help_text': ("The file path where your AWS CodeDeploy appspec "
- "file is located. The appspec file may be in JSON "
- "or YAML format. The <code>TaskDefinition</code> "
- "property will be updated within the appspec with "
- "the newly registered task definition ARN, "
- "overwriting any placeholder values in the file."),
- 'required': True
+ 'help_text': (
+ "The file path where your AWS CodeDeploy appspec "
+ "file is located. The appspec file may be in JSON "
+ "or YAML format. The TaskDefinition
"
+ "property will be updated within the appspec with "
+ "the newly registered task definition ARN, "
+ "overwriting any placeholder values in the file."
+ ),
+ 'required': True,
},
{
'name': 'cluster',
- 'help_text': ("The short name or full Amazon Resource Name "
- "(ARN) of the cluster that your service is "
- "running within. If you do not specify a "
- "cluster, the \"default\" cluster is assumed."),
- 'required': False
+ 'help_text': (
+ "The short name or full Amazon Resource Name "
+ "(ARN) of the cluster that your service is "
+ "running within. If you do not specify a "
+ "cluster, the \"default\" cluster is assumed."
+ ),
+ 'required': False,
},
{
'name': 'codedeploy-application',
- 'help_text': ("The name of the AWS CodeDeploy application "
- "to use for the deployment. The specified "
- "application must use the 'ECS' compute "
- "platform. If you do not specify an "
- "application, the application name "
- "AppECS-[CLUSTER_NAME]-[SERVICE_NAME]
"
- "is assumed."),
- 'required': False
+ 'help_text': (
+ "The name of the AWS CodeDeploy application "
+ "to use for the deployment. The specified "
+ "application must use the 'ECS' compute "
+ "platform. If you do not specify an "
+ "application, the application name "
+ "AppECS-[CLUSTER_NAME]-[SERVICE_NAME]
"
+ "is assumed."
+ ),
+ 'required': False,
},
{
'name': 'codedeploy-deployment-group',
- 'help_text': ("The name of the AWS CodeDeploy deployment "
- "group to use for the deployment. The "
- "specified deployment group must be associated "
- "with the specified ECS service and cluster. "
- "If you do not specify a deployment group, "
- "the deployment group name "
- "DgpECS-[CLUSTER_NAME]-[SERVICE_NAME]
"
- "is assumed."),
- 'required': False
- }
+ 'help_text': (
+ "The name of the AWS CodeDeploy deployment "
+ "group to use for the deployment. The "
+ "specified deployment group must be associated "
+ "with the specified ECS service and cluster. "
+ "If you do not specify a deployment group, "
+ "the deployment group name "
+ "DgpECS-[CLUSTER_NAME]-[SERVICE_NAME]
"
+ "is assumed."
+ ),
+ 'required': False,
+ },
]
- MSG_TASK_DEF_REGISTERED = \
+ MSG_TASK_DEF_REGISTERED = (
"Successfully registered new ECS task definition {arn}\n"
+ )
MSG_CREATED_DEPLOYMENT = "Successfully created deployment {id}\n"
- MSG_SUCCESS = ("Successfully deployed {task_def} to "
- "service '{service}'\n")
+ MSG_SUCCESS = (
+ "Successfully deployed {task_def} to " "service '{service}'\n"
+ )
USER_AGENT_EXTRA = 'md/customization#ecs-deploy'
def _run_main(self, parsed_args, parsed_globals):
-
- register_task_def_kwargs, appspec_obj = \
- self._load_file_args(parsed_args.task_definition,
- parsed_args.codedeploy_appspec)
+ register_task_def_kwargs, appspec_obj = self._load_file_args(
+ parsed_args.task_definition, parsed_args.codedeploy_appspec
+ )
ecs_client_wrapper = ECSClient(
- self._session, parsed_args, parsed_globals, self.USER_AGENT_EXTRA)
+ self._session, parsed_args, parsed_globals, self.USER_AGENT_EXTRA
+ )
self.resources = self._get_resource_names(
- parsed_args, ecs_client_wrapper)
+ parsed_args, ecs_client_wrapper
+ )
codedeploy_client = self._session.create_client(
'codedeploy',
region_name=parsed_globals.region,
verify=parsed_globals.verify_ssl,
- config=config.Config(user_agent_extra=self.USER_AGENT_EXTRA))
+ config=config.Config(user_agent_extra=self.USER_AGENT_EXTRA),
+ )
self._validate_code_deploy_resources(codedeploy_client)
self.wait_time = self._cd_validator.get_deployment_wait_time()
self.task_def_arn = self._register_task_def(
- register_task_def_kwargs, ecs_client_wrapper)
+ register_task_def_kwargs, ecs_client_wrapper
+ )
self._create_and_wait_for_deployment(codedeploy_client, appspec_obj)
return 0
@@ -143,18 +160,19 @@ def _create_and_wait_for_deployment(self, client, appspec):
deployer = CodeDeployer(client, appspec)
deployer.update_task_def_arn(self.task_def_arn)
deployment_id = deployer.create_deployment(
- self.resources['app_name'],
- self.resources['deployment_group_name'])
+ self.resources['app_name'], self.resources['deployment_group_name']
+ )
- sys.stdout.write(self.MSG_CREATED_DEPLOYMENT.format(
- id=deployment_id))
+ sys.stdout.write(self.MSG_CREATED_DEPLOYMENT.format(id=deployment_id))
deployer.wait_for_deploy_success(deployment_id, self.wait_time)
service_name = self.resources['service']
sys.stdout.write(
self.MSG_SUCCESS.format(
- task_def=self.task_def_arn, service=service_name))
+ task_def=self.task_def_arn, service=service_name
+ )
+ )
sys.stdout.flush()
def _get_file_contents(self, file_path):
@@ -163,8 +181,7 @@ def _get_file_contents(self, file_path):
with compat_open(full_path) as f:
return f.read()
except (OSError, IOError, UnicodeDecodeError) as e:
- raise exceptions.FileLoadError(
- file_path=file_path, error=e)
+ raise exceptions.FileLoadError(file_path=file_path, error=e)
def _get_resource_names(self, args, ecs_client):
service_details = ecs_client.get_service_details()
@@ -172,9 +189,11 @@ def _get_resource_names(self, args, ecs_client):
cluster_name = service_details['cluster_name']
application_name = filehelpers.get_app_name(
- service_name, cluster_name, args.codedeploy_application)
+ service_name, cluster_name, args.codedeploy_application
+ )
deployment_group_name = filehelpers.get_deploy_group_name(
- service_name, cluster_name, args.codedeploy_deployment_group)
+ service_name, cluster_name, args.codedeploy_deployment_group
+ )
return {
'service': service_name,
@@ -182,7 +201,7 @@ def _get_resource_names(self, args, ecs_client):
'cluster': cluster_name,
'cluster_arn': service_details['cluster_arn'],
'app_name': application_name,
- 'deployment_group_name': deployment_group_name
+ 'deployment_group_name': deployment_group_name,
}
def _load_file_args(self, task_def_arg, appspec_arg):
@@ -199,8 +218,7 @@ def _register_task_def(self, task_def_kwargs, ecs_client):
task_def_arn = response['taskDefinition']['taskDefinitionArn']
- sys.stdout.write(self.MSG_TASK_DEF_REGISTERED.format(
- arn=task_def_arn))
+ sys.stdout.write(self.MSG_TASK_DEF_REGISTERED.format(arn=task_def_arn))
sys.stdout.flush()
return task_def_arn
@@ -212,10 +230,11 @@ def _validate_code_deploy_resources(self, client):
self._cd_validator = validator
-class CodeDeployer():
-
- MSG_WAITING = ("Waiting for {deployment_id} to succeed "
- "(will wait up to {wait} minutes)...\n")
+class CodeDeployer:
+ MSG_WAITING = (
+ "Waiting for {deployment_id} to succeed "
+ "(will wait up to {wait} minutes)...\n"
+ )
def __init__(self, cd_client, appspec_dict):
self._client = cd_client
@@ -223,13 +242,15 @@ def __init__(self, cd_client, appspec_dict):
def create_deployment(self, app_name, deploy_grp_name):
request_obj = self._get_create_deploy_request(
- app_name, deploy_grp_name)
+ app_name, deploy_grp_name
+ )
try:
response = self._client.create_deployment(**request_obj)
except ClientError as e:
raise exceptions.ServiceClientError(
- action='create deployment', error=e)
+ action='create deployment', error=e
+ )
return response['deploymentId']
@@ -246,9 +267,9 @@ def _get_create_deploy_request(self, app_name, deploy_grp_name):
"revisionType": "AppSpecContent",
"appSpecContent": {
"content": json.dumps(self._appspec_dict),
- "sha256": self._get_appspec_hash()
- }
- }
+ "sha256": self._get_appspec_hash(),
+ },
+ },
}
def update_task_def_arn(self, new_arn):
@@ -270,7 +291,8 @@ def update_task_def_arn(self, new_arn):
appspec_obj = self._appspec_dict
resources_key = filehelpers.find_required_key(
- 'codedeploy-appspec', appspec_obj, 'resources')
+ 'codedeploy-appspec', appspec_obj, 'resources'
+ )
updated_resources = []
# 'resources' is a list of string:obj dictionaries
@@ -280,11 +302,13 @@ def update_task_def_arn(self, new_arn):
resource_content = resource[name]
# get resource properties
properties_key = filehelpers.find_required_key(
- name, resource_content, 'properties')
+ name, resource_content, 'properties'
+ )
properties_content = resource_content[properties_key]
# find task definition property
task_def_key = filehelpers.find_required_key(
- properties_key, properties_content, 'taskDefinition')
+ properties_key, properties_content, 'taskDefinition'
+ )
# insert new task def ARN into resource
properties_content[task_def_key] = new_arn
@@ -305,22 +329,19 @@ def wait_for_deploy_success(self, id, wait_min):
delay_sec = DEFAULT_DELAY_SEC
max_attempts = (wait_min * 60) / delay_sec
- config = {
- 'Delay': delay_sec,
- 'MaxAttempts': max_attempts
- }
+ config = {'Delay': delay_sec, 'MaxAttempts': max_attempts}
self._show_deploy_wait_msg(id, wait_min)
waiter.wait(deploymentId=id, WaiterConfig=config)
def _show_deploy_wait_msg(self, id, wait_min):
sys.stdout.write(
- self.MSG_WAITING.format(deployment_id=id,
- wait=wait_min))
+ self.MSG_WAITING.format(deployment_id=id, wait=wait_min)
+ )
sys.stdout.flush()
-class CodeDeployValidator():
+class CodeDeployValidator:
def __init__(self, cd_client, resources):
self._client = cd_client
self._resource_names = resources
@@ -328,35 +349,42 @@ def __init__(self, cd_client, resources):
def describe_cd_resources(self):
try:
self.app_details = self._client.get_application(
- applicationName=self._resource_names['app_name'])
+ applicationName=self._resource_names['app_name']
+ )
except ClientError as e:
raise exceptions.ServiceClientError(
- action='describe Code Deploy application', error=e)
+ action='describe Code Deploy application', error=e
+ )
try:
dgp = self._resource_names['deployment_group_name']
app = self._resource_names['app_name']
self.deployment_group_details = self._client.get_deployment_group(
- applicationName=app, deploymentGroupName=dgp)
+ applicationName=app, deploymentGroupName=dgp
+ )
except ClientError as e:
raise exceptions.ServiceClientError(
- action='describe Code Deploy deployment group', error=e)
+ action='describe Code Deploy deployment group', error=e
+ )
def get_deployment_wait_time(self):
-
- if (not hasattr(self, 'deployment_group_details') or
- self.deployment_group_details is None):
+ if (
+ not hasattr(self, 'deployment_group_details')
+ or self.deployment_group_details is None
+ ):
return None
else:
dgp_info = self.deployment_group_details['deploymentGroupInfo']
blue_green_info = dgp_info['blueGreenDeploymentConfiguration']
- deploy_ready_wait_min = \
- blue_green_info['deploymentReadyOption']['waitTimeInMinutes']
+ deploy_ready_wait_min = blue_green_info['deploymentReadyOption'][
+ 'waitTimeInMinutes'
+ ]
terminate_key = 'terminateBlueInstancesOnDeploymentSuccess'
- termination_wait_min = \
- blue_green_info[terminate_key]['terminationWaitTimeInMinutes']
+ termination_wait_min = blue_green_info[terminate_key][
+ 'terminationWaitTimeInMinutes'
+ ]
configured_wait = deploy_ready_wait_min + termination_wait_min
@@ -370,7 +398,8 @@ def validate_application(self):
app_name = self._resource_names['app_name']
if self.app_details['application']['computePlatform'] != 'ECS':
raise exceptions.InvalidPlatformError(
- resource='Application', name=app_name)
+ resource='Application', name=app_name
+ )
def validate_deployment_group(self):
dgp = self._resource_names['deployment_group_name']
@@ -384,26 +413,29 @@ def validate_deployment_group(self):
if compute_platform != 'ECS':
raise exceptions.InvalidPlatformError(
- resource='Deployment Group', name=dgp)
+ resource='Deployment Group', name=dgp
+ )
- target_services = \
- self.deployment_group_details['deploymentGroupInfo']['ecsServices']
+ target_services = self.deployment_group_details['deploymentGroupInfo'][
+ 'ecsServices'
+ ]
# either ECS resource names or ARNs can be stored, so check both
for target in target_services:
target_serv = target['serviceName']
if target_serv != service and target_serv != service_arn:
raise exceptions.InvalidProperyError(
- dg_name=dgp, resource='service', resource_name=service)
+ dg_name=dgp, resource='service', resource_name=service
+ )
target_cluster = target['clusterName']
if target_cluster != cluster and target_cluster != cluster_arn:
raise exceptions.InvalidProperyError(
- dg_name=dgp, resource='cluster', resource_name=cluster)
-
+ dg_name=dgp, resource='cluster', resource_name=cluster
+ )
-class ECSClient():
+class ECSClient:
def __init__(self, session, parsed_args, parsed_globals, user_agent_extra):
self._args = parsed_args
self._custom_config = config.Config(user_agent_extra=user_agent_extra)
@@ -412,7 +444,8 @@ def __init__(self, session, parsed_args, parsed_globals, user_agent_extra):
region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
verify=parsed_globals.verify_ssl,
- config=self._custom_config)
+ config=self._custom_config,
+ )
def get_service_details(self):
cluster = self._args.cluster
@@ -422,33 +455,36 @@ def get_service_details(self):
try:
service_response = self._client.describe_services(
- cluster=cluster, services=[self._args.service])
+ cluster=cluster, services=[self._args.service]
+ )
except ClientError as e:
raise exceptions.ServiceClientError(
- action='describe ECS service', error=e)
+ action='describe ECS service', error=e
+ )
if len(service_response['services']) == 0:
raise exceptions.InvalidServiceError(
- service=self._args.service, cluster=cluster)
+ service=self._args.service, cluster=cluster
+ )
service_details = service_response['services'][0]
- cluster_name = \
- filehelpers.get_cluster_name_from_arn(
- service_details['clusterArn'])
+ cluster_name = filehelpers.get_cluster_name_from_arn(
+ service_details['clusterArn']
+ )
return {
'service_arn': service_details['serviceArn'],
'service_name': service_details['serviceName'],
'cluster_arn': service_details['clusterArn'],
- 'cluster_name': cluster_name
+ 'cluster_name': cluster_name,
}
def register_task_definition(self, kwargs):
try:
- response = \
- self._client.register_task_definition(**kwargs)
+ response = self._client.register_task_definition(**kwargs)
except ClientError as e:
raise exceptions.ServiceClientError(
- action='register ECS task definition', error=e)
+ action='register ECS task definition', error=e
+ )
return response
diff --git a/awscli/customizations/ecs/exceptions.py b/awscli/customizations/ecs/exceptions.py
index 0dbc564ef811..52b078d56c58 100644
--- a/awscli/customizations/ecs/exceptions.py
+++ b/awscli/customizations/ecs/exceptions.py
@@ -13,7 +13,8 @@
class ECSError(Exception):
- """ Base class for all ECSErrors."""
+ """Base class for all ECSErrors."""
+
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
@@ -23,8 +24,7 @@ def __init__(self, **kwargs):
class MissingPropertyError(ECSError):
- fmt = \
- "Error: Resource '{resource}' must include property '{prop_name}'"
+ fmt = "Error: Resource '{resource}' must include property '{prop_name}'"
class FileLoadError(ECSError):
@@ -36,8 +36,10 @@ class InvalidPlatformError(ECSError):
class InvalidProperyError(ECSError):
- fmt = ("Error: deployment group '{dg_name}' does not target "
- "ECS {resource} '{resource_name}'")
+ fmt = (
+ "Error: deployment group '{dg_name}' does not target "
+ "ECS {resource} '{resource_name}'"
+ )
class InvalidServiceError(ECSError):
@@ -45,4 +47,4 @@ class InvalidServiceError(ECSError):
class ServiceClientError(ECSError):
- fmt = "Failed to {action}:\n{error}"
\ No newline at end of file
+ fmt = "Failed to {action}:\n{error}"
diff --git a/awscli/customizations/ecs/executecommand.py b/awscli/customizations/ecs/executecommand.py
index a578c73734c4..1da62aaa5626 100644
--- a/awscli/customizations/ecs/executecommand.py
+++ b/awscli/customizations/ecs/executecommand.py
@@ -10,13 +10,13 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import logging
-import json
import errno
-
+import json
+import logging
from subprocess import check_call
+
+from awscli.clidriver import CLIOperationCaller, ServiceOperation
from awscli.compat import ignore_user_entered_signals
-from awscli.clidriver import ServiceOperation, CLIOperationCaller
logger = logging.getLogger(__name__)
@@ -24,17 +24,13 @@
'SessionManagerPlugin is not found. ',
'Please refer to SessionManager Documentation here: ',
'http://docs.aws.amazon.com/console/systems-manager/',
- 'session-manager-plugin-not-found'
+ 'session-manager-plugin-not-found',
)
-TASK_NOT_FOUND = (
- 'The task provided in the request was '
- 'not found.'
-)
+TASK_NOT_FOUND = 'The task provided in the request was ' 'not found.'
class ECSExecuteCommand(ServiceOperation):
-
def create_help_command(self):
help_command = super(ECSExecuteCommand, self).create_help_command()
# change the output shape because the command provides no output.
@@ -43,10 +39,7 @@ def create_help_command(self):
def get_container_runtime_id(client, container_name, task_id, cluster_name):
- describe_tasks_params = {
- "cluster": cluster_name,
- "tasks": [task_id]
- }
+ describe_tasks_params = {"cluster": cluster_name, "tasks": [task_id]}
describe_tasks_response = client.describe_tasks(**describe_tasks_params)
# need to fail here if task has failed in the intermediate time
tasks = describe_tasks_response['tasks']
@@ -64,11 +57,10 @@ def build_ssm_request_paramaters(response, client):
container_name = response['containerName']
# in order to get container run-time id
# we need to make a call to describe-tasks
- container_runtime_id = \
- get_container_runtime_id(client, container_name,
- task_id, cluster_name)
- target = "ecs:{}_{}_{}".format(cluster_name, task_id,
- container_runtime_id)
+ container_runtime_id = get_container_runtime_id(
+ client, container_name, task_id, cluster_name
+ )
+ target = "ecs:{}_{}_{}".format(cluster_name, task_id, container_runtime_id)
ssm_request_params = {"Target": target}
return ssm_request_params
@@ -85,13 +77,18 @@ def invoke(self, service_name, operation_name, parameters, parsed_globals):
# before execute-command-command is made
check_call(["session-manager-plugin"])
client = self._session.create_client(
- service_name, region_name=parsed_globals.region,
+ service_name,
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl)
+ verify=parsed_globals.verify_ssl,
+ )
response = client.execute_command(**parameters)
region_name = client.meta.region_name
- profile_name = self._session.profile \
- if self._session.profile is not None else ''
+ profile_name = (
+ self._session.profile
+ if self._session.profile is not None
+ else ''
+ )
endpoint_url = client.meta.endpoint_url
ssm_request_params = build_ssm_request_paramaters(response, client)
# ignore_user_entered_signals ignores these signals
@@ -102,16 +99,21 @@ def invoke(self, service_name, operation_name, parameters, parsed_globals):
# and handling in there
with ignore_user_entered_signals():
# call executable with necessary input
- check_call(["session-manager-plugin",
- json.dumps(response['session']),
- region_name,
- "StartSession",
- profile_name,
- json.dumps(ssm_request_params),
- endpoint_url])
+ check_call(
+ [
+ "session-manager-plugin",
+ json.dumps(response['session']),
+ region_name,
+ "StartSession",
+ profile_name,
+ json.dumps(ssm_request_params),
+ endpoint_url,
+ ]
+ )
return 0
except OSError as ex:
if ex.errno == errno.ENOENT:
- logger.debug('SessionManagerPlugin is not present',
- exc_info=True)
+ logger.debug(
+ 'SessionManagerPlugin is not present', exc_info=True
+ )
raise ValueError(''.join(ERROR_MESSAGE))
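
`ignore_user_entered_signals` comes from awscli.compat and is not shown in this diff. A hypothetical sketch of what such a context manager does, assuming it temporarily ignores the keyboard-generated signals so that Ctrl+C and friends reach only the foreground session-manager-plugin process:

    import signal
    from contextlib import contextmanager

    @contextmanager
    def ignore_user_entered_signals():
        # POSIX signal names; a real implementation must special-case
        # platforms (e.g. Windows has no SIGQUIT/SIGTSTP).
        sigs = [signal.SIGINT, signal.SIGQUIT, signal.SIGTSTP]
        originals = [signal.signal(s, signal.SIG_IGN) for s in sigs]
        try:
            yield
        finally:
            # Restore the previous handlers on exit.
            for sig, handler in zip(sigs, originals):
                signal.signal(sig, handler)
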
diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py
index 6c8b67a0d81d..8b52ef24d8ae 100644
--- a/awscli/customizations/ecs/filehelpers.py
+++ b/awscli/customizations/ecs/filehelpers.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
+
from ruamel.yaml import YAML
from awscli.customizations.ecs import exceptions
@@ -21,16 +22,17 @@
def find_required_key(resource_name, obj, key):
-
if obj is None:
raise exceptions.MissingPropertyError(
- resource=resource_name, prop_name=key)
+ resource=resource_name, prop_name=key
+ )
result = _get_case_insensitive_key(obj, key)
if result is None:
raise exceptions.MissingPropertyError(
- resource=resource_name, prop_name=key)
+ resource=resource_name, prop_name=key
+ )
else:
return result
diff --git a/awscli/customizations/eks/__init__.py b/awscli/customizations/eks/__init__.py
index 9f3114a64d84..6e5d7e73837d 100644
--- a/awscli/customizations/eks/__init__.py
+++ b/awscli/customizations/eks/__init__.py
@@ -11,8 +11,8 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.eks.update_kubeconfig import UpdateKubeconfigCommand
from awscli.customizations.eks.get_token import GetTokenCommand
+from awscli.customizations.eks.update_kubeconfig import UpdateKubeconfigCommand
def initialize(cli):
diff --git a/awscli/customizations/eks/exceptions.py b/awscli/customizations/eks/exceptions.py
index bf01b2323d97..8bdef92016ce 100644
--- a/awscli/customizations/eks/exceptions.py
+++ b/awscli/customizations/eks/exceptions.py
@@ -13,8 +13,8 @@
class EKSError(Exception):
- """ Base class for all EKSErrors."""
+ """Base class for all EKSErrors."""
class EKSClusterError(EKSError):
- """ Raised when a cluster is not in the correct state."""
+ """Raised when a cluster is not in the correct state."""
diff --git a/awscli/customizations/eks/get_token.py b/awscli/customizations/eks/get_token.py
index c85b86dd7d0e..6b43f569797f 100644
--- a/awscli/customizations/eks/get_token.py
+++ b/awscli/customizations/eks/get_token.py
@@ -11,19 +11,17 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
-import botocore
import json
import os
import sys
-
from datetime import datetime, timedelta
-from botocore.signers import RequestSigner
-from botocore.model import ServiceId
-from awscli.formatter import get_formatter
+import botocore
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.utils import uni_print
-from awscli.customizations.utils import validate_mutually_exclusive
+from awscli.customizations.utils import uni_print, validate_mutually_exclusive
+from awscli.formatter import get_formatter
+from botocore.model import ServiceId
+from botocore.signers import RequestSigner
AUTH_SERVICE = "sts"
AUTH_COMMAND = "GetCallerIdentity"
@@ -116,15 +114,19 @@ def _run_main(self, parsed_args, parsed_globals):
sts_client = client_factory.get_sts_client(
region_name=parsed_globals.region, role_arn=parsed_args.role_arn
)
-
- validate_mutually_exclusive(parsed_args, ['cluster_name'], ['cluster_id'])
+
+ validate_mutually_exclusive(
+ parsed_args, ['cluster_name'], ['cluster_id']
+ )
if parsed_args.cluster_id:
identifier = parsed_args.cluster_id
elif parsed_args.cluster_name:
identifier = parsed_args.cluster_name
else:
- return ValueError("Either parameter --cluster-name or --cluster-id must be specified.")
+ return ValueError(
+ "Either parameter --cluster-name or --cluster-id must be specified."
+ )
token = TokenGenerator(sts_client).get_token(identifier)
@@ -273,4 +275,6 @@ def _retrieve_k8s_aws_id(self, params, context, **kwargs):
def _inject_k8s_aws_id_header(self, request, **kwargs):
if K8S_AWS_ID_HEADER in request.context:
- request.headers[K8S_AWS_ID_HEADER] = request.context[K8S_AWS_ID_HEADER]
+ request.headers[K8S_AWS_ID_HEADER] = request.context[
+ K8S_AWS_ID_HEADER
+ ]
diff --git a/awscli/customizations/eks/kubeconfig.py b/awscli/customizations/eks/kubeconfig.py
index f4c69039635b..101e70a41c8a 100644
--- a/awscli/customizations/eks/kubeconfig.py
+++ b/awscli/customizations/eks/kubeconfig.py
@@ -15,38 +15,40 @@
import os
import ruamel.yaml as yaml
-from botocore.compat import OrderedDict
from awscli.compat import compat_open
from awscli.customizations.eks.exceptions import EKSError
from awscli.customizations.eks.ordered_yaml import (
+ ordered_yaml_dump,
ordered_yaml_load,
- ordered_yaml_dump
)
+from botocore.compat import OrderedDict
class KubeconfigError(EKSError):
- """ Base class for all kubeconfig errors."""
+ """Base class for all kubeconfig errors."""
class KubeconfigCorruptedError(KubeconfigError):
- """ Raised when a kubeconfig cannot be parsed."""
+ """Raised when a kubeconfig cannot be parsed."""
class KubeconfigInaccessableError(KubeconfigError):
- """ Raised when a kubeconfig cannot be opened for read/writing."""
+ """Raised when a kubeconfig cannot be opened for read/writing."""
def _get_new_kubeconfig_content():
- return OrderedDict([
- ("apiVersion", "v1"),
- ("clusters", []),
- ("contexts", []),
- ("current-context", ""),
- ("kind", "Config"),
- ("preferences", OrderedDict()),
- ("users", [])
- ])
+ return OrderedDict(
+ [
+ ("apiVersion", "v1"),
+ ("clusters", []),
+ ("contexts", []),
+ ("current-context", ""),
+ ("kind", "Config"),
+ ("preferences", OrderedDict()),
+ ("users", []),
+ ]
+ )
class Kubeconfig(object):
@@ -57,7 +59,7 @@ def __init__(self, path, content=None):
self.content = content
def dump_content(self):
- """ Return the stored content in yaml format. """
+ """Return the stored content in yaml format."""
return ordered_yaml_dump(self.content)
def has_cluster(self, name):
@@ -67,14 +69,17 @@ def has_cluster(self, name):
"""
if self.content.get('clusters') is None:
return False
- return name in [cluster['name']
- for cluster in self.content['clusters'] if 'name' in cluster]
+ return name in [
+ cluster['name']
+ for cluster in self.content['clusters']
+ if 'name' in cluster
+ ]
def __eq__(self, other):
return (
- isinstance(other, Kubeconfig)
- and self.path == other.path
- and self.content == other.content
+ isinstance(other, Kubeconfig)
+ and self.path == other.path
+ and self.content == other.content
)
@@ -92,8 +97,9 @@ def validate_config(self, config):
:type config: Kubeconfig
"""
if not isinstance(config, Kubeconfig):
- raise KubeconfigCorruptedError("Internal error: "
- f"Not a {Kubeconfig}.")
+ raise KubeconfigCorruptedError(
+ "Internal error: " f"Not a {Kubeconfig}."
+ )
self._validate_config_types(config)
self._validate_list_entry_types(config)
@@ -108,9 +114,11 @@ def _validate_config_types(self, config):
if not isinstance(config.content, dict):
raise KubeconfigCorruptedError(f"Content not a {dict}.")
for key, value in self._validation_content.items():
- if (key in config.content and
- config.content[key] is not None and
- not isinstance(config.content[key], type(value))):
+ if (
+ key in config.content
+ and config.content[key] is not None
+ and not isinstance(config.content[key], type(value))
+ ):
raise KubeconfigCorruptedError(
f"{key} is wrong type: {type(config.content[key])} "
f"(Should be {type(value)})"
@@ -125,19 +133,19 @@ def _validate_list_entry_types(self, config):
:type config: Kubeconfig
"""
for key, value in self._validation_content.items():
- if (key in config.content and
- type(config.content[key]) == list):
+ if key in config.content and type(config.content[key]) == list:
for element in config.content[key]:
if not isinstance(element, OrderedDict):
raise KubeconfigCorruptedError(
- f"Entry in {key} not a {dict}. ")
+ f"Entry in {key} not a {dict}. "
+ )
class KubeconfigLoader(object):
- def __init__(self, validator = None):
+ def __init__(self, validator=None):
if validator is None:
- validator=KubeconfigValidator()
- self._validator=validator
+ validator = KubeconfigValidator()
+ self._validator = validator
def load_kubeconfig(self, path):
"""
@@ -161,15 +169,17 @@ def load_kubeconfig(self, path):
loaded_content = ordered_yaml_load(stream)
except IOError as e:
if e.errno == errno.ENOENT:
- loaded_content=None
+ loaded_content = None
else:
raise KubeconfigInaccessableError(
- f"Can't open kubeconfig for reading: {e}")
+ f"Can't open kubeconfig for reading: {e}"
+ )
except yaml.YAMLError as e:
raise KubeconfigCorruptedError(
- f"YamlError while loading kubeconfig: {e}")
+ f"YamlError while loading kubeconfig: {e}"
+ )
- loaded_config=Kubeconfig(path, loaded_content)
+ loaded_config = Kubeconfig(path, loaded_content)
self._validator.validate_config(loaded_config)
return loaded_config
@@ -187,21 +197,24 @@ def write_kubeconfig(self, config):
:raises KubeconfigInaccessableError: if the kubeconfig
can't be opened for writing
"""
- directory=os.path.dirname(config.path)
+ directory = os.path.dirname(config.path)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise KubeconfigInaccessableError(
- f"Can't create directory for writing: {e}")
+ f"Can't create directory for writing: {e}"
+ )
try:
with compat_open(
- config.path, "w+", access_permissions=0o600) as stream:
+ config.path, "w+", access_permissions=0o600
+ ) as stream:
ordered_yaml_dump(config.content, stream)
except IOError as e:
raise KubeconfigInaccessableError(
- f"Can't open kubeconfig for writing: {e}")
+ f"Can't open kubeconfig for writing: {e}"
+ )
class KubeconfigAppender(object):
@@ -213,42 +226,48 @@ def insert_entry(self, config, key, new_entry):
:param config: The kubeconfig to insert an entry into
:type config: Kubeconfig
"""
- entries=self._setdefault_existing_entries(config, key)
- same_name_index=self._index_same_name(entries, new_entry)
+ entries = self._setdefault_existing_entries(config, key)
+ same_name_index = self._index_same_name(entries, new_entry)
if same_name_index is None:
entries.append(new_entry)
else:
- entries[same_name_index]=new_entry
+ entries[same_name_index] = new_entry
return config
def _setdefault_existing_entries(self, config, key):
- config.content[key]=config.content.get(key) or []
- entries=config.content[key]
+ config.content[key] = config.content.get(key) or []
+ entries = config.content[key]
if not isinstance(entries, list):
- raise KubeconfigError(f"Tried to insert into {key}, "
- f"which is a {type(entries)} "
- f"not a {list}")
+ raise KubeconfigError(
+ f"Tried to insert into {key}, "
+ f"which is a {type(entries)} "
+ f"not a {list}"
+ )
return entries
def _index_same_name(self, entries, new_entry):
if "name" in new_entry:
- name_to_search=new_entry["name"]
+ name_to_search = new_entry["name"]
for i, entry in enumerate(entries):
if "name" in entry and entry["name"] == name_to_search:
return i
return None
- def _make_context(self, cluster, user, alias = None):
- """ Generate a context to associate cluster and user with a given alias."""
- return OrderedDict([
- ("context", OrderedDict([
- ("cluster", cluster["name"]),
- ("user", user["name"])
- ])),
- ("name", alias or user["name"])
- ])
-
- def insert_cluster_user_pair(self, config, cluster, user, alias = None):
+ def _make_context(self, cluster, user, alias=None):
+ """Generate a context to associate cluster and user with a given alias."""
+ return OrderedDict(
+ [
+ (
+ "context",
+ OrderedDict(
+ [("cluster", cluster["name"]), ("user", user["name"])]
+ ),
+ ),
+ ("name", alias or user["name"]),
+ ]
+ )
+
+ def insert_cluster_user_pair(self, config, cluster, user, alias=None):
"""
Insert the passed cluster entry and user entry,
then make a context to associate them
@@ -270,11 +289,11 @@ def insert_cluster_user_pair(self, config, cluster, user, alias = None):
:return: The generated context
:rtype: OrderedDict
"""
- context=self._make_context(cluster, user, alias = alias)
+ context = self._make_context(cluster, user, alias=alias)
self.insert_entry(config, "clusters", cluster)
self.insert_entry(config, "users", user)
self.insert_entry(config, "contexts", context)
- config.content["current-context"]=context["name"]
+ config.content["current-context"] = context["name"]
return context
diff --git a/awscli/customizations/eks/ordered_yaml.py b/awscli/customizations/eks/ordered_yaml.py
index 23834e0d0623..851627129eb1 100644
--- a/awscli/customizations/eks/ordered_yaml.py
+++ b/awscli/customizations/eks/ordered_yaml.py
@@ -11,25 +11,29 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import ruamel.yaml
-from botocore.compat import OrderedDict
from awscli.utils import dump_yaml_to_str
+from botocore.compat import OrderedDict
+
def _ordered_constructor(loader, node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
+
def _ordered_representer(dumper, data):
return dumper.represent_mapping(
- ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
- data.items())
+ ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
+ )
+
def ordered_yaml_load(stream):
- """ Load an OrderedDict object from a yaml stream."""
+ """Load an OrderedDict object from a yaml stream."""
yaml = ruamel.yaml.YAML(typ="safe", pure=True)
yaml.Constructor.add_constructor(
ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
- _ordered_constructor)
+ _ordered_constructor,
+ )
return yaml.load(stream)
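
Taken together, the two handlers round-trip OrderedDicts through YAML: the constructor builds mappings as OrderedDict on load, and the representer emits keys in insertion order on dump. A self-contained sketch (the real dump path goes through awscli.utils.dump_yaml_to_str, which is not shown here):

    import io
    from collections import OrderedDict

    import ruamel.yaml

    def _ordered_constructor(loader, node):
        loader.flatten_mapping(node)
        return OrderedDict(loader.construct_pairs(node))

    def _ordered_representer(dumper, data):
        return dumper.represent_mapping(
            ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
        )

    yaml = ruamel.yaml.YAML(typ="safe", pure=True)
    yaml.Constructor.add_constructor(
        ruamel.yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        _ordered_constructor,
    )
    yaml.Representer.add_representer(OrderedDict, _ordered_representer)

    doc = OrderedDict([("kind", "Config"), ("apiVersion", "v1"), ("users", [])])
    buf = io.StringIO()
    yaml.dump(doc, buf)                    # keys stay in insertion order
    assert yaml.load(buf.getvalue()) == doc
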
diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py
index 8bce83525bbb..12f19e4534b1 100644
--- a/awscli/customizations/eks/update_kubeconfig.py
+++ b/awscli/customizations/eks/update_kubeconfig.py
@@ -13,21 +13,20 @@
import logging
import os
-from botocore.compat import OrderedDict
-
from awscli.compat import is_windows
from awscli.customizations.commands import BasicCommand
from awscli.customizations.eks.exceptions import EKSClusterError
from awscli.customizations.eks.kubeconfig import (
Kubeconfig,
+ KubeconfigAppender,
KubeconfigError,
KubeconfigLoader,
- KubeconfigWriter,
KubeconfigValidator,
- KubeconfigAppender
+ KubeconfigWriter,
)
from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump
from awscli.customizations.utils import uni_print
+from botocore.compat import OrderedDict
LOG = logging.getLogger(__name__)
@@ -37,75 +36,88 @@
# this can be safely changed to default to writing "v1"
API_VERSION = "client.authentication.k8s.io/v1beta1"
+
class UpdateKubeconfigCommand(BasicCommand):
NAME = 'update-kubeconfig'
DESCRIPTION = BasicCommand.FROM_FILE(
- 'eks',
- 'update-kubeconfig',
- '_description.rst'
+ 'eks', 'update-kubeconfig', '_description.rst'
)
ARG_TABLE = [
{
'name': 'name',
'dest': 'cluster_name',
- 'help_text': ("The name of the cluster for which "
- "to create a kubeconfig entry. "
- "This cluster must exist in your account and in the "
- "specified or configured default Region "
- "for your AWS CLI installation."),
- 'required': True
+ 'help_text': (
+ "The name of the cluster for which "
+ "to create a kubeconfig entry. "
+ "This cluster must exist in your account and in the "
+ "specified or configured default Region "
+ "for your AWS CLI installation."
+ ),
+ 'required': True,
},
{
'name': 'kubeconfig',
- 'help_text': ("Optionally specify a kubeconfig file to append "
- "with your configuration. "
- "By default, the configuration is written to the "
- "first file path in the KUBECONFIG "
- "environment variable (if it is set) "
- "or the default kubeconfig path (.kube/config) "
- "in your home directory."),
- 'required': False
+ 'help_text': (
+ "Optionally specify a kubeconfig file to append "
+ "with your configuration. "
+ "By default, the configuration is written to the "
+ "first file path in the KUBECONFIG "
+ "environment variable (if it is set) "
+ "or the default kubeconfig path (.kube/config) "
+ "in your home directory."
+ ),
+ 'required': False,
},
{
'name': 'role-arn',
- 'help_text': ("To assume a role for cluster authentication, "
- "specify an IAM role ARN with this option. "
- "For example, if you created a cluster "
- "while assuming an IAM role, "
- "then you must also assume that role to "
- "connect to the cluster the first time."),
- 'required': False
+ 'help_text': (
+ "To assume a role for cluster authentication, "
+ "specify an IAM role ARN with this option. "
+ "For example, if you created a cluster "
+ "while assuming an IAM role, "
+ "then you must also assume that role to "
+ "connect to the cluster the first time."
+ ),
+ 'required': False,
},
{
'name': 'dry-run',
'action': 'store_true',
'default': False,
- 'help_text': ("Print the merged kubeconfig to stdout instead of "
- "writing it to the specified file."),
- 'required': False
+ 'help_text': (
+ "Print the merged kubeconfig to stdout instead of "
+ "writing it to the specified file."
+ ),
+ 'required': False,
},
{
'name': 'verbose',
'action': 'store_true',
'default': False,
- 'help_text': ("Print more detailed output "
- "when writing to the kubeconfig file, "
- "including the appended entries.")
+ 'help_text': (
+ "Print more detailed output "
+ "when writing to the kubeconfig file, "
+ "including the appended entries."
+ ),
},
{
'name': 'alias',
- 'help_text': ("Alias for the cluster context name. "
- "Defaults to match cluster ARN."),
- 'required': False
+ 'help_text': (
+ "Alias for the cluster context name. "
+ "Defaults to match cluster ARN."
+ ),
+ 'required': False,
},
{
'name': 'user-alias',
- 'help_text': ("Alias for the generated user name. "
- "Defaults to match cluster ARN."),
- 'required': False
- }
+ 'help_text': (
+ "Alias for the generated user name. "
+ "Defaults to match cluster ARN."
+ ),
+ 'required': False,
+ },
]
def _display_entries(self, entries):
@@ -121,25 +133,25 @@ def _display_entries(self, entries):
uni_print("\n")
def _run_main(self, parsed_args, parsed_globals):
- client = EKSClient(self._session,
- parsed_args=parsed_args,
- parsed_globals=parsed_globals)
+ client = EKSClient(
+ self._session,
+ parsed_args=parsed_args,
+ parsed_globals=parsed_globals,
+ )
new_cluster_dict = client.get_cluster_entry()
- new_user_dict = client.get_user_entry(user_alias=parsed_args.user_alias)
+ new_user_dict = client.get_user_entry(
+ user_alias=parsed_args.user_alias
+ )
config_selector = KubeconfigSelector(
- os.environ.get("KUBECONFIG", ""),
- parsed_args.kubeconfig
- )
- config = config_selector.choose_kubeconfig(
- new_cluster_dict["name"]
+ os.environ.get("KUBECONFIG", ""), parsed_args.kubeconfig
)
+ config = config_selector.choose_kubeconfig(new_cluster_dict["name"])
updating_existing = config.has_cluster(new_cluster_dict["name"])
appender = KubeconfigAppender()
- new_context_dict = appender.insert_cluster_user_pair(config,
- new_cluster_dict,
- new_user_dict,
- parsed_args.alias)
+ new_context_dict = appender.insert_cluster_user_pair(
+ config, new_cluster_dict, new_user_dict, parsed_args.alias
+ )
if parsed_args.dry_run:
uni_print(config.dump_content())
@@ -148,27 +160,27 @@ def _run_main(self, parsed_args, parsed_globals):
writer.write_kubeconfig(config)
if updating_existing:
- uni_print("Updated context {0} in {1}\n".format(
- new_context_dict["name"], config.path
- ))
+ uni_print(
+ "Updated context {0} in {1}\n".format(
+ new_context_dict["name"], config.path
+ )
+ )
else:
- uni_print("Added new context {0} to {1}\n".format(
- new_context_dict["name"], config.path
- ))
+ uni_print(
+ "Added new context {0} to {1}\n".format(
+ new_context_dict["name"], config.path
+ )
+ )
if parsed_args.verbose:
- self._display_entries([
- new_context_dict,
- new_user_dict,
- new_cluster_dict
- ])
+ self._display_entries(
+ [new_context_dict, new_user_dict, new_cluster_dict]
+ )
return 0
class KubeconfigSelector(object):
-
- def __init__(self, env_variable, path_in, validator=None,
- loader=None):
+ def __init__(self, env_variable, path_in, validator=None, loader=None):
"""
Parse KUBECONFIG into a list of absolute paths.
Also replace the empty list with DEFAULT_PATH
@@ -194,9 +206,11 @@ def __init__(self, env_variable, path_in, validator=None,
# Get the list of paths from the environment variable
if env_variable == "":
env_variable = DEFAULT_PATH
- self._paths = [self._expand_path(element)
- for element in env_variable.split(os.pathsep)
- if len(element.strip()) > 0]
+ self._paths = [
+ self._expand_path(element)
+ for element in env_variable.split(os.pathsep)
+ if len(element.strip()) > 0
+ ]
if len(self._paths) == 0:
self._paths = [DEFAULT_PATH]
@@ -219,9 +233,9 @@ def choose_kubeconfig(self, cluster_name):
loaded_config = self._loader.load_kubeconfig(candidate_path)
if loaded_config.has_cluster(cluster_name):
- LOG.debug("Found entry to update at {0}".format(
- candidate_path
- ))
+ LOG.debug(
+ "Found entry to update at {0}".format(candidate_path)
+ )
return loaded_config
except KubeconfigError as e:
LOG.warning("Passing {0}:{1}".format(candidate_path, e))
@@ -232,7 +246,7 @@ def choose_kubeconfig(self, cluster_name):
return self._loader.load_kubeconfig(self._paths[0])
def _expand_path(self, path):
- """ A helper to expand a path to a full absolute path. """
+ """A helper to expand a path to a full absolute path."""
return os.path.abspath(os.path.expanduser(path))
@@ -259,17 +273,22 @@ def cluster_description(self):
"eks",
region_name=self._parsed_globals.region,
endpoint_url=self._parsed_globals.endpoint_url,
- verify=self._parsed_globals.verify_ssl
+ verify=self._parsed_globals.verify_ssl,
)
full_description = client.describe_cluster(name=self._cluster_name)
self._cluster_description = full_description["cluster"]
if "status" not in self._cluster_description:
raise EKSClusterError("Cluster not found")
- if self._cluster_description["status"] not in ["ACTIVE", "UPDATING"]:
- raise EKSClusterError("Cluster status is {0}".format(
- self._cluster_description["status"]
- ))
+ if self._cluster_description["status"] not in [
+ "ACTIVE",
+ "UPDATING",
+ ]:
+ raise EKSClusterError(
+ "Cluster status is {0}".format(
+ self._cluster_description["status"]
+ )
+ )
return self._cluster_description
@@ -279,17 +298,26 @@ def get_cluster_entry(self):
the previously obtained description.
"""
- cert_data = self.cluster_description.get("certificateAuthority", {}).get("data", "")
+ cert_data = self.cluster_description.get(
+ "certificateAuthority", {}
+ ).get("data", "")
endpoint = self.cluster_description.get("endpoint")
arn = self.cluster_description.get("arn")
- return OrderedDict([
- ("cluster", OrderedDict([
- ("certificate-authority-data", cert_data),
- ("server", endpoint)
- ])),
- ("name", arn)
- ])
+ return OrderedDict(
+ [
+ (
+ "cluster",
+ OrderedDict(
+ [
+ ("certificate-authority-data", cert_data),
+ ("server", endpoint),
+ ]
+ ),
+ ),
+ ("name", arn),
+ ]
+ )
def get_user_entry(self, user_alias=None):
"""
@@ -307,37 +335,54 @@ def get_user_entry(self, user_alias=None):
cluster_identification_parameter = "--cluster-id"
cluster_identification_value = self.cluster_description.get("id")
- generated_user = OrderedDict([
- ("name", user_alias or self.cluster_description.get("arn", "")),
- ("user", OrderedDict([
- ("exec", OrderedDict([
- ("apiVersion", API_VERSION),
- ("args",
+ generated_user = OrderedDict(
+ [
+ (
+ "name",
+ user_alias or self.cluster_description.get("arn", ""),
+ ),
+ (
+ "user",
+ OrderedDict(
[
- "--region",
- region,
- "eks",
- "get-token",
- cluster_identification_parameter,
- cluster_identification_value,
- "--output",
- "json",
- ]),
- ("command", "aws"),
- ]))
- ]))
- ])
+ (
+ "exec",
+ OrderedDict(
+ [
+ ("apiVersion", API_VERSION),
+ (
+ "args",
+ [
+ "--region",
+ region,
+ "eks",
+ "get-token",
+ cluster_identification_parameter,
+ cluster_identification_value,
+ "--output",
+ "json",
+ ],
+ ),
+ ("command", "aws"),
+ ]
+ ),
+ )
+ ]
+ ),
+ ),
+ ]
+ )
if self._parsed_args.role_arn is not None:
- generated_user["user"]["exec"]["args"].extend([
- "--role",
- self._parsed_args.role_arn
- ])
+ generated_user["user"]["exec"]["args"].extend(
+ ["--role", self._parsed_args.role_arn]
+ )
if self._session.profile:
- generated_user["user"]["exec"]["env"] = [OrderedDict([
- ("name", "AWS_PROFILE"),
- ("value", self._session.profile)
- ])]
+ generated_user["user"]["exec"]["env"] = [
+ OrderedDict(
+ [("name", "AWS_PROFILE"), ("value", self._session.profile)]
+ )
+ ]
return generated_user
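
KubeconfigSelector reduces the KUBECONFIG environment variable to a list of absolute candidate paths before choosing which file to update. A small sketch of that path-selection logic (DEFAULT_PATH is defined outside this diff and is assumed here to be ~/.kube/config):

    import os

    DEFAULT_PATH = os.path.expanduser("~/.kube/config")  # assumption

    def candidate_paths(env_value):
        # Split on os.pathsep, drop empty entries, expand each to an
        # absolute path, and fall back to the default kubeconfig location.
        paths = [
            os.path.abspath(os.path.expanduser(p))
            for p in env_value.split(os.pathsep)
            if p.strip()
        ]
        return paths or [DEFAULT_PATH]

    print(candidate_paths(""))  # -> [<home>/.kube/config]
    print(candidate_paths(os.pathsep.join(["~/a/config", "~/b/config"])))
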
diff --git a/awscli/customizations/emr/addinstancegroups.py b/awscli/customizations/emr/addinstancegroups.py
index eb5282393589..d4f7ed986065 100644
--- a/awscli/customizations/emr/addinstancegroups.py
+++ b/awscli/customizations/emr/addinstancegroups.py
@@ -12,10 +12,12 @@
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import argumentschema
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import helptext
-from awscli.customizations.emr import instancegroupsutils
+from awscli.customizations.emr import (
+ argumentschema,
+ emrutils,
+ helptext,
+ instancegroupsutils,
+)
from awscli.customizations.emr.command import Command
@@ -23,29 +25,46 @@ class AddInstanceGroups(Command):
NAME = 'add-instance-groups'
DESCRIPTION = 'Adds an instance group to a running cluster.'
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID},
- {'name': 'instance-groups', 'required': True,
- 'help_text': helptext.INSTANCE_GROUPS,
- 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ },
+ {
+ 'name': 'instance-groups',
+ 'required': True,
+ 'help_text': helptext.INSTANCE_GROUPS,
+ 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA,
+ },
]
def _run_main_command(self, parsed_args, parsed_globals):
parameters = {'JobFlowId': parsed_args.cluster_id}
- parameters['InstanceGroups'] = \
+ parameters['InstanceGroups'] = (
instancegroupsutils.build_instance_groups(
- parsed_args.instance_groups)
+ parsed_args.instance_groups
+ )
+ )
add_instance_groups_response = emrutils.call(
- self._session, 'add_instance_groups', parameters,
- self.region, parsed_globals.endpoint_url,
- parsed_globals.verify_ssl)
+ self._session,
+ 'add_instance_groups',
+ parameters,
+ self.region,
+ parsed_globals.endpoint_url,
+ parsed_globals.verify_ssl,
+ )
constructed_result = self._construct_result(
- add_instance_groups_response)
+ add_instance_groups_response
+ )
- emrutils.display_response(self._session, 'add_instance_groups',
- constructed_result, parsed_globals)
+ emrutils.display_response(
+ self._session,
+ 'add_instance_groups',
+ constructed_result,
+ parsed_globals,
+ )
return 0
def _construct_result(self, add_instance_groups_result):
@@ -55,12 +74,15 @@ def _construct_result(self, add_instance_groups_result):
if add_instance_groups_result is not None:
jobFlowId = add_instance_groups_result.get('JobFlowId')
instanceGroupIds = add_instance_groups_result.get(
- 'InstanceGroupIds')
+ 'InstanceGroupIds'
+ )
clusterArn = add_instance_groups_result.get('ClusterArn')
if jobFlowId is not None and instanceGroupIds is not None:
- return {'ClusterId': jobFlowId,
- 'InstanceGroupIds': instanceGroupIds,
- 'ClusterArn': clusterArn}
+ return {
+ 'ClusterId': jobFlowId,
+ 'InstanceGroupIds': instanceGroupIds,
+ 'ClusterArn': clusterArn,
+ }
else:
return {}
diff --git a/awscli/customizations/emr/addsteps.py b/awscli/customizations/emr/addsteps.py
index b816d2eee76a..0db662fcd055 100644
--- a/awscli/customizations/emr/addsteps.py
+++ b/awscli/customizations/emr/addsteps.py
@@ -11,50 +11,60 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import argumentschema
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import helptext
-from awscli.customizations.emr import steputils
+from awscli.customizations.emr import (
+ argumentschema,
+ emrutils,
+ helptext,
+ steputils,
+)
from awscli.customizations.emr.command import Command
class AddSteps(Command):
NAME = 'add-steps'
- DESCRIPTION = ('Add a list of steps to a cluster.')
+ DESCRIPTION = 'Add a list of steps to a cluster.'
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID
- },
- {'name': 'steps',
- 'required': True,
- 'nargs': '+',
- 'schema': argumentschema.STEPS_SCHEMA,
- 'help_text': helptext.STEPS
- },
- {'name': 'execution-role-arn',
- 'required': False,
- 'help_text': helptext.EXECUTION_ROLE_ARN
- }
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ },
+ {
+ 'name': 'steps',
+ 'required': True,
+ 'nargs': '+',
+ 'schema': argumentschema.STEPS_SCHEMA,
+ 'help_text': helptext.STEPS,
+ },
+ {
+ 'name': 'execution-role-arn',
+ 'required': False,
+ 'help_text': helptext.EXECUTION_ROLE_ARN,
+ },
]
def _run_main_command(self, parsed_args, parsed_globals):
parsed_steps = parsed_args.steps
release_label = emrutils.get_release_label(
- parsed_args.cluster_id, self._session, self.region,
- parsed_globals.endpoint_url, parsed_globals.verify_ssl)
+ parsed_args.cluster_id,
+ self._session,
+ self.region,
+ parsed_globals.endpoint_url,
+ parsed_globals.verify_ssl,
+ )
step_list = steputils.build_step_config_list(
- parsed_step_list=parsed_steps, region=self.region,
- release_label=release_label)
- parameters = {
- 'JobFlowId': parsed_args.cluster_id,
- 'Steps': step_list
- }
+ parsed_step_list=parsed_steps,
+ region=self.region,
+ release_label=release_label,
+ )
+ parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': step_list}
if parsed_args.execution_role_arn is not None:
parameters['ExecutionRoleArn'] = parsed_args.execution_role_arn
- emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
- parameters, parsed_globals)
+ emrutils.call_and_display_response(
+ self._session, 'AddJobFlowSteps', parameters, parsed_globals
+ )
return 0
diff --git a/awscli/customizations/emr/addtags.py b/awscli/customizations/emr/addtags.py
index 8332d9a54a51..fcedc2026755 100644
--- a/awscli/customizations/emr/addtags.py
+++ b/awscli/customizations/emr/addtags.py
@@ -13,13 +13,13 @@
from awscli.arguments import CustomArgument
-from awscli.customizations.emr import helptext
-from awscli.customizations.emr import emrutils
+from awscli.customizations.emr import emrutils, helptext
def modify_tags_argument(argument_table, **kwargs):
- argument_table['tags'] = TagsArgument('tags', required=True,
- help_text=helptext.TAGS, nargs='+')
+ argument_table['tags'] = TagsArgument(
+ 'tags', required=True, help_text=helptext.TAGS, nargs='+'
+ )
class TagsArgument(CustomArgument):
diff --git a/awscli/customizations/emr/applicationutils.py b/awscli/customizations/emr/applicationutils.py
index 8a11eb3ab092..4072fc4126c8 100644
--- a/awscli/customizations/emr/applicationutils.py
+++ b/awscli/customizations/emr/applicationutils.py
@@ -11,14 +11,11 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import constants, emrutils, exceptions
from awscli.customizations.exceptions import ParamValidationError
-def build_applications(region,
- parsed_applications, ami_version=None):
+def build_applications(region, parsed_applications, ami_version=None):
app_list = []
step_list = []
ba_list = []
@@ -28,38 +25,41 @@ def build_applications(region,
if app_name == constants.HIVE:
hive_version = constants.LATEST
- step_list.append(
- _build_install_hive_step(region=region))
+ step_list.append(_build_install_hive_step(region=region))
args = app_config.get('Args')
if args is not None:
hive_site_path = _find_matching_arg(
- key=constants.HIVE_SITE_KEY, args_list=args)
+ key=constants.HIVE_SITE_KEY, args_list=args
+ )
if hive_site_path is not None:
step_list.append(
_build_install_hive_site_step(
- region=region,
- hive_site_path=hive_site_path))
+ region=region, hive_site_path=hive_site_path
+ )
+ )
elif app_name == constants.PIG:
pig_version = constants.LATEST
- step_list.append(
- _build_pig_install_step(
- region=region))
+ step_list.append(_build_pig_install_step(region=region))
elif app_name == constants.GANGLIA:
ba_list.append(
- _build_ganglia_install_bootstrap_action(
- region=region))
+ _build_ganglia_install_bootstrap_action(region=region)
+ )
elif app_name == constants.HBASE:
ba_list.append(
- _build_hbase_install_bootstrap_action(
- region=region))
+ _build_hbase_install_bootstrap_action(region=region)
+ )
if ami_version >= '3.0':
step_list.append(
_build_hbase_install_step(
- constants.HBASE_PATH_HADOOP2_INSTALL_JAR))
+ constants.HBASE_PATH_HADOOP2_INSTALL_JAR
+ )
+ )
elif ami_version >= '2.1':
step_list.append(
_build_hbase_install_step(
- constants.HBASE_PATH_HADOOP1_INSTALL_JAR))
+ constants.HBASE_PATH_HADOOP1_INSTALL_JAR
+ )
+ )
else:
raise ParamValidationError(
'aws: error: AMI version %s is not '
@@ -68,12 +68,15 @@ def build_applications(region,
elif app_name == constants.IMPALA:
ba_list.append(
_build_impala_install_bootstrap_action(
- region=region,
- args=app_config.get('Args')))
+ region=region, args=app_config.get('Args')
+ )
+ )
else:
app_list.append(
_build_supported_product(
- app_config['Name'], app_config.get('Args')))
+ app_config['Name'], app_config.get('Args')
+ )
+ )
return app_list, ba_list, step_list
@@ -89,16 +92,18 @@ def _build_ganglia_install_bootstrap_action(region):
return emrutils.build_bootstrap_action(
name=constants.INSTALL_GANGLIA_NAME,
path=emrutils.build_s3_link(
- relative_path=constants.GANGLIA_INSTALL_BA_PATH,
- region=region))
+ relative_path=constants.GANGLIA_INSTALL_BA_PATH, region=region
+ ),
+ )
def _build_hbase_install_bootstrap_action(region):
return emrutils.build_bootstrap_action(
name=constants.INSTALL_HBASE_NAME,
path=emrutils.build_s3_link(
- relative_path=constants.HBASE_INSTALL_BA_PATH,
- region=region))
+ relative_path=constants.HBASE_INSTALL_BA_PATH, region=region
+ ),
+ )
def _build_hbase_install_step(jar):
@@ -106,7 +111,8 @@ def _build_hbase_install_step(jar):
jar=jar,
name=constants.START_HBASE_NAME,
action_on_failure=constants.TERMINATE_CLUSTER,
- args=constants.HBASE_INSTALL_ARG)
+ args=constants.HBASE_INSTALL_ARG,
+ )
def _build_impala_install_bootstrap_action(region, args=None):
@@ -114,37 +120,43 @@ def _build_impala_install_bootstrap_action(region, args=None):
constants.BASE_PATH_ARG,
emrutils.build_s3_link(region=region),
constants.IMPALA_VERSION,
- constants.LATEST]
+ constants.LATEST,
+ ]
if args is not None:
args_list.append(constants.IMPALA_CONF)
args_list.append(','.join(args))
return emrutils.build_bootstrap_action(
name=constants.INSTALL_IMPALA_NAME,
path=emrutils.build_s3_link(
- relative_path=constants.IMPALA_INSTALL_PATH,
- region=region),
- args=args_list)
+ relative_path=constants.IMPALA_INSTALL_PATH, region=region
+ ),
+ args=args_list,
+ )
-def _build_install_hive_step(region,
- action_on_failure=constants.TERMINATE_CLUSTER):
+def _build_install_hive_step(
+ region, action_on_failure=constants.TERMINATE_CLUSTER
+):
step_args = [
emrutils.build_s3_link(constants.HIVE_SCRIPT_PATH, region),
constants.INSTALL_HIVE_ARG,
constants.BASE_PATH_ARG,
emrutils.build_s3_link(constants.HIVE_BASE_PATH, region),
constants.HIVE_VERSIONS,
- constants.LATEST]
+ constants.LATEST,
+ ]
step = emrutils.build_step(
name=constants.INSTALL_HIVE_NAME,
action_on_failure=action_on_failure,
jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region),
- args=step_args)
+ args=step_args,
+ )
return step
-def _build_install_hive_site_step(region, hive_site_path,
- action_on_failure=constants.CANCEL_AND_WAIT):
+def _build_install_hive_site_step(
+ region, hive_site_path, action_on_failure=constants.CANCEL_AND_WAIT
+):
step_args = [
emrutils.build_s3_link(constants.HIVE_SCRIPT_PATH, region),
constants.BASE_PATH_ARG,
@@ -152,29 +164,34 @@ def _build_install_hive_site_step(region, hive_site_path,
constants.INSTALL_HIVE_SITE_ARG,
hive_site_path,
constants.HIVE_VERSIONS,
- constants.LATEST]
+ constants.LATEST,
+ ]
step = emrutils.build_step(
name=constants.INSTALL_HIVE_SITE_NAME,
action_on_failure=action_on_failure,
jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region),
- args=step_args)
+ args=step_args,
+ )
return step
-def _build_pig_install_step(region,
- action_on_failure=constants.TERMINATE_CLUSTER):
+def _build_pig_install_step(
+ region, action_on_failure=constants.TERMINATE_CLUSTER
+):
step_args = [
emrutils.build_s3_link(constants.PIG_SCRIPT_PATH, region),
constants.INSTALL_PIG_ARG,
constants.BASE_PATH_ARG,
emrutils.build_s3_link(constants.PIG_BASE_PATH, region),
constants.PIG_VERSIONS,
- constants.LATEST]
+ constants.LATEST,
+ ]
step = emrutils.build_step(
name=constants.INSTALL_PIG_NAME,
action_on_failure=action_on_failure,
jar=emrutils.build_s3_link(constants.SCRIPT_RUNNER_PATH, region),
- args=step_args)
+ args=step_args,
+ )
return step
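As context for the builder helpers above, and not part of the patch: a sketch of the step structure a call like _build_install_hive_step ultimately produces, assuming emrutils.build_step mirrors the EMR API's StepConfig shape (Name / ActionOnFailure / HadoopJarStep); the S3 paths and argument strings stand in for the module's constants and are hypothetical::

    # Illustrative sketch only; real values come from
    # awscli.customizations.emr.constants.
    install_hive_step = {
        "Name": "Install Hive",                  # constants.INSTALL_HIVE_NAME (assumed)
        "ActionOnFailure": "TERMINATE_CLUSTER",  # constants.TERMINATE_CLUSTER
        "HadoopJarStep": {
            "Jar": "s3://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar",  # hypothetical
            "Args": [
                "s3://us-east-1.elasticmapreduce/libs/hive/hive-script",  # hypothetical
                "--install-hive",
                "--base-path",
                "s3://us-east-1.elasticmapreduce/libs/hive",  # hypothetical
                "--hive-versions",
                "latest",
            ],
        },
    }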
diff --git a/awscli/customizations/emr/argumentschema.py b/awscli/customizations/emr/argumentschema.py
index 2022480a56ca..f816746ee0f2 100644
--- a/awscli/customizations/emr/argumentschema.py
+++ b/awscli/customizations/emr/argumentschema.py
@@ -16,15 +16,9 @@
CONFIGURATIONS_PROPERTIES_SCHEMA = {
"type": "map",
- "key": {
- "type": "string",
- "description": "Configuration key"
- },
- "value": {
- "type": "string",
- "description": "Configuration value"
- },
- "description": "Application configuration properties"
+ "key": {"type": "string", "description": "Configuration key"},
+ "value": {"type": "string", "description": "Configuration value"},
+ "description": "Application configuration properties",
}
CONFIGURATIONS_CLASSIFICATION_SCHEMA = {
@@ -38,10 +32,10 @@
"type": "object",
"properties": {
"Classification": CONFIGURATIONS_CLASSIFICATION_SCHEMA,
- "Properties": CONFIGURATIONS_PROPERTIES_SCHEMA
- }
+ "Properties": CONFIGURATIONS_PROPERTIES_SCHEMA,
+ },
},
- "description": "Instance group application configurations."
+ "description": "Instance group application configurations.",
}
OUTER_CONFIGURATIONS_SCHEMA = {
@@ -51,45 +45,48 @@
"properties": {
"Classification": CONFIGURATIONS_CLASSIFICATION_SCHEMA,
"Properties": CONFIGURATIONS_PROPERTIES_SCHEMA,
- "Configurations": INNER_CONFIGURATIONS_SCHEMA
- }
+ "Configurations": INNER_CONFIGURATIONS_SCHEMA,
+ },
},
- "description": "Instance group application configurations."
+ "description": "Instance group application configurations.",
}
ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA = {
"type": "object",
- "properties" : {
+ "properties": {
"UsageStrategy": {
"type": "string",
"description": "The strategy of whether to use available capacity reservations to fulfill On-Demand capacity.",
- "enum": ["use-capacity-reservations-first"]
+ "enum": ["use-capacity-reservations-first"],
},
"CapacityReservationPreference": {
"type": "string",
"description": "The preference of the capacity reservation of the instance.",
- "enum": [
- "open",
- "none"
- ]
+ "enum": ["open", "none"],
},
"CapacityReservationResourceGroupArn": {
"type": "string",
- "description": "The ARN of the capacity reservation resource group in which to run the instance."
- }
- }
+ "description": "The ARN of the capacity reservation resource group in which to run the instance.",
+ },
+ },
}
SPOT_ALLOCATION_STRATEGY_SCHEMA = {
"type": "string",
"description": "The strategy to use to launch Spot instance fleets.",
- "enum": ["capacity-optimized", "price-capacity-optimized", "lowest-price", "diversified", "capacity-optimized-prioritized"]
+ "enum": [
+ "capacity-optimized",
+ "price-capacity-optimized",
+ "lowest-price",
+ "diversified",
+ "capacity-optimized-prioritized",
+ ],
}
ONDEMAND_ALLOCATION_STRATEGY_SCHEMA = {
"type": "string",
"description": "The strategy to use to launch On-Demand instance fleets.",
- "enum": ["lowest-price", "prioritized"]
+ "enum": ["lowest-price", "prioritized"],
}
INSTANCE_GROUPS_SCHEMA = {
@@ -99,39 +96,35 @@
"properties": {
"Name": {
"type": "string",
- "description":
- "Friendly name given to the instance group."
+ "description": "Friendly name given to the instance group.",
},
"InstanceGroupType": {
"type": "string",
- "description":
- "The type of the instance group in the cluster.",
+ "description": "The type of the instance group in the cluster.",
"enum": ["MASTER", "CORE", "TASK"],
- "required": True
+ "required": True,
},
"BidPrice": {
"type": "string",
- "description":
- "Bid price for each Amazon EC2 instance in the "
- "instance group when launching nodes as Spot Instances, "
- "expressed in USD."
+ "description": "Bid price for each Amazon EC2 instance in the "
+ "instance group when launching nodes as Spot Instances, "
+ "expressed in USD.",
},
"InstanceType": {
"type": "string",
- "description":
- "The Amazon EC2 instance type for all instances "
- "in the instance group.",
- "required": True
+ "description": "The Amazon EC2 instance type for all instances "
+ "in the instance group.",
+ "required": True,
},
"InstanceCount": {
"type": "integer",
"description": "Target number of Amazon EC2 instances "
"for the instance group",
- "required": True
+ "required": True,
},
"CustomAmiId": {
"type": "string",
- "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances."
+ "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances.",
},
"EbsConfiguration": {
"type": "object",
@@ -146,19 +139,19 @@
"items": {
"type": "object",
"properties": {
- "VolumeSpecification" : {
+ "VolumeSpecification": {
"type": "object",
"description": "The EBS volume specification that will be created and attached to every instance in this instance group.",
"properties": {
"VolumeType": {
"type": "string",
"description": "The EBS volume type that is attached to all the instances in the instance group. Valid types are: gp2, io1, and standard.",
- "required": True
+ "required": True,
},
"SizeInGB": {
"type": "integer",
"description": "The EBS volume size, in GB, that is attached to all the instances in the instance group.",
- "required": True
+ "required": True,
},
"Iops": {
"type": "integer",
@@ -167,17 +160,17 @@
"Throughput": {
"type": "integer",
"description": "The throughput of the EBS volume that is attached to all the instances in the instance group.",
- }
- }
+ },
+ },
},
"VolumesPerInstance": {
"type": "integer",
"description": "The number of EBS volumes that will be created and attached to each instance in the instance group.",
- }
- }
- }
- }
- }
+ },
+ },
+ },
+ },
+ },
},
"AutoScalingPolicy": {
"type": "object",
@@ -190,14 +183,14 @@
"MinCapacity": {
"type": "integer",
"description": "The minimum value for the instances to scale in"
- " to in response to scaling activities."
+ " to in response to scaling activities.",
},
"MaxCapacity": {
"type": "integer",
"description": "The maximum value for the instances to scale out to in response"
- " to scaling activities"
- }
- }
+ " to scaling activities",
+ },
+ },
},
"Rules": {
"type": "array",
@@ -207,11 +200,11 @@
"properties": {
"Name": {
"type": "string",
- "description": "Name of the Auto Scaling rule."
+ "description": "Name of the Auto Scaling rule.",
},
"Description": {
"type": "string",
- "description": "Description of the Auto Scaling rule."
+ "description": "Description of the Auto Scaling rule.",
},
"Action": {
"type": "object",
@@ -220,35 +213,38 @@
"Market": { # Required for Instance Fleets
"type": "string",
"description": "Market type of the Amazon EC2 instances used to create a "
- "cluster node by Auto Scaling action.",
- "enum": ["ON_DEMAND", "SPOT"]
+ "cluster node by Auto Scaling action.",
+ "enum": ["ON_DEMAND", "SPOT"],
},
"SimpleScalingPolicyConfiguration": {
"type": "object",
"description": "The Simple scaling configuration that will be associated"
- "to Auto Scaling action.",
+ "to Auto Scaling action.",
"properties": {
"AdjustmentType": {
"type": "string",
"description": "Specifies how the ScalingAdjustment parameter is "
- "interpreted.",
- "enum": ["CHANGE_IN_CAPACITY", "PERCENT_CHANGE_IN_CAPACITY",
- "EXACT_CAPACITY"]
+ "interpreted.",
+ "enum": [
+ "CHANGE_IN_CAPACITY",
+ "PERCENT_CHANGE_IN_CAPACITY",
+ "EXACT_CAPACITY",
+ ],
},
"ScalingAdjustment": {
"type": "integer",
"description": "The amount by which to scale, based on the "
- "specified adjustment type."
+ "specified adjustment type.",
},
"CoolDown": {
"type": "integer",
"description": "The amount of time, in seconds, after a scaling "
- "activity completes and before the next scaling "
- "activity can start."
- }
- }
- }
- }
+ "activity completes and before the next scaling "
+ "activity can start.",
+ },
+ },
+ },
+ },
},
"Trigger": {
"type": "object",
@@ -257,44 +253,44 @@
"CloudWatchAlarmDefinition": {
"type": "object",
"description": "The Alarm to be registered with CloudWatch, to trigger"
- " scaling activities.",
+ " scaling activities.",
"properties": {
"ComparisonOperator": {
"type": "string",
"description": "The arithmetic operation to use when comparing the"
- " specified Statistic and Threshold."
+ " specified Statistic and Threshold.",
},
"EvaluationPeriods": {
"type": "integer",
"description": "The number of periods over which data is compared"
- " to the specified threshold."
+ " to the specified threshold.",
},
"MetricName": {
"type": "string",
- "description": "The name for the alarm's associated metric."
+ "description": "The name for the alarm's associated metric.",
},
"Namespace": {
"type": "string",
- "description": "The namespace for the alarm's associated metric."
+ "description": "The namespace for the alarm's associated metric.",
},
"Period": {
"type": "integer",
"description": "The period in seconds over which the specified "
- "statistic is applied."
+ "statistic is applied.",
},
"Statistic": {
"type": "string",
"description": "The statistic to apply to the alarm's associated "
- "metric."
+ "metric.",
},
"Threshold": {
"type": "double",
"description": "The value against which the specified statistic is "
- "compared."
+ "compared.",
},
"Unit": {
"type": "string",
- "description": "The statistic's unit of measure."
+ "description": "The statistic's unit of measure.",
},
"Dimensions": {
"type": "array",
@@ -304,27 +300,27 @@
"properties": {
"Key": {
"type": "string",
- "description": "Dimension Key."
+ "description": "Dimension Key.",
},
"Value": {
"type": "string",
- "description": "Dimension Value."
- }
- }
- }
- }
- }
+ "description": "Dimension Value.",
+ },
+ },
+ },
+ },
+ },
}
- }
- }
- }
- }
- }
- }
+ },
+ },
+ },
+ },
+ },
+ },
},
- "Configurations": OUTER_CONFIGURATIONS_SCHEMA
- }
- }
+ "Configurations": OUTER_CONFIGURATIONS_SCHEMA,
+ },
+ },
}
INSTANCE_FLEETS_SCHEMA = {
@@ -334,21 +330,21 @@
"properties": {
"Name": {
"type": "string",
- "description": "Friendly name given to the instance fleet."
+ "description": "Friendly name given to the instance fleet.",
},
"InstanceFleetType": {
"type": "string",
"description": "The type of the instance fleet in the cluster.",
"enum": ["MASTER", "CORE", "TASK"],
- "required": True
+ "required": True,
},
"TargetOnDemandCapacity": {
"type": "integer",
- "description": "Target on-demand capacity for the instance fleet."
+ "description": "Target on-demand capacity for the instance fleet.",
},
"TargetSpotCapacity": {
"type": "integer",
- "description": "Target spot capacity for the instance fleet."
+ "description": "Target spot capacity for the instance fleet.",
},
"InstanceTypeConfigs": {
"type": "array",
@@ -358,30 +354,30 @@
"InstanceType": {
"type": "string",
"description": "The Amazon EC2 instance type for the instance fleet.",
- "required": True
+ "required": True,
},
"WeightedCapacity": {
"type": "integer",
- "description": "The weight assigned to an instance type, which will impact the overall fulfillment of the capacity."
+ "description": "The weight assigned to an instance type, which will impact the overall fulfillment of the capacity.",
},
"BidPrice": {
"type": "string",
"description": "Bid price for each Amazon EC2 instance in the "
- "instance fleet when launching nodes as Spot Instances, "
- "expressed in USD."
+ "instance fleet when launching nodes as Spot Instances, "
+ "expressed in USD.",
},
"BidPriceAsPercentageOfOnDemandPrice": {
"type": "double",
- "description": "Bid price as percentage of on-demand price."
+ "description": "Bid price as percentage of on-demand price.",
},
"CustomAmiId": {
"type": "string",
- "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances."
+ "description": "The AMI ID of a custom AMI to use when Amazon EMR provisions EC2 instances.",
},
"Priority": {
"type": "double",
"description": "The priority at which Amazon EMR launches the EC2 instances with this instance type. "
- "Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first."
+ "Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first.",
},
"EbsConfiguration": {
"type": "object",
@@ -396,83 +392,83 @@
"items": {
"type": "object",
"properties": {
- "VolumeSpecification" : {
+ "VolumeSpecification": {
"type": "object",
"description": "The EBS volume specification that is created "
- "and attached to each instance in the instance group.",
+ "and attached to each instance in the instance group.",
"properties": {
"VolumeType": {
"type": "string",
"description": "The EBS volume type that is attached to all "
- "the instances in the instance group. Valid types are: "
- "gp2, io1, and standard.",
- "required": True
+ "the instances in the instance group. Valid types are: "
+ "gp2, io1, and standard.",
+ "required": True,
},
"SizeInGB": {
"type": "integer",
"description": "The EBS volume size, in GB, that is attached "
- "to all the instances in the instance group.",
- "required": True
+ "to all the instances in the instance group.",
+ "required": True,
},
"Iops": {
"type": "integer",
"description": "The IOPS of the EBS volume that is attached to "
- "all the instances in the instance group.",
+ "all the instances in the instance group.",
},
"Throughput": {
- "type": "integer",
- "description": "The throughput of the EBS volume that is attached to "
- "all the instances in the instance group.",
- }
- }
+ "type": "integer",
+ "description": "The throughput of the EBS volume that is attached to "
+ "all the instances in the instance group.",
+ },
+ },
},
"VolumesPerInstance": {
"type": "integer",
"description": "The number of EBS volumes that will be created and "
- "attached to each instance in the instance group.",
- }
- }
- }
- }
- }
+ "attached to each instance in the instance group.",
+ },
+ },
+ },
+ },
+ },
},
- "Configurations": OUTER_CONFIGURATIONS_SCHEMA
- }
- }
+ "Configurations": OUTER_CONFIGURATIONS_SCHEMA,
+ },
+ },
},
"LaunchSpecifications": {
"type": "object",
- "properties" : {
+ "properties": {
"OnDemandSpecification": {
"type": "object",
"properties": {
"AllocationStrategy": ONDEMAND_ALLOCATION_STRATEGY_SCHEMA,
- "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA
- }
+ "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA,
+ },
},
"SpotSpecification": {
"type": "object",
"properties": {
"TimeoutDurationMinutes": {
"type": "integer",
- "description": "The time, in minutes, after which the action specified in TimeoutAction field will be performed if requested resources are unavailable."
+ "description": "The time, in minutes, after which the action specified in TimeoutAction field will be performed if requested resources are unavailable.",
},
"TimeoutAction": {
"type": "string",
"description": "The action that is performed after TimeoutDurationMinutes.",
"enum": [
"TERMINATE_CLUSTER",
- "SWITCH_TO_ONDEMAND"
- ]
+ "SWITCH_TO_ONDEMAND",
+ ],
},
"BlockDurationMinutes": {
"type": "integer",
- "description": "Block duration in minutes."
+ "description": "Block duration in minutes.",
},
- "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA
- }
- }
- }
+ "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA,
+ },
+ },
+ },
},
"ResizeSpecifications": {
"type": "object",
@@ -481,31 +477,28 @@
"type": "object",
"properties": {
"TimeoutDurationMinutes": {
- "type" : "integer",
- "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable."
+ "type": "integer",
+ "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable.",
},
- "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA
- }
+ "AllocationStrategy": SPOT_ALLOCATION_STRATEGY_SCHEMA,
+ },
},
"OnDemandResizeSpecification": {
"type": "object",
"properties": {
"TimeoutDurationMinutes": {
- "type" : "integer",
- "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable."
+ "type": "integer",
+ "description": "The time, in minutes, after which the resize will be stopped if requested resources are unavailable.",
},
"AllocationStrategy": ONDEMAND_ALLOCATION_STRATEGY_SCHEMA,
- "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA
- }
- }
- }
+ "CapacityReservationOptions": ONDEMAND_CAPACITY_RESERVATION_OPTIONS_SCHEMA,
+ },
+ },
+ },
},
- "Context": {
- "type": "string",
- "description": "Reserved."
- }
- }
- }
+ "Context": {"type": "string", "description": "Reserved."},
+ },
+ },
}
EC2_ATTRIBUTES_SCHEMA = {
@@ -513,75 +506,64 @@
"properties": {
"KeyName": {
"type": "string",
- "description":
- "The name of the Amazon EC2 key pair that can "
- "be used to ssh to the master node as the user 'hadoop'."
+ "description": "The name of the Amazon EC2 key pair that can "
+ "be used to ssh to the master node as the user 'hadoop'.",
},
"SubnetId": {
"type": "string",
- "description":
- "To launch the cluster in Amazon "
- "Virtual Private Cloud (Amazon VPC), set this parameter to "
- "the identifier of the Amazon VPC subnet where you want "
- "the cluster to launch. If you do not specify this value, "
- "the cluster is launched in the normal Amazon Web Services "
- "cloud, outside of an Amazon VPC. "
+ "description": "To launch the cluster in Amazon "
+ "Virtual Private Cloud (Amazon VPC), set this parameter to "
+ "the identifier of the Amazon VPC subnet where you want "
+ "the cluster to launch. If you do not specify this value, "
+ "the cluster is launched in the normal Amazon Web Services "
+ "cloud, outside of an Amazon VPC. ",
},
"SubnetIds": {
"type": "array",
- "description":
- "List of SubnetIds.",
- "items": {
- "type": "string"
- }
+ "description": "List of SubnetIds.",
+ "items": {"type": "string"},
},
"AvailabilityZone": {
"type": "string",
- "description": "The Availability Zone the cluster will run in."
+ "description": "The Availability Zone the cluster will run in.",
},
"AvailabilityZones": {
"type": "array",
"description": "List of AvailabilityZones.",
- "items": {
- "type": "string"
- }
+ "items": {"type": "string"},
},
"InstanceProfile": {
"type": "string",
- "description":
- "An IAM role for the cluster. The EC2 instances of the cluster"
- " assume this role. The default role is " +
- EC2_ROLE_NAME + ". In order to use the default"
- " role, you must have already created it using the "
-                "create-default-roles command. "
+ "description": "An IAM role for the cluster. The EC2 instances of the cluster"
+ " assume this role. The default role is "
+ + EC2_ROLE_NAME
+ + ". In order to use the default"
+ " role, you must have already created it using the "
+            "create-default-roles command. ",
},
"EmrManagedMasterSecurityGroup": {
"type": "string",
- "description": helptext.EMR_MANAGED_MASTER_SECURITY_GROUP
+ "description": helptext.EMR_MANAGED_MASTER_SECURITY_GROUP,
},
"EmrManagedSlaveSecurityGroup": {
"type": "string",
- "description": helptext.EMR_MANAGED_SLAVE_SECURITY_GROUP
+ "description": helptext.EMR_MANAGED_SLAVE_SECURITY_GROUP,
},
"ServiceAccessSecurityGroup": {
"type": "string",
- "description": helptext.SERVICE_ACCESS_SECURITY_GROUP
+ "description": helptext.SERVICE_ACCESS_SECURITY_GROUP,
},
"AdditionalMasterSecurityGroups": {
"type": "array",
"description": helptext.ADDITIONAL_MASTER_SECURITY_GROUPS,
- "items": {
- "type": "string"
- }
+ "items": {"type": "string"},
},
"AdditionalSlaveSecurityGroups": {
"type": "array",
"description": helptext.ADDITIONAL_SLAVE_SECURITY_GROUPS,
- "items": {
- "type": "string"
- }
- }
- }
+ "items": {"type": "string"},
+ },
+ },
}
@@ -593,20 +575,26 @@
"Name": {
"type": "string",
"description": "Application name.",
- "enum": ["MapR", "HUE", "HIVE", "PIG", "HBASE",
- "IMPALA", "GANGLIA", "HADOOP", "SPARK"],
- "required": True
+ "enum": [
+ "MapR",
+ "HUE",
+ "HIVE",
+ "PIG",
+ "HBASE",
+ "IMPALA",
+ "GANGLIA",
+ "HADOOP",
+ "SPARK",
+ ],
+ "required": True,
},
"Args": {
"type": "array",
- "description":
- "A list of arguments to pass to the application.",
- "items": {
- "type": "string"
- }
- }
- }
- }
+ "description": "A list of arguments to pass to the application.",
+ "items": {"type": "string"},
+ },
+ },
+ },
}
BOOTSTRAP_ACTIONS_SCHEMA = {
@@ -614,29 +602,22 @@
"items": {
"type": "object",
"properties": {
- "Name": {
- "type": "string",
- "default": "Bootstrap Action"
- },
+ "Name": {"type": "string", "default": "Bootstrap Action"},
"Path": {
"type": "string",
- "description":
- "Location of the script to run during a bootstrap action. "
- "Can be either a location in Amazon S3 or "
- "on a local file system.",
- "required": True
+ "description": "Location of the script to run during a bootstrap action. "
+ "Can be either a location in Amazon S3 or "
+ "on a local file system.",
+ "required": True,
},
"Args": {
"type": "array",
- "description":
- "A list of command line arguments to pass to "
- "the bootstrap action script",
- "items": {
- "type": "string"
- }
- }
- }
- }
+ "description": "A list of command line arguments to pass to "
+ "the bootstrap action script",
+ "items": {"type": "string"},
+ },
+ },
+ },
}
@@ -647,8 +628,7 @@
"properties": {
"Type": {
"type": "string",
- "description":
- "The type of a step to be added to the cluster.",
+ "description": "The type of a step to be added to the cluster.",
"default": "custom_jar",
"enum": ["CUSTOM_JAR", "STREAMING", "HIVE", "PIG", "IMPALA"],
},
@@ -660,7 +640,7 @@
"type": "string",
"description": "The action to take if the cluster step fails.",
"enum": ["TERMINATE_CLUSTER", "CANCEL_AND_WAIT", "CONTINUE"],
- "default": "CONTINUE"
+ "default": "CONTINUE",
},
"Jar": {
"type": "string",
@@ -668,42 +648,34 @@
},
"Args": {
"type": "array",
- "description":
- "A list of command line arguments to pass to the step.",
- "items": {
- "type": "string"
- }
+ "description": "A list of command line arguments to pass to the step.",
+ "items": {"type": "string"},
},
"MainClass": {
"type": "string",
- "description":
- "The name of the main class in the specified "
- "Java file. If not specified, the JAR file should "
- "specify a Main-Class in its manifest file."
+ "description": "The name of the main class in the specified "
+ "Java file. If not specified, the JAR file should "
+ "specify a Main-Class in its manifest file.",
},
"Properties": {
"type": "string",
- "description":
- "A list of Java properties that are set when the step "
- "runs. You can use these properties to pass key value "
- "pairs to your main function."
- }
- }
- }
+ "description": "A list of Java properties that are set when the step "
+ "runs. You can use these properties to pass key value "
+ "pairs to your main function.",
+ },
+ },
+ },
}
HBASE_RESTORE_FROM_BACKUP_SCHEMA = {
"type": "object",
"properties": {
- "Dir": {
- "type": "string",
- "description": helptext.HBASE_BACKUP_DIR
- },
+ "Dir": {"type": "string", "description": helptext.HBASE_BACKUP_DIR},
"BackupVersion": {
"type": "string",
- "description": helptext.HBASE_BACKUP_VERSION
- }
- }
+ "description": helptext.HBASE_BACKUP_VERSION,
+ },
+ },
}
EMR_FS_SCHEMA = {
@@ -711,41 +683,38 @@
"properties": {
"Consistent": {
"type": "boolean",
- "description": "Enable EMRFS consistent view."
+ "description": "Enable EMRFS consistent view.",
},
"SSE": {
"type": "boolean",
"description": "Enable Amazon S3 server-side encryption on files "
- "written to S3 by EMRFS."
+ "written to S3 by EMRFS.",
},
"RetryCount": {
"type": "integer",
- "description":
- "The maximum number of times to retry upon S3 inconsistency."
+ "description": "The maximum number of times to retry upon S3 inconsistency.",
},
"RetryPeriod": {
"type": "integer",
"description": "The amount of time (in seconds) until the first "
- "retry. Subsequent retries use an exponential "
- "back-off."
+ "retry. Subsequent retries use an exponential "
+ "back-off.",
},
"Args": {
"type": "array",
"description": "A list of arguments to pass for additional "
- "EMRFS configuration.",
- "items": {
- "type": "string"
- }
+ "EMRFS configuration.",
+ "items": {"type": "string"},
},
"Encryption": {
"type": "string",
"description": "EMRFS encryption type.",
- "enum": ["SERVERSIDE", "CLIENTSIDE"]
+ "enum": ["SERVERSIDE", "CLIENTSIDE"],
},
"ProviderType": {
"type": "string",
"description": "EMRFS client-side encryption provider type.",
- "enum": ["KMS", "CUSTOM"]
+ "enum": ["KMS", "CUSTOM"],
},
"KMSKeyId": {
"type": "string",
@@ -753,46 +722,41 @@
},
"CustomProviderLocation": {
"type": "string",
- "description": "Custom encryption provider JAR location."
+ "description": "Custom encryption provider JAR location.",
},
"CustomProviderClass": {
"type": "string",
- "description": "Custom encryption provider full class name."
- }
- }
+ "description": "Custom encryption provider full class name.",
+ },
+ },
}
-TAGS_SCHEMA = {
- "type": "array",
- "items": {
- "type": "string"
- }
-}
+TAGS_SCHEMA = {"type": "array", "items": {"type": "string"}}
KERBEROS_ATTRIBUTES_SCHEMA = {
"type": "object",
"properties": {
"Realm": {
"type": "string",
- "description": "The name of Kerberos realm."
+ "description": "The name of Kerberos realm.",
},
"KdcAdminPassword": {
"type": "string",
- "description": "The password of Kerberos administrator."
+ "description": "The password of Kerberos administrator.",
},
"CrossRealmTrustPrincipalPassword": {
"type": "string",
- "description": "The password to establish cross-realm trusts."
+ "description": "The password to establish cross-realm trusts.",
},
"ADDomainJoinUser": {
"type": "string",
- "description": "The name of the user with privileges to join instances to Active Directory."
+ "description": "The name of the user with privileges to join instances to Active Directory.",
},
"ADDomainJoinPassword": {
"type": "string",
- "description": "The password of the user with privileges to join instances to Active Directory."
- }
- }
+ "description": "The password of the user with privileges to join instances to Active Directory.",
+ },
+ },
}
MANAGED_SCALING_POLICY_SCHEMA = {
@@ -800,73 +764,66 @@
"properties": {
"ComputeLimits": {
"type": "object",
- "description":
- "The EC2 unit limits for a managed scaling policy. "
- "The managed scaling activity of a cluster is not allowed to go above "
- "or below these limits. The limits apply to CORE and TASK groups "
- "and exclude the capacity of the MASTER group.",
+ "description": "The EC2 unit limits for a managed scaling policy. "
+ "The managed scaling activity of a cluster is not allowed to go above "
+ "or below these limits. The limits apply to CORE and TASK groups "
+ "and exclude the capacity of the MASTER group.",
"properties": {
- "MinimumCapacityUnits": {
- "type": "integer",
- "description":
- "The lower boundary of EC2 units. It is measured through "
- "VCPU cores or instances for instance groups and measured "
- "through units for instance fleets. Managed scaling "
- "activities are not allowed beyond this boundary.",
- "required": True
- },
- "MaximumCapacityUnits": {
- "type": "integer",
- "description":
- "The upper boundary of EC2 units. It is measured through "
- "VCPU cores or instances for instance groups and measured "
- "through units for instance fleets. Managed scaling "
- "activities are not allowed beyond this boundary.",
- "required": True
- },
- "MaximumOnDemandCapacityUnits": {
- "type": "integer",
- "description":
- "The upper boundary of on-demand EC2 units. It is measured through "
- "VCPU cores or instances for instance groups and measured "
- "through units for instance fleets. The on-demand units are not "
- "allowed to scale beyond this boundary. "
- "This value must be lower than MaximumCapacityUnits."
- },
- "UnitType": {
- "type": "string",
- "description": "The unit type used for specifying a managed scaling policy.",
- "enum": ["VCPU", "Instances", "InstanceFleetUnits"],
- "required": True
- },
- "MaximumCoreCapacityUnits": {
- "type": "integer",
- "description":
- "The upper boundary of EC2 units for core node type in a cluster. "
- "It is measured through VCPU cores or instances for instance groups "
- "and measured through units for instance fleets. "
- "The core units are not allowed to scale beyond this boundary. "
- "The parameter is used to split capacity allocation between core and task nodes."
- }
- }
+ "MinimumCapacityUnits": {
+ "type": "integer",
+ "description": "The lower boundary of EC2 units. It is measured through "
+ "VCPU cores or instances for instance groups and measured "
+ "through units for instance fleets. Managed scaling "
+ "activities are not allowed beyond this boundary.",
+ "required": True,
+ },
+ "MaximumCapacityUnits": {
+ "type": "integer",
+ "description": "The upper boundary of EC2 units. It is measured through "
+ "VCPU cores or instances for instance groups and measured "
+ "through units for instance fleets. Managed scaling "
+ "activities are not allowed beyond this boundary.",
+ "required": True,
+ },
+ "MaximumOnDemandCapacityUnits": {
+ "type": "integer",
+ "description": "The upper boundary of on-demand EC2 units. It is measured through "
+ "VCPU cores or instances for instance groups and measured "
+ "through units for instance fleets. The on-demand units are not "
+ "allowed to scale beyond this boundary. "
+ "This value must be lower than MaximumCapacityUnits.",
+ },
+ "UnitType": {
+ "type": "string",
+ "description": "The unit type used for specifying a managed scaling policy.",
+ "enum": ["VCPU", "Instances", "InstanceFleetUnits"],
+ "required": True,
+ },
+ "MaximumCoreCapacityUnits": {
+ "type": "integer",
+ "description": "The upper boundary of EC2 units for core node type in a cluster. "
+ "It is measured through VCPU cores or instances for instance groups "
+ "and measured through units for instance fleets. "
+ "The core units are not allowed to scale beyond this boundary. "
+ "The parameter is used to split capacity allocation between core and task nodes.",
+ },
+ },
},
"ScalingStrategy": {
"type": "string",
"enum": ["DEFAULT", "ADVANCED"],
- "description":
- "Determines whether a custom scaling utilization performance index can be set. "
- "Possible values include ADVANCED or DEFAULT."
+ "description": "Determines whether a custom scaling utilization performance index can be set. "
+ "Possible values include ADVANCED or DEFAULT.",
},
"UtilizationPerformanceIndex": {
"type": "integer",
- "description":
- "An integer value that represents an advanced scaling strategy. "
- "Setting a higher value optimizes for performance. "
- "Setting a lower value optimizes for resource conservation. "
- "Setting the value to 50 balances performance and resource conservation. "
- "Possible values are 1, 25, 50, 75, and 100."
- }
- }
+ "description": "An integer value that represents an advanced scaling strategy. "
+ "Setting a higher value optimizes for performance. "
+ "Setting a lower value optimizes for resource conservation. "
+ "Setting the value to 50 balances performance and resource conservation. "
+ "Possible values are 1, 25, 50, 75, and 100.",
+ },
+ },
}
PLACEMENT_GROUP_CONFIGS_SCHEMA = {
@@ -878,26 +835,25 @@
"type": "string",
"description": "Role of the instance in the cluster.",
"enum": ["MASTER", "CORE", "TASK"],
- "required": True
+ "required": True,
},
"PlacementStrategy": {
"type": "string",
"description": "EC2 Placement Group strategy associated "
- "with instance role.",
- "enum": ["SPREAD", "PARTITION", "CLUSTER", "NONE"]
- }
- }
- }
+ "with instance role.",
+ "enum": ["SPREAD", "PARTITION", "CLUSTER", "NONE"],
+ },
+ },
+ },
}
AUTO_TERMINATION_POLICY_SCHEMA = {
"type": "object",
- "properties": {
+ "properties": {
"IdleTimeout": {
"type": "long",
- "description":
- "Specifies the amount of idle time in seconds after which the cluster automatically terminates. "
- "You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).",
+ "description": "Specifies the amount of idle time in seconds after which the cluster automatically terminates. "
+ "You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).",
}
- }
+ },
}
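To make the schemas above concrete, and not part of the patch: a value accepted by INSTANCE_GROUPS_SCHEMA, i.e. what ``aws emr create-cluster --instance-groups`` receives after shorthand or JSON parsing; the instance types and counts are hypothetical::

    # Only the keys marked required in the schema must be present.
    instance_groups = [
        {
            "Name": "Primary node",
            "InstanceGroupType": "MASTER",  # required
            "InstanceType": "m5.xlarge",    # required
            "InstanceCount": 1,             # required
        },
        {
            "InstanceGroupType": "CORE",
            "InstanceType": "m5.xlarge",
            "InstanceCount": 2,
        },
    ]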
diff --git a/awscli/customizations/emr/command.py b/awscli/customizations/emr/command.py
index b208a3c13170..d55819a6bc6b 100644
--- a/awscli/customizations/emr/command.py
+++ b/awscli/customizations/emr/command.py
@@ -12,11 +12,9 @@
# language governing permissions and limitations under the License.
import logging
+
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.emr import config
-from awscli.customizations.emr import configutils
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import config, configutils, emrutils, exceptions
LOG = logging.getLogger(__name__)
@@ -24,36 +22,42 @@
class Command(BasicCommand):
region = None
- UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS = set([
- 'install-applications',
- 'restore-from-hbase-backup',
- 'schedule-hbase-backup',
- 'create-hbase-backup',
- 'disable-hbase-backups',
- ])
+ UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS = set(
+ [
+ 'install-applications',
+ 'restore-from-hbase-backup',
+ 'schedule-hbase-backup',
+ 'create-hbase-backup',
+ 'disable-hbase-backups',
+ ]
+ )
def supports_arg(self, name):
return any((x['name'] == name for x in self.ARG_TABLE))
def _run_main(self, parsed_args, parsed_globals):
-
- self._apply_configs(parsed_args,
- configutils.get_configs(self._session))
+ self._apply_configs(
+ parsed_args, configutils.get_configs(self._session)
+ )
self.region = emrutils.get_region(self._session, parsed_globals)
self._validate_unsupported_commands_for_release_based_clusters(
- parsed_args, parsed_globals)
+ parsed_args, parsed_globals
+ )
return self._run_main_command(parsed_args, parsed_globals)
def _apply_configs(self, parsed_args, parsed_configs):
- applicable_configurations = \
- self._get_applicable_configurations(parsed_args, parsed_configs)
+ applicable_configurations = self._get_applicable_configurations(
+ parsed_args, parsed_configs
+ )
configs_added = {}
for configuration in applicable_configurations:
- configuration.add(self, parsed_args,
- parsed_configs[configuration.name])
- configs_added[configuration.name] = \
- parsed_configs[configuration.name]
+ configuration.add(
+ self, parsed_args, parsed_configs[configuration.name]
+ )
+ configs_added[configuration.name] = parsed_configs[
+ configuration.name
+ ]
if configs_added:
LOG.debug("Updated arguments with configs: %s" % configs_added)
@@ -68,20 +72,23 @@ def _get_applicable_configurations(self, parsed_args, parsed_configs):
# 3. Configurations that are present in parsed_configs
# 2. Configurations that are not present in parsed_args
- configurations = \
- config.get_applicable_configurations(self)
+ configurations = config.get_applicable_configurations(self)
- configurations = [x for x in configurations
- if x.name in parsed_configs and
- not x.is_present(parsed_args)]
+ configurations = [
+ x
+ for x in configurations
+ if x.name in parsed_configs and not x.is_present(parsed_args)
+ ]
configurations = self._filter_configurations_in_special_cases(
- configurations, parsed_args, parsed_configs)
+ configurations, parsed_args, parsed_configs
+ )
return configurations
- def _filter_configurations_in_special_cases(self, configurations,
- parsed_args, parsed_configs):
+ def _filter_configurations_in_special_cases(
+ self, configurations, parsed_args, parsed_configs
+ ):
# Subclasses can override this method to filter the applicable
# configurations further based upon some custom logic
# Default behavior is to return the configurations list as is
@@ -99,18 +106,25 @@ def _run_main_command(self, parsed_args, parsed_globals):
raise NotImplementedError("_run_main_command")
def _validate_unsupported_commands_for_release_based_clusters(
- self, parsed_args, parsed_globals):
+ self, parsed_args, parsed_globals
+ ):
command = self.NAME
- if (command in self.UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS and
- hasattr(parsed_args, 'cluster_id')):
+ if (
+ command in self.UNSUPPORTED_COMMANDS_FOR_RELEASE_BASED_CLUSTERS
+ and hasattr(parsed_args, 'cluster_id')
+ ):
release_label = emrutils.get_release_label(
- parsed_args.cluster_id, self._session, self.region,
- parsed_globals.endpoint_url, parsed_globals.verify_ssl)
+ parsed_args.cluster_id,
+ self._session,
+ self.region,
+ parsed_globals.endpoint_url,
+ parsed_globals.verify_ssl,
+ )
if release_label:
raise exceptions.UnsupportedCommandWithReleaseError(
- command=command,
- release_label=release_label)
+ command=command, release_label=release_label
+ )
def override_args_required_option(argument_table, args, session, **kwargs):
@@ -119,8 +133,7 @@ def override_args_required_option(argument_table, args, session, **kwargs):
# file
# We don't want to override when user is viewing the help so that we
# can show the required options correctly in the help
- need_to_override = False if len(args) == 1 and args[0] == 'help' \
- else True
+ need_to_override = False if len(args) == 1 and args[0] == 'help' else True
if need_to_override:
parsed_configs = configutils.get_configs(session)
diff --git a/awscli/customizations/emr/config.py b/awscli/customizations/emr/config.py
index f0b615fa8ab9..15ebc81f8a0c 100644
--- a/awscli/customizations/emr/config.py
+++ b/awscli/customizations/emr/config.py
@@ -12,20 +12,26 @@
# language governing permissions and limitations under the License.
import logging
-from awscli.customizations.emr import configutils
-from awscli.customizations.emr import exceptions
+
+from awscli.customizations.emr import configutils, exceptions
LOG = logging.getLogger(__name__)
SUPPORTED_CONFIG_LIST = [
{'name': 'service_role'},
{'name': 'log_uri'},
- {'name': 'instance_profile', 'arg_name': 'ec2_attributes',
- 'arg_value_key': 'InstanceProfile'},
- {'name': 'key_name', 'arg_name': 'ec2_attributes',
- 'arg_value_key': 'KeyName'},
+ {
+ 'name': 'instance_profile',
+ 'arg_name': 'ec2_attributes',
+ 'arg_value_key': 'InstanceProfile',
+ },
+ {
+ 'name': 'key_name',
+ 'arg_name': 'ec2_attributes',
+ 'arg_value_key': 'KeyName',
+ },
{'name': 'enable_debugging', 'type': 'boolean'},
- {'name': 'key_pair_file'}
+ {'name': 'key_pair_file'},
]
TYPES = ['string', 'boolean']
@@ -39,27 +45,30 @@ def get_applicable_configurations(command):
def _create_supported_configuration(config):
config_type = config['type'] if 'type' in config else 'string'
- if (config_type == 'string'):
- config_arg_name = config['arg_name'] \
- if 'arg_name' in config else config['name']
- config_arg_value_key = config['arg_value_key'] \
- if 'arg_value_key' in config else None
- configuration = StringConfiguration(config['name'],
- config_arg_name,
- config_arg_value_key)
- elif (config_type == 'boolean'):
+ if config_type == 'string':
+ config_arg_name = (
+ config['arg_name'] if 'arg_name' in config else config['name']
+ )
+ config_arg_value_key = (
+ config['arg_value_key'] if 'arg_value_key' in config else None
+ )
+ configuration = StringConfiguration(
+ config['name'], config_arg_name, config_arg_value_key
+ )
+ elif config_type == 'boolean':
configuration = BooleanConfiguration(config['name'])
return configuration
def _create_supported_configurations():
- return [_create_supported_configuration(config)
- for config in SUPPORTED_CONFIG_LIST]
+ return [
+ _create_supported_configuration(config)
+ for config in SUPPORTED_CONFIG_LIST
+ ]
class Configuration(object):
-
def __init__(self, name, arg_name):
self.name = name
self.arg_name = arg_name
@@ -78,7 +87,6 @@ def _check_arg(self, parsed_args, arg_name):
class StringConfiguration(Configuration):
-
def __init__(self, name, arg_name, arg_value_key=None):
super(StringConfiguration, self).__init__(name, arg_name)
self.arg_value_key = arg_value_key
@@ -87,40 +95,42 @@ def is_applicable(self, command):
return command.supports_arg(self.arg_name.replace('_', '-'))
def is_present(self, parsed_args):
- if (not self.arg_value_key):
+ if not self.arg_value_key:
return self._check_arg(parsed_args, self.arg_name)
else:
- return self._check_arg(parsed_args, self.arg_name) \
- and self.arg_value_key in getattr(parsed_args, self.arg_name)
+ return self._check_arg(
+ parsed_args, self.arg_name
+ ) and self.arg_value_key in getattr(parsed_args, self.arg_name)
def add(self, command, parsed_args, value):
- if (not self.arg_value_key):
+ if not self.arg_value_key:
setattr(parsed_args, self.arg_name, value)
else:
- if (not self._check_arg(parsed_args, self.arg_name)):
+ if not self._check_arg(parsed_args, self.arg_name):
setattr(parsed_args, self.arg_name, {})
getattr(parsed_args, self.arg_name)[self.arg_value_key] = value
class BooleanConfiguration(Configuration):
-
def __init__(self, name):
super(BooleanConfiguration, self).__init__(name, name)
self.no_version_arg_name = "no_" + name
def is_applicable(self, command):
- return command.supports_arg(self.arg_name.replace('_', '-')) and \
- command.supports_arg(self.no_version_arg_name.replace('_', '-'))
+ return command.supports_arg(
+ self.arg_name.replace('_', '-')
+ ) and command.supports_arg(self.no_version_arg_name.replace('_', '-'))
def is_present(self, parsed_args):
- return self._check_arg(parsed_args, self.arg_name) \
- or self._check_arg(parsed_args, self.no_version_arg_name)
+ return self._check_arg(parsed_args, self.arg_name) or self._check_arg(
+ parsed_args, self.no_version_arg_name
+ )
def add(self, command, parsed_args, value):
- if (value.lower() == 'true'):
+ if value.lower() == 'true':
setattr(parsed_args, self.arg_name, True)
setattr(parsed_args, self.no_version_arg_name, False)
- elif (value.lower() == 'false'):
+ elif value.lower() == 'false':
setattr(parsed_args, self.arg_name, False)
setattr(parsed_args, self.no_version_arg_name, True)
else:
@@ -128,4 +138,6 @@ def add(self, command, parsed_args, value):
config_value=value,
config_key=self.arg_name,
profile_var_name=configutils.get_current_profile_var_name(
- command._session))
+ command._session
+ ),
+ )
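A quick check of the boolean mapping above, not part of the patch, using a bare argparse.Namespace as a stand-in for the command's parsed arguments and the ``enable_debugging`` entry from SUPPORTED_CONFIG_LIST::

    from argparse import Namespace

    from awscli.customizations.emr.config import BooleanConfiguration

    parsed_args = Namespace()
    cfg = BooleanConfiguration('enable_debugging')
    # The 'true' branch never touches `command`, so None is a safe stand-in.
    cfg.add(command=None, parsed_args=parsed_args, value='true')
    assert parsed_args.enable_debugging is True
    assert parsed_args.no_enable_debugging is False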
diff --git a/awscli/customizations/emr/configutils.py b/awscli/customizations/emr/configutils.py
index b893de43d85b..cb4ee4f4d307 100644
--- a/awscli/customizations/emr/configutils.py
+++ b/awscli/customizations/emr/configutils.py
@@ -14,8 +14,7 @@
import os
from awscli.customizations.configure.writer import ConfigFileWriter
-from awscli.customizations.emr.constants import EC2_ROLE_NAME
-from awscli.customizations.emr.constants import EMR_ROLE_NAME
+from awscli.customizations.emr.constants import EC2_ROLE_NAME, EMR_ROLE_NAME
LOG = logging.getLogger(__name__)
@@ -35,21 +34,31 @@ def get_current_profile_var_name(session):
def _get_profile_str(session, separator):
profile_name = session.get_config_variable('profile')
- return 'default' if profile_name is None \
+ return (
+ 'default'
+ if profile_name is None
else 'profile%c%s' % (separator, profile_name)
+ )
def is_any_role_configured(session):
parsed_configs = get_configs(session)
- return True if ('instance_profile' in parsed_configs or
- 'service_role' in parsed_configs) \
+ return (
+ True
+ if (
+ 'instance_profile' in parsed_configs
+ or 'service_role' in parsed_configs
+ )
else False
+ )
def update_roles(session):
if is_any_role_configured(session):
- LOG.debug("At least one of the roles is already associated with "
- "your current profile ")
+ LOG.debug(
+ "At least one of the roles is already associated with "
+ "your current profile "
+ )
else:
config_writer = ConfigWriter(session)
config_writer.update_config('service_role', EMR_ROLE_NAME)
@@ -58,15 +67,14 @@ def update_roles(session):
class ConfigWriter(object):
-
def __init__(self, session):
self.session = session
self.section = _get_profile_str(session, ' ')
self.config_file_writer = ConfigFileWriter()
def update_config(self, key, value):
- config_filename = \
- os.path.expanduser(self.session.get_config_variable('config_file'))
- updated_config = {'__section__': self.section,
- 'emr': {key: value}}
+ config_filename = os.path.expanduser(
+ self.session.get_config_variable('config_file')
+ )
+ updated_config = {'__section__': self.section, 'emr': {key: value}}
self.config_file_writer.update_config(updated_config, config_filename)
diff --git a/awscli/customizations/emr/constants.py b/awscli/customizations/emr/constants.py
index 8d2bb51a6bbe..ff7a64b1ef78 100644
--- a/awscli/customizations/emr/constants.py
+++ b/awscli/customizations/emr/constants.py
@@ -16,7 +16,9 @@
EC2_ROLE_NAME = "EMR_EC2_DefaultRole"
EMR_ROLE_NAME = "EMR_DefaultRole"
EMR_AUTOSCALING_ROLE_NAME = "EMR_AutoScaling_DefaultRole"
-ROLE_ARN_PATTERN = "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}"
+ROLE_ARN_PATTERN = (
+ "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}"
+)
EC2_ROLE_POLICY_NAME = "AmazonElasticMapReduceforEC2Role"
EMR_ROLE_POLICY_NAME = "AmazonElasticMapReduceRole"
EMR_AUTOSCALING_ROLE_POLICY_NAME = "AmazonElasticMapReduceforAutoScalingRole"
@@ -57,12 +59,14 @@
EMRFS_RETRY_PERIOD_KEY = 'fs.s3.consistent.retryPeriodSeconds'
EMRFS_CSE_KEY = 'fs.s3.cse.enabled'
EMRFS_CSE_KMS_KEY_ID_KEY = 'fs.s3.cse.kms.keyId'
-EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY = \
+EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY = (
'fs.s3.cse.encryptionMaterialsProvider'
+)
EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY = 'fs.s3.cse.encryptionMaterialsProvider.uri'
-EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME = ('com.amazon.ws.emr.hadoop.fs.cse.'
- 'KMSEncryptionMaterialsProvider')
+EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME = (
+ 'com.amazon.ws.emr.hadoop.fs.cse.' 'KMSEncryptionMaterialsProvider'
+)
EMRFS_CSE_CUSTOM_S3_GET_BA_PATH = 'file:/usr/share/aws/emr/scripts/s3get'
EMRFS_CUSTOM_DEST_PATH = '/usr/share/aws/emr/auxlib'
@@ -181,16 +185,31 @@
APPLICATION_AUTOSCALING = 'application-autoscaling'
LATEST = 'latest'
-APPLICATIONS = ["HIVE", "PIG", "HBASE", "GANGLIA", "IMPALA", "SPARK", "MAPR",
- "MAPR_M3", "MAPR_M5", "MAPR_M7"]
+APPLICATIONS = [
+ "HIVE",
+ "PIG",
+ "HBASE",
+ "GANGLIA",
+ "IMPALA",
+ "SPARK",
+ "MAPR",
+ "MAPR_M3",
+ "MAPR_M5",
+ "MAPR_M7",
+]
SSH_USER = 'hadoop'
STARTING_STATES = ['STARTING', 'BOOTSTRAPPING']
TERMINATED_STATES = ['TERMINATED', 'TERMINATING', 'TERMINATED_WITH_ERRORS']
# list-clusters
-LIST_CLUSTERS_ACTIVE_STATES = ['STARTING', 'BOOTSTRAPPING', 'RUNNING',
- 'WAITING', 'TERMINATING']
+LIST_CLUSTERS_ACTIVE_STATES = [
+ 'STARTING',
+ 'BOOTSTRAPPING',
+ 'RUNNING',
+ 'WAITING',
+ 'TERMINATING',
+]
LIST_CLUSTERS_TERMINATED_STATES = ['TERMINATED']
LIST_CLUSTERS_FAILED_STATES = ['TERMINATED_WITH_ERRORS']
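One usage note on the constants above, not part of the patch: the double-braced tokens in ROLE_ARN_PATTERN are plain-text placeholders (they are not str.format fields), substituted elsewhere in the EMR customization; a sketch of the assumed expansion::

    ROLE_ARN_PATTERN = (
        "arn:{{region_suffix}}:iam::aws:policy/service-role/{{policy_name}}"
    )
    # Hypothetical substitution; partition and policy name vary by region.
    arn = ROLE_ARN_PATTERN.replace("{{region_suffix}}", "aws").replace(
        "{{policy_name}}", "AmazonElasticMapReduceRole"
    )
    # -> arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole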
diff --git a/awscli/customizations/emr/createcluster.py b/awscli/customizations/emr/createcluster.py
index 17b780ee2ce0..22d333bbe733 100644
--- a/awscli/customizations/emr/createcluster.py
+++ b/awscli/customizations/emr/createcluster.py
@@ -13,22 +13,23 @@
import re
-from awscli.customizations.exceptions import ParamValidationError
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.emr import applicationutils
-from awscli.customizations.emr import argumentschema
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrfsutils
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import exceptions
-from awscli.customizations.emr import hbaseutils
-from awscli.customizations.emr import helptext
-from awscli.customizations.emr import instancegroupsutils
-from awscli.customizations.emr import instancefleetsutils
-from awscli.customizations.emr import steputils
+from awscli.customizations.emr import (
+ applicationutils,
+ argumentschema,
+ constants,
+ emrfsutils,
+ emrutils,
+ exceptions,
+ hbaseutils,
+ helptext,
+ instancefleetsutils,
+ instancegroupsutils,
+ steputils,
+)
from awscli.customizations.emr.command import Command
-from awscli.customizations.emr.constants import EC2_ROLE_NAME
-from awscli.customizations.emr.constants import EMR_ROLE_NAME
+from awscli.customizations.emr.constants import EC2_ROLE_NAME, EMR_ROLE_NAME
+from awscli.customizations.exceptions import ParamValidationError
from botocore.compat import json
@@ -36,114 +37,181 @@ class CreateCluster(Command):
NAME = 'create-cluster'
DESCRIPTION = helptext.CREATE_CLUSTER_DESCRIPTION
ARG_TABLE = [
- {'name': 'release-label',
- 'help_text': helptext.RELEASE_LABEL},
- {'name': 'os-release-label',
- 'help_text': helptext.OS_RELEASE_LABEL},
- {'name': 'ami-version',
- 'help_text': helptext.AMI_VERSION},
- {'name': 'instance-groups',
- 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA,
- 'help_text': helptext.INSTANCE_GROUPS},
- {'name': 'instance-type',
- 'help_text': helptext.INSTANCE_TYPE},
- {'name': 'instance-count',
- 'help_text': helptext.INSTANCE_COUNT},
- {'name': 'auto-terminate', 'action': 'store_true',
- 'group_name': 'auto_terminate',
- 'help_text': helptext.AUTO_TERMINATE},
- {'name': 'no-auto-terminate', 'action': 'store_true',
- 'group_name': 'auto_terminate'},
- {'name': 'instance-fleets',
- 'schema': argumentschema.INSTANCE_FLEETS_SCHEMA,
- 'help_text': helptext.INSTANCE_FLEETS},
- {'name': 'name',
- 'default': 'Development Cluster',
- 'help_text': helptext.CLUSTER_NAME},
- {'name': 'log-uri',
- 'help_text': helptext.LOG_URI},
- {'name': 'log-encryption-kms-key-id',
- 'help_text': helptext.LOG_ENCRYPTION_KMS_KEY_ID},
- {'name': 'service-role',
- 'help_text': helptext.SERVICE_ROLE},
- {'name': 'auto-scaling-role',
- 'help_text': helptext.AUTOSCALING_ROLE},
- {'name': 'use-default-roles', 'action': 'store_true',
- 'help_text': helptext.USE_DEFAULT_ROLES},
- {'name': 'configurations',
- 'help_text': helptext.CONFIGURATIONS},
- {'name': 'ec2-attributes',
- 'help_text': helptext.EC2_ATTRIBUTES,
- 'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA},
- {'name': 'termination-protected', 'action': 'store_true',
- 'group_name': 'termination_protected',
- 'help_text': helptext.TERMINATION_PROTECTED},
- {'name': 'no-termination-protected', 'action': 'store_true',
- 'group_name': 'termination_protected'},
- {'name': 'unhealthy-node-replacement', 'action': 'store_true',
- 'group_name': 'unhealthy_node_replacement',
- 'help_text': helptext.UNHEALTHY_NODE_REPLACEMENT},
- {'name': 'no-unhealthy-node-replacement', 'action': 'store_true',
- 'group_name': 'unhealthy_node_replacement'},
- {'name': 'scale-down-behavior',
- 'help_text': helptext.SCALE_DOWN_BEHAVIOR},
- {'name': 'visible-to-all-users', 'action': 'store_true',
- 'group_name': 'visibility',
- 'help_text': helptext.VISIBILITY},
- {'name': 'no-visible-to-all-users', 'action': 'store_true',
- 'group_name': 'visibility'},
- {'name': 'enable-debugging', 'action': 'store_true',
- 'group_name': 'debug',
- 'help_text': helptext.DEBUGGING},
- {'name': 'no-enable-debugging', 'action': 'store_true',
- 'group_name': 'debug'},
- {'name': 'tags', 'nargs': '+',
- 'help_text': helptext.TAGS,
- 'schema': argumentschema.TAGS_SCHEMA},
- {'name': 'bootstrap-actions',
- 'help_text': helptext.BOOTSTRAP_ACTIONS,
- 'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA},
- {'name': 'applications',
- 'help_text': helptext.APPLICATIONS,
- 'schema': argumentschema.APPLICATIONS_SCHEMA},
- {'name': 'emrfs',
- 'help_text': helptext.EMR_FS,
- 'schema': argumentschema.EMR_FS_SCHEMA},
- {'name': 'steps',
- 'schema': argumentschema.STEPS_SCHEMA,
- 'help_text': helptext.STEPS},
- {'name': 'additional-info',
- 'help_text': helptext.ADDITIONAL_INFO},
- {'name': 'restore-from-hbase-backup',
- 'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA,
- 'help_text': helptext.RESTORE_FROM_HBASE},
- {'name': 'security-configuration',
- 'help_text': helptext.SECURITY_CONFIG},
- {'name': 'custom-ami-id',
- 'help_text' : helptext.CUSTOM_AMI_ID},
- {'name': 'ebs-root-volume-size',
- 'help_text' : helptext.EBS_ROOT_VOLUME_SIZE},
- {'name': 'ebs-root-volume-iops',
- 'help_text' : helptext.EBS_ROOT_VOLUME_IOPS},
- {'name': 'ebs-root-volume-throughput',
- 'help_text' : helptext.EBS_ROOT_VOLUME_THROUGHPUT},
- {'name': 'repo-upgrade-on-boot',
- 'help_text' : helptext.REPO_UPGRADE_ON_BOOT},
- {'name': 'kerberos-attributes',
- 'schema': argumentschema.KERBEROS_ATTRIBUTES_SCHEMA,
- 'help_text': helptext.KERBEROS_ATTRIBUTES},
- {'name': 'step-concurrency-level',
- 'cli_type_name': 'integer',
- 'help_text': helptext.STEP_CONCURRENCY_LEVEL},
- {'name': 'managed-scaling-policy',
- 'schema': argumentschema.MANAGED_SCALING_POLICY_SCHEMA,
- 'help_text': helptext.MANAGED_SCALING_POLICY},
- {'name': 'placement-group-configs',
- 'schema': argumentschema.PLACEMENT_GROUP_CONFIGS_SCHEMA,
- 'help_text': helptext.PLACEMENT_GROUP_CONFIGS},
- {'name': 'auto-termination-policy',
- 'schema': argumentschema.AUTO_TERMINATION_POLICY_SCHEMA,
- 'help_text': helptext.AUTO_TERMINATION_POLICY}
+ {'name': 'release-label', 'help_text': helptext.RELEASE_LABEL},
+ {'name': 'os-release-label', 'help_text': helptext.OS_RELEASE_LABEL},
+ {'name': 'ami-version', 'help_text': helptext.AMI_VERSION},
+ {
+ 'name': 'instance-groups',
+ 'schema': argumentschema.INSTANCE_GROUPS_SCHEMA,
+ 'help_text': helptext.INSTANCE_GROUPS,
+ },
+ {'name': 'instance-type', 'help_text': helptext.INSTANCE_TYPE},
+ {'name': 'instance-count', 'help_text': helptext.INSTANCE_COUNT},
+ {
+ 'name': 'auto-terminate',
+ 'action': 'store_true',
+ 'group_name': 'auto_terminate',
+ 'help_text': helptext.AUTO_TERMINATE,
+ },
+ {
+ 'name': 'no-auto-terminate',
+ 'action': 'store_true',
+ 'group_name': 'auto_terminate',
+ },
+ {
+ 'name': 'instance-fleets',
+ 'schema': argumentschema.INSTANCE_FLEETS_SCHEMA,
+ 'help_text': helptext.INSTANCE_FLEETS,
+ },
+ {
+ 'name': 'name',
+ 'default': 'Development Cluster',
+ 'help_text': helptext.CLUSTER_NAME,
+ },
+ {'name': 'log-uri', 'help_text': helptext.LOG_URI},
+ {
+ 'name': 'log-encryption-kms-key-id',
+ 'help_text': helptext.LOG_ENCRYPTION_KMS_KEY_ID,
+ },
+ {'name': 'service-role', 'help_text': helptext.SERVICE_ROLE},
+ {'name': 'auto-scaling-role', 'help_text': helptext.AUTOSCALING_ROLE},
+ {
+ 'name': 'use-default-roles',
+ 'action': 'store_true',
+ 'help_text': helptext.USE_DEFAULT_ROLES,
+ },
+ {'name': 'configurations', 'help_text': helptext.CONFIGURATIONS},
+ {
+ 'name': 'ec2-attributes',
+ 'help_text': helptext.EC2_ATTRIBUTES,
+ 'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA,
+ },
+ {
+ 'name': 'termination-protected',
+ 'action': 'store_true',
+ 'group_name': 'termination_protected',
+ 'help_text': helptext.TERMINATION_PROTECTED,
+ },
+ {
+ 'name': 'no-termination-protected',
+ 'action': 'store_true',
+ 'group_name': 'termination_protected',
+ },
+ {
+ 'name': 'unhealthy-node-replacement',
+ 'action': 'store_true',
+ 'group_name': 'unhealthy_node_replacement',
+ 'help_text': helptext.UNHEALTHY_NODE_REPLACEMENT,
+ },
+ {
+ 'name': 'no-unhealthy-node-replacement',
+ 'action': 'store_true',
+ 'group_name': 'unhealthy_node_replacement',
+ },
+ {
+ 'name': 'scale-down-behavior',
+ 'help_text': helptext.SCALE_DOWN_BEHAVIOR,
+ },
+ {
+ 'name': 'visible-to-all-users',
+ 'action': 'store_true',
+ 'group_name': 'visibility',
+ 'help_text': helptext.VISIBILITY,
+ },
+ {
+ 'name': 'no-visible-to-all-users',
+ 'action': 'store_true',
+ 'group_name': 'visibility',
+ },
+ {
+ 'name': 'enable-debugging',
+ 'action': 'store_true',
+ 'group_name': 'debug',
+ 'help_text': helptext.DEBUGGING,
+ },
+ {
+ 'name': 'no-enable-debugging',
+ 'action': 'store_true',
+ 'group_name': 'debug',
+ },
+ {
+ 'name': 'tags',
+ 'nargs': '+',
+ 'help_text': helptext.TAGS,
+ 'schema': argumentschema.TAGS_SCHEMA,
+ },
+ {
+ 'name': 'bootstrap-actions',
+ 'help_text': helptext.BOOTSTRAP_ACTIONS,
+ 'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA,
+ },
+ {
+ 'name': 'applications',
+ 'help_text': helptext.APPLICATIONS,
+ 'schema': argumentschema.APPLICATIONS_SCHEMA,
+ },
+ {
+ 'name': 'emrfs',
+ 'help_text': helptext.EMR_FS,
+ 'schema': argumentschema.EMR_FS_SCHEMA,
+ },
+ {
+ 'name': 'steps',
+ 'schema': argumentschema.STEPS_SCHEMA,
+ 'help_text': helptext.STEPS,
+ },
+ {'name': 'additional-info', 'help_text': helptext.ADDITIONAL_INFO},
+ {
+ 'name': 'restore-from-hbase-backup',
+ 'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA,
+ 'help_text': helptext.RESTORE_FROM_HBASE,
+ },
+ {
+ 'name': 'security-configuration',
+ 'help_text': helptext.SECURITY_CONFIG,
+ },
+ {'name': 'custom-ami-id', 'help_text': helptext.CUSTOM_AMI_ID},
+ {
+ 'name': 'ebs-root-volume-size',
+ 'help_text': helptext.EBS_ROOT_VOLUME_SIZE,
+ },
+ {
+ 'name': 'ebs-root-volume-iops',
+ 'help_text': helptext.EBS_ROOT_VOLUME_IOPS,
+ },
+ {
+ 'name': 'ebs-root-volume-throughput',
+ 'help_text': helptext.EBS_ROOT_VOLUME_THROUGHPUT,
+ },
+ {
+ 'name': 'repo-upgrade-on-boot',
+ 'help_text': helptext.REPO_UPGRADE_ON_BOOT,
+ },
+ {
+ 'name': 'kerberos-attributes',
+ 'schema': argumentschema.KERBEROS_ATTRIBUTES_SCHEMA,
+ 'help_text': helptext.KERBEROS_ATTRIBUTES,
+ },
+ {
+ 'name': 'step-concurrency-level',
+ 'cli_type_name': 'integer',
+ 'help_text': helptext.STEP_CONCURRENCY_LEVEL,
+ },
+ {
+ 'name': 'managed-scaling-policy',
+ 'schema': argumentschema.MANAGED_SCALING_POLICY_SCHEMA,
+ 'help_text': helptext.MANAGED_SCALING_POLICY,
+ },
+ {
+ 'name': 'placement-group-configs',
+ 'schema': argumentschema.PLACEMENT_GROUP_CONFIGS_SCHEMA,
+ 'help_text': helptext.PLACEMENT_GROUP_CONFIGS,
+ },
+ {
+ 'name': 'auto-termination-policy',
+ 'schema': argumentschema.AUTO_TERMINATION_POLICY_SCHEMA,
+ 'help_text': helptext.AUTO_TERMINATION_POLICY,
+ },
]
SYNOPSIS = BasicCommand.FROM_FILE('emr', 'create-cluster-synopsis.txt')
EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-cluster-examples.rst')
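The paired store_true options in the table above ('auto_terminate', 'termination_protected', 'visibility', 'debug', 'unhealthy_node_replacement') are the kind of inputs resolved by emrutils.apply_boolean_options, which is reformatted later in this patch and rejects supplying both flags of a pair. A small usage sketch, assuming it is invoked with the parsed flag values::

    # Sketch only, not part of the patch.
    from awscli.customizations.emr.emrutils import apply_boolean_options

    apply_boolean_options(
        True, '--auto-terminate', False, '--no-auto-terminate'
    )  # returns True
    apply_boolean_options(
        True, '--auto-terminate', True, '--no-auto-terminate'
    )  # raises ParamValidationError: cannot use both options together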
@@ -156,71 +224,95 @@ def _run_main_command(self, parsed_args, parsed_globals):
service_role_validation_message = (
" Either choose --use-default-roles or use both --service-role "
- "The IAM endpoint to call for creating the roles.' - ' This is optional and should only be specified when a' - ' custom endpoint should be called for IAM operations' - '.
'} + { + 'name': 'iam-endpoint', + 'no_paramfile': True, + 'help_text': 'The IAM endpoint to call for creating the roles.' + ' This is optional and should only be specified when a' + ' custom endpoint should be called for IAM operations' + '.
', + } ] def _run_main_command(self, parsed_args, parsed_globals): - self.iam_endpoint_url = parsed_args.iam_endpoint self._check_for_iam_endpoint(self.region, self.iam_endpoint_url) - self.emr_endpoint_url = \ - self._session.create_client( - 'emr', - region_name=self.region, - endpoint_url=parsed_globals.endpoint_url, - verify=parsed_globals.verify_ssl).meta.endpoint_url - - LOG.debug('elasticmapreduce endpoint used for resolving' - ' service principal: ' + self.emr_endpoint_url) + self.emr_endpoint_url = self._session.create_client( + 'emr', + region_name=self.region, + endpoint_url=parsed_globals.endpoint_url, + verify=parsed_globals.verify_ssl, + ).meta.endpoint_url + + LOG.debug( + 'elasticmapreduce endpoint used for resolving' + ' service principal: ' + self.emr_endpoint_url + ) # Create default EC2 Role for EMR if it does not exist. - ec2_result, ec2_policy = self._create_role_if_not_exists(parsed_globals, EC2_ROLE_NAME, - EC2_ROLE_POLICY_NAME, [EC2]) + ec2_result, ec2_policy = self._create_role_if_not_exists( + parsed_globals, EC2_ROLE_NAME, EC2_ROLE_POLICY_NAME, [EC2] + ) # Create default EC2 Instance Profile for EMR if it does not exist. instance_profile_name = EC2_ROLE_NAME - if self.check_if_instance_profile_exists(instance_profile_name, - parsed_globals): + if self.check_if_instance_profile_exists( + instance_profile_name, parsed_globals + ): LOG.debug('Instance Profile ' + instance_profile_name + ' exists.') else: - LOG.debug('Instance Profile ' + instance_profile_name + - 'does not exist. Creating default Instance Profile ' + - instance_profile_name) - self._create_instance_profile_with_role(instance_profile_name, - instance_profile_name, - parsed_globals) + LOG.debug( + 'Instance Profile ' + + instance_profile_name + + 'does not exist. Creating default Instance Profile ' + + instance_profile_name + ) + self._create_instance_profile_with_role( + instance_profile_name, instance_profile_name, parsed_globals + ) # Create default EMR Role if it does not exist. - emr_result, emr_policy = self._create_role_if_not_exists(parsed_globals, EMR_ROLE_NAME, - EMR_ROLE_POLICY_NAME, [EMR]) + emr_result, emr_policy = self._create_role_if_not_exists( + parsed_globals, EMR_ROLE_NAME, EMR_ROLE_POLICY_NAME, [EMR] + ) # Create default EMR AutoScaling Role if it does not exist. - emr_autoscaling_result, emr_autoscaling_policy = \ - self._create_role_if_not_exists(parsed_globals, EMR_AUTOSCALING_ROLE_NAME, - EMR_AUTOSCALING_ROLE_POLICY_NAME, [EMR, APPLICATION_AUTOSCALING]) + emr_autoscaling_result, emr_autoscaling_policy = ( + self._create_role_if_not_exists( + parsed_globals, + EMR_AUTOSCALING_ROLE_NAME, + EMR_AUTOSCALING_ROLE_POLICY_NAME, + [EMR, APPLICATION_AUTOSCALING], + ) + ) configutils.update_roles(self._session) emrutils.display_response( self._session, 'create_role', - self._construct_result(ec2_result, ec2_policy, - emr_result, emr_policy, - emr_autoscaling_result, emr_autoscaling_policy), - parsed_globals) + self._construct_result( + ec2_result, + ec2_policy, + emr_result, + emr_policy, + emr_autoscaling_result, + emr_autoscaling_policy, + ), + parsed_globals, + ) return 0 - def _create_role_if_not_exists(self, parsed_globals, role_name, policy_name, service_names): + def _create_role_if_not_exists( + self, parsed_globals, role_name, policy_name, service_names + ): result = None policy = None if self.check_if_role_exists(role_name, parsed_globals): LOG.debug('Role ' + role_name + ' exists.') else: - LOG.debug('Role ' + role_name + ' does not exist.' 
- ' Creating default role: ' + role_name) + LOG.debug( + 'Role ' + role_name + ' does not exist.' + ' Creating default role: ' + role_name + ) role_arn = get_role_policy_arn(self.region, policy_name) result = self._create_role_with_role_policy( - role_name, service_names, role_arn, parsed_globals) + role_name, service_names, role_arn, parsed_globals + ) policy = self._get_role_policy(role_arn, parsed_globals) return result, policy @@ -205,20 +233,30 @@ def _check_for_iam_endpoint(self, region, iam_endpoint): if iam_endpoint is None: raise exceptions.UnknownIamEndpointError(region=region) - def _construct_result(self, ec2_response, ec2_policy, - emr_response, emr_policy, - emr_autoscaling_response, emr_autoscaling_policy): + def _construct_result( + self, + ec2_response, + ec2_policy, + emr_response, + emr_policy, + emr_autoscaling_response, + emr_autoscaling_policy, + ): result = [] self._construct_role_and_role_policy_structure( - result, ec2_response, ec2_policy) + result, ec2_response, ec2_policy + ) self._construct_role_and_role_policy_structure( - result, emr_response, emr_policy) + result, emr_response, emr_policy + ) self._construct_role_and_role_policy_structure( - result, emr_autoscaling_response, emr_autoscaling_policy) + result, emr_autoscaling_response, emr_autoscaling_policy + ) return result def _construct_role_and_role_policy_structure( - self, list, response, policy): + self, list, response, policy + ): if response is not None and response['Role'] is not None: list.append({'Role': response['Role'], 'RolePolicy': policy}) return list @@ -240,12 +278,14 @@ def check_if_role_exists(self, role_name, parsed_globals): return True - def check_if_instance_profile_exists(self, instance_profile_name, - parsed_globals): + def check_if_instance_profile_exists( + self, instance_profile_name, parsed_globals + ): parameters = {'InstanceProfileName': instance_profile_name} try: - self._call_iam_operation('GetInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'GetInstanceProfile', parameters, parsed_globals + ) except botocore.exceptions.ClientError as e: profile_not_found_code = 'NoSuchEntity' error_code = e.response.get('Error', {}).get('Code') @@ -261,59 +301,74 @@ def check_if_instance_profile_exists(self, instance_profile_name, def _get_role_policy(self, arn, parsed_globals): parameters = {} parameters['PolicyArn'] = arn - policy_details = self._call_iam_operation('GetPolicy', parameters, - parsed_globals) + policy_details = self._call_iam_operation( + 'GetPolicy', parameters, parsed_globals + ) parameters["VersionId"] = policy_details["Policy"]["DefaultVersionId"] - policy_version_details = self._call_iam_operation('GetPolicyVersion', - parameters, - parsed_globals) + policy_version_details = self._call_iam_operation( + 'GetPolicyVersion', parameters, parsed_globals + ) return policy_version_details["PolicyVersion"]["Document"] def _create_role_with_role_policy( - self, role_name, service_names, role_arn, parsed_globals): - + self, role_name, service_names, role_arn, parsed_globals + ): if len(service_names) == 1: service_principal = get_service_principal( - service_names[0], self.emr_endpoint_url, self._session) + service_names[0], self.emr_endpoint_url, self._session + ) else: service_principal = [] for service in service_names: - service_principal.append(get_service_principal( - service, self.emr_endpoint_url, self._session)) + service_principal.append( + get_service_principal( + service, self.emr_endpoint_url, self._session + ) + ) - LOG.debug(f'Adding 
service principal(s) to trust policy: {service_principal}') + LOG.debug( + f'Adding service principal(s) to trust policy: {service_principal}' + ) parameters = {'RoleName': role_name} - _assume_role_policy = \ - emrutils.dict_to_string(assume_role_policy(service_principal)) + _assume_role_policy = emrutils.dict_to_string( + assume_role_policy(service_principal) + ) parameters['AssumeRolePolicyDocument'] = _assume_role_policy - create_role_response = self._call_iam_operation('CreateRole', - parameters, - parsed_globals) + create_role_response = self._call_iam_operation( + 'CreateRole', parameters, parsed_globals + ) parameters = {} parameters['PolicyArn'] = role_arn parameters['RoleName'] = role_name - self._call_iam_operation('AttachRolePolicy', - parameters, parsed_globals) + self._call_iam_operation( + 'AttachRolePolicy', parameters, parsed_globals + ) return create_role_response - def _create_instance_profile_with_role(self, instance_profile_name, - role_name, parsed_globals): + def _create_instance_profile_with_role( + self, instance_profile_name, role_name, parsed_globals + ): # Creating an Instance Profile parameters = {'InstanceProfileName': instance_profile_name} - self._call_iam_operation('CreateInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'CreateInstanceProfile', parameters, parsed_globals + ) # Adding the role to the Instance Profile parameters = {} parameters['InstanceProfileName'] = instance_profile_name parameters['RoleName'] = role_name - self._call_iam_operation('AddRoleToInstanceProfile', parameters, - parsed_globals) + self._call_iam_operation( + 'AddRoleToInstanceProfile', parameters, parsed_globals + ) def _call_iam_operation(self, operation_name, parameters, parsed_globals): client = self._session.create_client( - 'iam', region_name=self.region, endpoint_url=self.iam_endpoint_url, - verify=parsed_globals.verify_ssl) + 'iam', + region_name=self.region, + endpoint_url=self.iam_endpoint_url, + verify=parsed_globals.verify_ssl, + ) return getattr(client, xform_name(operation_name))(**parameters) diff --git a/awscli/customizations/emr/describecluster.py b/awscli/customizations/emr/describecluster.py index 01f4b40b6d40..22478bde6b4a 100644 --- a/awscli/customizations/emr/describecluster.py +++ b/awscli/customizations/emr/describecluster.py @@ -12,9 +12,7 @@ # language governing permissions and limitations under the License. 
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import helptext
+from awscli.customizations.emr import constants, emrutils, helptext
from awscli.customizations.emr.command import Command
from botocore.exceptions import NoCredentialsError
@@ -23,8 +21,11 @@ class DescribeCluster(Command):
NAME = 'describe-cluster'
DESCRIPTION = helptext.DESCRIBE_CLUSTER_DESCRIPTION
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ }
]
def _run_main_command(self, parsed_args, parsed_globals):
@@ -34,44 +35,61 @@ def _run_main_command(self, parsed_args, parsed_globals):
is_fleet_based_cluster = False
describe_cluster_result = self._call(
- self._session, 'describe_cluster', parameters, parsed_globals)
-
+ self._session, 'describe_cluster', parameters, parsed_globals
+ )
if 'Cluster' in describe_cluster_result:
describe_cluster = describe_cluster_result['Cluster']
- if describe_cluster.get('InstanceCollectionType') == constants.INSTANCE_FLEET_TYPE:
+ if (
+ describe_cluster.get('InstanceCollectionType')
+ == constants.INSTANCE_FLEET_TYPE
+ ):
is_fleet_based_cluster = True
if is_fleet_based_cluster:
list_instance_fleets_result = self._call(
- self._session, 'list_instance_fleets', parameters,
- parsed_globals)
+ self._session,
+ 'list_instance_fleets',
+ parameters,
+ parsed_globals,
+ )
else:
list_instance_groups_result = self._call(
- self._session, 'list_instance_groups', parameters,
- parsed_globals)
+ self._session,
+ 'list_instance_groups',
+ parameters,
+ parsed_globals,
+ )
list_bootstrap_actions_result = self._call(
- self._session, 'list_bootstrap_actions',
- parameters, parsed_globals)
+ self._session, 'list_bootstrap_actions', parameters, parsed_globals
+ )
constructed_result = self._construct_result(
describe_cluster_result,
list_instance_fleets_result,
list_instance_groups_result,
- list_bootstrap_actions_result)
+ list_bootstrap_actions_result,
+ )
- emrutils.display_response(self._session, 'describe_cluster',
- constructed_result, parsed_globals)
+ emrutils.display_response(
+ self._session,
+ 'describe_cluster',
+ constructed_result,
+ parsed_globals,
+ )
return 0
def _call(self, session, operation_name, parameters, parsed_globals):
return emrutils.call(
- session, operation_name, parameters,
+ session,
+ operation_name,
+ parameters,
region_name=self.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl)
+ verify=parsed_globals.verify_ssl,
+ )
def _get_key_of_result(self, keys):
# Return the first key that is not "Marker"
@@ -80,23 +98,36 @@ def _get_key_of_result(self, keys):
return key
def _construct_result(
- self, describe_cluster_result, list_instance_fleets_result,
- list_instance_groups_result, list_bootstrap_actions_result):
+ self,
+ describe_cluster_result,
+ list_instance_fleets_result,
+ list_instance_groups_result,
+ list_bootstrap_actions_result,
+ ):
result = describe_cluster_result
result['Cluster']['BootstrapActions'] = []
- if (list_instance_fleets_result is not None and
- list_instance_fleets_result.get('InstanceFleets') is not None):
- result['Cluster']['InstanceFleets'] = \
+ if (
+ list_instance_fleets_result is not None
+ and list_instance_fleets_result.get('InstanceFleets') is not None
+ ):
+ result['Cluster']['InstanceFleets'] = (
list_instance_fleets_result.get('InstanceFleets')
- if (list_instance_groups_result is not None and
- list_instance_groups_result.get('InstanceGroups') is not None):
- result['Cluster']['InstanceGroups'] = \
+ )
+ if (
+ list_instance_groups_result is not None
+ and list_instance_groups_result.get('InstanceGroups') is not None
+ ):
+ result['Cluster']['InstanceGroups'] = (
list_instance_groups_result.get('InstanceGroups')
- if (list_bootstrap_actions_result is not None and
- list_bootstrap_actions_result.get('BootstrapActions')
- is not None):
- result['Cluster']['BootstrapActions'] = \
+ )
+ if (
+ list_bootstrap_actions_result is not None
+ and list_bootstrap_actions_result.get('BootstrapActions')
+ is not None
+ ):
+ result['Cluster']['BootstrapActions'] = (
list_bootstrap_actions_result['BootstrapActions']
+ )
return result
diff --git a/awscli/customizations/emr/emr.py b/awscli/customizations/emr/emr.py
index fc42bfcecf39..361d129f4e76 100644
--- a/awscli/customizations/emr/emr.py
+++ b/awscli/customizations/emr/emr.py
@@ -11,20 +11,20 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import hbase
-from awscli.customizations.emr import ssh
+from awscli.customizations.emr import hbase, ssh
+from awscli.customizations.emr.addinstancegroups import AddInstanceGroups
from awscli.customizations.emr.addsteps import AddSteps
+from awscli.customizations.emr.addtags import modify_tags_argument
+from awscli.customizations.emr.command import override_args_required_option
from awscli.customizations.emr.createcluster import CreateCluster
-from awscli.customizations.emr.addinstancegroups import AddInstanceGroups
from awscli.customizations.emr.createdefaultroles import CreateDefaultRoles
-from awscli.customizations.emr.modifyclusterattributes import ModifyClusterAttr
-from awscli.customizations.emr.installapplications import InstallApplications
from awscli.customizations.emr.describecluster import DescribeCluster
+from awscli.customizations.emr.installapplications import InstallApplications
+from awscli.customizations.emr.listclusters import (
+ modify_list_clusters_argument,
+)
+from awscli.customizations.emr.modifyclusterattributes import ModifyClusterAttr
from awscli.customizations.emr.terminateclusters import TerminateClusters
-from awscli.customizations.emr.addtags import modify_tags_argument
-from awscli.customizations.emr.listclusters \
- import modify_list_clusters_argument
-from awscli.customizations.emr.command import override_args_required_option
def emr_initialize(cli):
@@ -35,9 +35,12 @@ def emr_initialize(cli):
cli.register('building-argument-table.emr.add-tags', modify_tags_argument)
cli.register(
'building-argument-table.emr.list-clusters',
- modify_list_clusters_argument)
- cli.register('before-building-argument-table-parser.emr.*',
- override_args_required_option)
+ modify_list_clusters_argument,
+ )
+ cli.register(
+ 'before-building-argument-table-parser.emr.*',
+ override_args_required_option,
+ )
def register_commands(command_table, session, **kwargs):
@@ -52,12 +55,12 @@ def register_commands(command_table, session, **kwargs):
command_table['install-applications'] = InstallApplications(session)
command_table['create-cluster'] = CreateCluster(session)
command_table['add-steps'] = AddSteps(session)
- command_table['restore-from-hbase-backup'] = \
- hbase.RestoreFromHBaseBackup(session)
+ command_table['restore-from-hbase-backup'] = hbase.RestoreFromHBaseBackup(
+ session
+ )
command_table['create-hbase-backup'] = hbase.CreateHBaseBackup(session)
command_table['schedule-hbase-backup'] = hbase.ScheduleHBaseBackup(session)
- command_table['disable-hbase-backups'] = \
- hbase.DisableHBaseBackups(session)
+ command_table['disable-hbase-backups'] = hbase.DisableHBaseBackups(session)
command_table['create-default-roles'] = CreateDefaultRoles(session)
command_table['add-instance-groups'] = AddInstanceGroups(session)
command_table['ssh'] = ssh.SSH(session)
diff --git a/awscli/customizations/emr/emrfsutils.py b/awscli/customizations/emr/emrfsutils.py
index ab6bdadc85d0..2c040e0f9b8f 100644
--- a/awscli/customizations/emr/emrfsutils.py
+++ b/awscli/customizations/emr/emrfsutils.py
@@ -11,12 +11,9 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import constants, emrutils, exceptions
from botocore.compat import OrderedDict
-
CONSISTENT_OPTIONAL_KEYS = ['RetryCount', 'RetryPeriod']
CSE_KMS_REQUIRED_KEYS = ['KMSKeyId']
CSE_CUSTOM_REQUIRED_KEYS = ['CustomProviderLocation', 'CustomProviderClass']
@@ -40,20 +37,26 @@ def build_bootstrap_action_configs(region, emrfs_args):
emrutils.build_bootstrap_action(
path=constants.EMRFS_CSE_CUSTOM_S3_GET_BA_PATH,
name=constants.S3_GET_BA_NAME,
- args=[constants.S3_GET_BA_SRC,
- emrfs_args.get('CustomProviderLocation'),
- constants.S3_GET_BA_DEST,
- constants.EMRFS_CUSTOM_DEST_PATH,
- constants.S3_GET_BA_FORCE]))
+ args=[
+ constants.S3_GET_BA_SRC,
+ emrfs_args.get('CustomProviderLocation'),
+ constants.S3_GET_BA_DEST,
+ constants.EMRFS_CUSTOM_DEST_PATH,
+ constants.S3_GET_BA_FORCE,
+ ],
+ )
+ )
emrfs_setup_ba_args = _build_ba_args_to_setup_emrfs(emrfs_args)
bootstrap_actions.append(
emrutils.build_bootstrap_action(
path=emrutils.build_s3_link(
- relative_path=constants.CONFIG_HADOOP_PATH,
- region=region),
+ relative_path=constants.CONFIG_HADOOP_PATH, region=region
+ ),
name=constants.EMRFS_BA_NAME,
- args=emrfs_setup_ba_args))
+ args=emrfs_setup_ba_args,
+ )
+ )
return bootstrap_actions
@@ -63,74 +66,95 @@ def build_emrfs_confiuration(emrfs_args):
emrfs_properties = _build_emrfs_properties(emrfs_args)
if _need_to_configure_cse(emrfs_args, 'CUSTOM'):
- emrfs_properties[constants.EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY] = \
+ emrfs_properties[constants.EMRFS_CSE_CUSTOM_PROVIDER_URI_KEY] = (
emrfs_args.get('CustomProviderLocation')
+ )
emrfs_configuration = {
'Classification': constants.EMRFS_SITE,
- 'Properties': emrfs_properties}
+ 'Properties': emrfs_properties,
+ }
return emrfs_configuration
def _verify_emrfs_args(emrfs_args):
# Encryption should have a valid value
- if 'Encryption' in emrfs_args \
- and emrfs_args['Encryption'].upper() not in ENCRYPTION_TYPES:
+ if (
+ 'Encryption' in emrfs_args
+ and emrfs_args['Encryption'].upper() not in ENCRYPTION_TYPES
+ ):
raise exceptions.UnknownEncryptionTypeError(
- encryption=emrfs_args['Encryption'])
+ encryption=emrfs_args['Encryption']
+ )
# Only one of SSE and Encryption should be configured
if 'SSE' in emrfs_args and 'Encryption' in emrfs_args:
raise exceptions.BothSseAndEncryptionConfiguredError(
- sse=emrfs_args['SSE'], encryption=emrfs_args['Encryption'])
+ sse=emrfs_args['SSE'], encryption=emrfs_args['Encryption']
+ )
# CSE should be configured correctly
# ProviderType should be present and should have valid value
# Given the type, the required parameters should be present
- if ('Encryption' in emrfs_args and
- emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE):
+ if (
+ 'Encryption' in emrfs_args
+ and emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE
+ ):
if 'ProviderType' not in emrfs_args:
raise exceptions.MissingParametersError(
- object_name=CSE_OPTION_NAME, missing='ProviderType')
+ object_name=CSE_OPTION_NAME, missing='ProviderType'
+ )
elif emrfs_args['ProviderType'].upper() not in CSE_PROVIDER_TYPES:
raise exceptions.UnknownCseProviderTypeError(
- provider_type=emrfs_args['ProviderType'])
+ provider_type=emrfs_args['ProviderType']
+ )
elif emrfs_args['ProviderType'].upper() == 'KMS':
- _verify_required_args(emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS,
- CSE_KMS_OPTION_NAME)
+ _verify_required_args(
+ emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, CSE_KMS_OPTION_NAME
+ )
elif emrfs_args['ProviderType'].upper() == 'CUSTOM':
- _verify_required_args(emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS,
- CSE_CUSTOM_OPTION_NAME)
+ _verify_required_args(
+ emrfs_args.keys(),
+ CSE_CUSTOM_REQUIRED_KEYS,
+ CSE_CUSTOM_OPTION_NAME,
+ )
# No child attributes should be present if the parent feature is not
# configured
if 'Consistent' not in emrfs_args:
- _verify_child_args(emrfs_args.keys(), CONSISTENT_OPTIONAL_KEYS,
- CONSISTENT_OPTION_NAME)
+ _verify_child_args(
+ emrfs_args.keys(), CONSISTENT_OPTIONAL_KEYS, CONSISTENT_OPTION_NAME
+ )
if not _need_to_configure_cse(emrfs_args, 'KMS'):
- _verify_child_args(emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS,
- CSE_KMS_OPTION_NAME)
+ _verify_child_args(
+ emrfs_args.keys(), CSE_KMS_REQUIRED_KEYS, CSE_KMS_OPTION_NAME
+ )
if not _need_to_configure_cse(emrfs_args, 'CUSTOM'):
- _verify_child_args(emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS,
- CSE_CUSTOM_OPTION_NAME)
+ _verify_child_args(
+ emrfs_args.keys(), CSE_CUSTOM_REQUIRED_KEYS, CSE_CUSTOM_OPTION_NAME
+ )
def _verify_required_args(actual_keys, required_keys, object_name):
if any(x not in actual_keys for x in required_keys):
missing_keys = list(
- sorted(set(required_keys).difference(set(actual_keys))))
+ sorted(set(required_keys).difference(set(actual_keys)))
+ )
raise exceptions.MissingParametersError(
- object_name=object_name, missing=emrutils.join(missing_keys))
+ object_name=object_name, missing=emrutils.join(missing_keys)
+ )
def _verify_child_args(actual_keys, child_keys, parent_object_name):
if any(x in actual_keys for x in child_keys):
invalid_keys = list(
- sorted(set(child_keys).intersection(set(actual_keys))))
+ sorted(set(child_keys).intersection(set(actual_keys)))
+ )
raise exceptions.InvalidEmrFsArgumentsError(
invalid=emrutils.join(invalid_keys),
- parent_object_name=parent_object_name)
+ parent_object_name=parent_object_name,
+ )
def _build_ba_args_to_setup_emrfs(emrfs_args):
@@ -170,29 +194,35 @@ def _need_to_configure_consistent_view(emrfs_args):
def _need_to_configure_sse(emrfs_args):
- return 'SSE' in emrfs_args \
- or ('Encryption' in emrfs_args and
- emrfs_args['Encryption'].upper() == constants.EMRFS_SERVER_SIDE)
+ return 'SSE' in emrfs_args or (
+ 'Encryption' in emrfs_args
+ and emrfs_args['Encryption'].upper() == constants.EMRFS_SERVER_SIDE
+ )
def _need_to_configure_cse(emrfs_args, cse_type):
- return ('Encryption' in emrfs_args and
- emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE and
- 'ProviderType' in emrfs_args and
- emrfs_args['ProviderType'].upper() == cse_type)
+ return (
+ 'Encryption' in emrfs_args
+ and emrfs_args['Encryption'].upper() == constants.EMRFS_CLIENT_SIDE
+ and 'ProviderType' in emrfs_args
+ and emrfs_args['ProviderType'].upper() == cse_type
+ )
def _update_properties_for_consistent_view(emrfs_properties, emrfs_args):
- emrfs_properties[constants.EMRFS_CONSISTENT_KEY] = \
- str(emrfs_args['Consistent']).lower()
+ emrfs_properties[constants.EMRFS_CONSISTENT_KEY] = str(
+ emrfs_args['Consistent']
+ ).lower()
if 'RetryCount' in emrfs_args:
- emrfs_properties[constants.EMRFS_RETRY_COUNT_KEY] = \
- str(emrfs_args['RetryCount'])
+ emrfs_properties[constants.EMRFS_RETRY_COUNT_KEY] = str(
+ emrfs_args['RetryCount']
+ )
if 'RetryPeriod' in emrfs_args:
- emrfs_properties[constants.EMRFS_RETRY_PERIOD_KEY] = \
- str(emrfs_args['RetryPeriod'])
+ emrfs_properties[constants.EMRFS_RETRY_PERIOD_KEY] = str(
+ emrfs_args['RetryPeriod']
+ )
def _update_properties_for_sse(emrfs_properties, emrfs_args):
@@ -206,16 +236,17 @@ def _update_properties_for_cse(emrfs_properties, emrfs_args, cse_type):
emrfs_properties[constants.EMRFS_CSE_KEY] = 'true'
if cse_type == 'KMS':
emrfs_properties[
- constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY] = \
- constants.EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME
+ constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY
+ ] = constants.EMRFS_CSE_KMS_PROVIDER_FULL_CLASS_NAME
- emrfs_properties[constants.EMRFS_CSE_KMS_KEY_ID_KEY] =\
- emrfs_args['KMSKeyId']
+ emrfs_properties[constants.EMRFS_CSE_KMS_KEY_ID_KEY] = emrfs_args[
+ 'KMSKeyId'
+ ]
elif cse_type == 'CUSTOM':
emrfs_properties[
- constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY] = \
- emrfs_args['CustomProviderClass']
+ constants.EMRFS_CSE_ENCRYPTION_MATERIALS_PROVIDER_KEY
+ ] = emrfs_args['CustomProviderClass']
def _update_emrfs_ba_args(ba_args, key_value):
diff --git a/awscli/customizations/emr/emrutils.py b/awscli/customizations/emr/emrutils.py
index d4fc1991147f..016cabe7bfa0 100644
--- a/awscli/customizations/emr/emrutils.py
+++ b/awscli/customizations/emr/emrutils.py
@@ -15,13 +15,11 @@
import logging
import os
-
from awscli.clidriver import CLIOperationCaller
+from awscli.customizations.emr import constants, exceptions
from awscli.customizations.exceptions import ParamValidationError
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import exceptions
-from botocore.exceptions import WaiterError, NoCredentialsError
from botocore import xform_name
+from botocore.exceptions import NoCredentialsError, WaiterError
LOG = logging.getLogger(__name__)
@@ -57,11 +55,16 @@ def parse_key_value_string(key_value_string):
def apply_boolean_options(
- true_option, true_option_name, false_option, false_option_name):
+ true_option, true_option_name, false_option, false_option_name
+):
if true_option and false_option:
- error_message = \
- 'aws: error: cannot use both ' + true_option_name + \
- ' and ' + false_option_name + ' options together.'
+ error_message = (
+ 'aws: error: cannot use both '
+ + true_option_name
+ + ' and '
+ + false_option_name
+ + ' options together.'
+ )
raise ParamValidationError(error_message)
elif true_option:
return True
@@ -92,13 +95,14 @@ def apply_params(src_params, src_key, dest_params, dest_key):
def build_step(
- jar, name='Step',
- action_on_failure=constants.DEFAULT_FAILURE_ACTION,
- args=None,
- main_class=None,
- properties=None):
- check_required_field(
- structure='HadoopJarStep', name='Jar', value=jar)
+ jar,
+ name='Step',
+ action_on_failure=constants.DEFAULT_FAILURE_ACTION,
+ args=None,
+ main_class=None,
+ properties=None,
+):
+ check_required_field(structure='HadoopJarStep', name='Jar', value=jar)
step = {}
apply_dict(step, 'Name', name)
@@ -113,13 +117,11 @@ def build_step(
return step
-def build_bootstrap_action(
- path,
- name='Bootstrap Action',
- args=None):
+def build_bootstrap_action(path, name='Bootstrap Action', args=None):
if path is None:
raise exceptions.MissingParametersError(
- object_name='ScriptBootstrapActionConfig', missing='Path')
+ object_name='ScriptBootstrapActionConfig', missing='Path'
+ )
ba_config = {}
apply_dict(ba_config, 'Name', name)
script_config = {}
@@ -140,13 +142,15 @@ def get_script_runner(region='us-east-1'):
if region is None:
region = 'us-east-1'
return build_s3_link(
- relative_path=constants.SCRIPT_RUNNER_PATH, region=region)
+ relative_path=constants.SCRIPT_RUNNER_PATH, region=region
+ )
def check_required_field(structure, name, value):
if not value:
raise exceptions.MissingParametersError(
- object_name=structure, missing=name)
+ object_name=structure, missing=name
+ )
def check_empty_string_list(name, value):
@@ -154,8 +158,14 @@ def check_empty_string_list(name, value):
raise exceptions.EmptyListError(param=name)
-def call(session, operation_name, parameters, region_name=None,
- endpoint_url=None, verify=None):
+def call(
+ session,
+ operation_name,
+ parameters,
+ region_name=None,
+ endpoint_url=None,
+ verify=None,
+):
# We could get an error from get_endpoint() about not having
# a region configured. Before this happens we want to check
# for credentials so we can give a good error message.
@@ -163,8 +173,11 @@ def call(session, operation_name, parameters, region_name=None,
raise NoCredentialsError()
client = session.create_client(
- 'emr', region_name=region_name, endpoint_url=endpoint_url,
- verify=verify)
+ 'emr',
+ region_name=region_name,
+ endpoint_url=endpoint_url,
+ verify=verify,
+ )
LOG.debug('Calling ' + str(operation_name))
return getattr(client, operation_name)(**parameters)
@@ -182,7 +195,8 @@ def get_client(session, parsed_globals):
'emr',
region_name=get_region(session, parsed_globals),
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl)
+ verify=parsed_globals.verify_ssl,
+ )
def get_cluster_state(session, parsed_globals, cluster_id):
@@ -210,12 +224,13 @@ def which(program):
return None
-def call_and_display_response(session, operation_name, parameters,
- parsed_globals):
+def call_and_display_response(
+ session, operation_name, parameters, parsed_globals
+):
cli_operation_caller = CLIOperationCaller(session)
cli_operation_caller.invoke(
- 'emr', operation_name,
- parameters, parsed_globals)
+ 'emr', operation_name, parameters, parsed_globals
+ )
def display_response(session, operation_name, result, parsed_globals):
@@ -223,7 +238,8 @@ def display_response(session, operation_name, result, parsed_globals):
# Calling a private method. Should be changed after the functionality
# is moved outside CliOperationCaller.
cli_operation_caller._display_response(
- operation_name, result, parsed_globals)
+ operation_name, result, parsed_globals
+ )
def get_region(session, parsed_globals):
@@ -245,8 +261,9 @@ def join(values, separator=',', lastSeparator='and'):
return values[0]
else:
separator = '%s ' % separator
- return ' '.join([separator.join(values[:-1]),
- lastSeparator, values[-1]])
+ return ' '.join(
+ [separator.join(values[:-1]), lastSeparator, values[-1]]
+ )
def split_to_key_value(string):
@@ -256,21 +273,24 @@ def split_to_key_value(string):
return string.split('=', 1)
-def get_cluster(cluster_id, session, region,
- endpoint_url, verify_ssl):
- describe_cluster_params = {'ClusterId': cluster_id}
- describe_cluster_response = call(
- session, 'describe_cluster', describe_cluster_params,
- region, endpoint_url,
- verify_ssl)
+def get_cluster(cluster_id, session, region, endpoint_url, verify_ssl):
+ describe_cluster_params = {'ClusterId': cluster_id}
+ describe_cluster_response = call(
+ session,
+ 'describe_cluster',
+ describe_cluster_params,
+ region,
+ endpoint_url,
+ verify_ssl,
+ )
- if describe_cluster_response is not None:
- return describe_cluster_response.get('Cluster')
+ if describe_cluster_response is not None:
+ return describe_cluster_response.get('Cluster')
-def get_release_label(cluster_id, session, region,
- endpoint_url, verify_ssl):
- cluster = get_cluster(cluster_id, session, region,
- endpoint_url, verify_ssl)
- if cluster is not None:
- return cluster.get('ReleaseLabel')
+def get_release_label(cluster_id, session, region, endpoint_url, verify_ssl):
+ cluster = get_cluster(
+ cluster_id, session, region, endpoint_url, verify_ssl
+ )
+ if cluster is not None:
+ return cluster.get('ReleaseLabel')
diff --git a/awscli/customizations/emr/exceptions.py b/awscli/customizations/emr/exceptions.py
index ec4a9ad95bf3..d1559886a498 100644
--- a/awscli/customizations/emr/exceptions.py
+++ b/awscli/customizations/emr/exceptions.py
@@ -14,12 +14,12 @@
class EmrError(Exception):
-
"""
The base exception class for Emr exceptions.
:ivar msg: The descriptive message associated with the error.
"""
+
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
@@ -29,7 +29,6 @@ def __init__(self, **kwargs):
class MissingParametersError(EmrError, ParamValidationError):
-
"""
One or more required parameters were not supplied.
@@ -40,168 +39,193 @@ class MissingParametersError(EmrError, ParamValidationError):
other than str().
:ivar missing: The names of the missing parameters.
"""
- fmt = ('aws: error: The following required parameters are missing for '
- '{object_name}: {missing}.')
+ fmt = (
+ 'aws: error: The following required parameters are missing for '
+ '{object_name}: {missing}.'
+ )
-class EmptyListError(EmrError, ParamValidationError):
+class EmptyListError(EmrError, ParamValidationError):
"""
The provided list is empty.
:ivar param: The provided list parameter
"""
- fmt = ('aws: error: The parameter {param} cannot be an empty list.')
+ fmt = 'aws: error: The parameter {param} cannot be an empty list.'
-class MissingRequiredInstanceGroupsError(EmrError, ParamValidationError):
+class MissingRequiredInstanceGroupsError(EmrError, ParamValidationError):
"""
In the create-cluster command, none of --instance-groups,
--instance-count, or --instance-type was supplied.
""" - fmt = ('aws: error: Must specify either --instance-groups or ' - '--instance-type with --instance-count(optional) to ' - 'configure instance groups.') + fmt = ( + 'aws: error: Must specify either --instance-groups or ' + '--instance-type with --instance-count(optional) to ' + 'configure instance groups.' + ) -class InstanceGroupsValidationError(EmrError, ParamValidationError): +class InstanceGroupsValidationError(EmrError, ParamValidationError): """ --instance-type and --instance-count are shortcut option for --instance-groups and they cannot be specified together with --instance-groups """ - fmt = ('aws: error: You may not specify --instance-type ' - 'or --instance-count with --instance-groups, ' - 'because --instance-type and --instance-count are ' - 'shortcut options for --instance-groups.') + fmt = ( + 'aws: error: You may not specify --instance-type ' + 'or --instance-count with --instance-groups, ' + 'because --instance-type and --instance-count are ' + 'shortcut options for --instance-groups.' + ) -class InvalidAmiVersionError(EmrError, ParamValidationError): +class InvalidAmiVersionError(EmrError, ParamValidationError): """ The supplied ami-version is invalid. :ivar ami_version: The provided ami_version. """ - fmt = ('aws: error: The supplied AMI version "{ami_version}" is invalid.' - ' Please see AMI Versions Supported in Amazon EMR in ' - 'Amazon Elastic MapReduce Developer Guide: ' - 'http://docs.aws.amazon.com/ElasticMapReduce/' - 'latest/DeveloperGuide/ami-versions-supported.html') + fmt = ( + 'aws: error: The supplied AMI version "{ami_version}" is invalid.' + ' Please see AMI Versions Supported in Amazon EMR in ' + 'Amazon Elastic MapReduce Developer Guide: ' + 'http://docs.aws.amazon.com/ElasticMapReduce/' + 'latest/DeveloperGuide/ami-versions-supported.html' + ) -class MissingBooleanOptionsError(EmrError, ParamValidationError): +class MissingBooleanOptionsError(EmrError, ParamValidationError): """ Required boolean options are not supplied. :ivar true_option :ivar false_option """ - fmt = ('aws: error: Must specify one of the following boolean options: ' - '{true_option}|{false_option}.') + fmt = ( + 'aws: error: Must specify one of the following boolean options: ' + '{true_option}|{false_option}.' + ) -class UnknownStepTypeError(EmrError, ParamValidationError): +class UnknownStepTypeError(EmrError, ParamValidationError): """ The provided step type is not supported. :ivar step_type: the step_type provided. """ - fmt = ('aws: error: The step type {step_type} is not supported.') + fmt = 'aws: error: The step type {step_type} is not supported.' -class UnknownIamEndpointError(EmrError): +class UnknownIamEndpointError(EmrError): """ The IAM endpoint is not known for the specified region. :ivar region: The region specified. """ - fmt = 'IAM endpoint not known for region: {region}.' +\ - ' Specify the iam-endpoint using the --iam-endpoint option.' + fmt = ( + 'IAM endpoint not known for region: {region}.' + + ' Specify the iam-endpoint using the --iam-endpoint option.' + ) -class ResolveServicePrincipalError(EmrError): +class ResolveServicePrincipalError(EmrError): """ The service principal could not be resolved from the region or the endpoint. """ - fmt = 'Could not resolve the service principal from' +\ - ' the region or the endpoint.' + fmt = ( + 'Could not resolve the service principal from' + + ' the region or the endpoint.' 
+ )
-class LogUriError(EmrError, ParamValidationError):
+class LogUriError(EmrError, ParamValidationError):
"""
The LogUri is not specified and debugging is enabled for the cluster.
"""
- fmt = ('aws: error: LogUri not specified. You must specify a logUri '
- 'if you enable debugging when creating a cluster.')
+ fmt = (
+ 'aws: error: LogUri not specified. You must specify a logUri '
+ 'if you enable debugging when creating a cluster.'
+ )
-class MasterDNSNotAvailableError(EmrError):
+class MasterDNSNotAvailableError(EmrError):
"""
Cannot get DNS of master node on the cluster.
"""
- fmt = 'Cannot get DNS of master node on the cluster. '\
- ' Please try again after some time.'
+ fmt = (
+ 'Cannot get DNS of master node on the cluster. '
+ ' Please try again after some time.'
+ )
-class WrongPuttyKeyError(EmrError, ParamValidationError):
+class WrongPuttyKeyError(EmrError, ParamValidationError):
"""
A wrong key has been used with a compatible program.
"""
- fmt = 'Key file format is incorrect. Putty expects a ppk file. '\
- 'Please refer to documentation at http://docs.aws.amazon.com/'\
- 'ElasticMapReduce/latest/DeveloperGuide/EMR_SetUp_SSH.html. '
+ fmt = (
+ 'Key file format is incorrect. Putty expects a ppk file. '
+ 'Please refer to documentation at http://docs.aws.amazon.com/'
+ 'ElasticMapReduce/latest/DeveloperGuide/EMR_SetUp_SSH.html. '
+ )
-class SSHNotFoundError(EmrError):
+class SSHNotFoundError(EmrError):
"""
SSH or Putty not available.
"""
- fmt = 'SSH or Putty not available. Please refer to the documentation '\
- 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'\
- 'DeveloperGuide/EMR_SetUp_SSH.html.'
+ fmt = (
+ 'SSH or Putty not available. Please refer to the documentation '
+ 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'
+ 'DeveloperGuide/EMR_SetUp_SSH.html.'
+ )
-class SCPNotFoundError(EmrError):
+class SCPNotFoundError(EmrError):
"""
SCP or Pscp not available.
"""
- fmt = 'SCP or Pscp not available. Please refer to the documentation '\
- 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'\
- 'DeveloperGuide/EMR_SetUp_SSH.html. '
+ fmt = (
+ 'SCP or Pscp not available. Please refer to the documentation '
+ 'at http://docs.aws.amazon.com/ElasticMapReduce/latest/'
+ 'DeveloperGuide/EMR_SetUp_SSH.html. '
+ )
-class SubnetAndAzValidationError(EmrError, ParamValidationError):
+class SubnetAndAzValidationError(EmrError, ParamValidationError):
"""
SubnetId and AvailabilityZone are mutually exclusive in --ec2-attributes.
"""
- fmt = ('aws: error: You may not specify both a SubnetId and an Availabili'
- 'tyZone (placement) because ec2SubnetId implies a placement.')
+ fmt = (
+ 'aws: error: You may not specify both a SubnetId and an Availabili'
+ 'tyZone (placement) because ec2SubnetId implies a placement.'
+ )
-class RequiredOptionsError(EmrError, ParamValidationError):
+class RequiredOptionsError(EmrError, ParamValidationError):
"""
Either of option1 or option2 is required.
"""
- fmt = ('aws: error: Either {option1} or {option2} is required.')
+ fmt = 'aws: error: Either {option1} or {option2} is required.'
class MutualExclusiveOptionError(EmrError, ParamValidationError):
-
"""
The provided option1 and option2 are mutually exclusive.
@@ -211,15 +235,18 @@ class MutualExclusiveOptionError(EmrError, ParamValidationError):
"""
def __init__(self, **kwargs):
- msg = ('aws: error: You cannot specify both ' +
- kwargs.get('option1', '') + ' and ' +
- kwargs.get('option2', '') + ' options together.' +
- kwargs.get('message', ''))
+ msg = (
+ 'aws: error: You cannot specify both '
+ + kwargs.get('option1', '')
+ + ' and '
+ + kwargs.get('option2', '')
+ + ' options together.'
+ + kwargs.get('message', '')
+ )
Exception.__init__(self, msg)
class MissingApplicationsError(EmrError, ParamValidationError):
-
"""
The application required for a step is not installed when creating a
cluster.
@@ -228,50 +255,56 @@
"""
def __init__(self, **kwargs):
- msg = ('aws: error: Some of the steps require the following'
- ' applications to be installed: ' +
- ', '.join(kwargs['applications']) + '. Please install the'
- ' applications using --applications.')
+ msg = (
+ 'aws: error: Some of the steps require the following'
+ ' applications to be installed: '
+ + ', '.join(kwargs['applications'])
+ + '. Please install the'
+ ' applications using --applications.'
+ )
Exception.__init__(self, msg)
class ClusterTerminatedError(EmrError):
-
"""
The cluster is terminating or has already terminated.
"""
+
fmt = 'aws: error: Cluster terminating or already terminated.'
class ClusterStatesFilterValidationError(EmrError, ParamValidationError):
-
"""
In the list-clusters command, customers can specify only one of the
following states filters:
--cluster-states, --active, --terminated, --failed
"""
- fmt = ('aws: error: You can specify only one of the cluster state '
- 'filters: --cluster-states, --active, --terminated, --failed.')
+ fmt = (
+ 'aws: error: You can specify only one of the cluster state '
+ 'filters: --cluster-states, --active, --terminated, --failed.'
+ )
-class MissingClusterAttributesError(EmrError, ParamValidationError):
+class MissingClusterAttributesError(EmrError, ParamValidationError):
"""
In the modify-cluster-attributes command, customers need to provide
at least one of the following cluster attributes: --visible-to-all-users,
--no-visible-to-all-users, --termination-protected,
--no-termination-protected, --auto-terminate and --no-auto-terminate
"""
- fmt = ('aws: error: Must specify one of the following boolean options: '
- '--visible-to-all-users|--no-visible-to-all-users, '
- '--termination-protected|--no-termination-protected, '
- '--auto-terminate|--no-auto-terminate, '
- '--unhealthy-node-replacement|--no-unhealthy-node-replacement.')
+ fmt = (
+ 'aws: error: Must specify one of the following boolean options: '
+ '--visible-to-all-users|--no-visible-to-all-users, '
+ '--termination-protected|--no-termination-protected, '
+ '--auto-terminate|--no-auto-terminate, '
+ '--unhealthy-node-replacement|--no-unhealthy-node-replacement.'
+ )
-class InvalidEmrFsArgumentsError(EmrError, ParamValidationError):
+class InvalidEmrFsArgumentsError(EmrError, ParamValidationError):
"""
The provided EMRFS parameters are invalid as parent feature e.g.,
Consistent View, CSE, SSE is not configured
@@ -280,40 +313,46 @@
:ivar invalid: Invalid parameters
:ivar parent_object_name: Parent feature name
"""
- fmt = ('aws: error: {parent_object_name} is not specified. Thus, '
- ' following parameters are invalid: {invalid}')
+ fmt = (
+ 'aws: error: {parent_object_name} is not specified. Thus, '
+ ' following parameters are invalid: {invalid}'
+ )
class DuplicateEmrFsConfigurationError(EmrError, ParamValidationError):
-
- fmt = ('aws: error: EMRFS should be configured either using '
- '--configuration or --emrfs but not both')
+ fmt = (
+ 'aws: error: EMRFS should be configured either using '
+ '--configuration or --emrfs but not both'
+ )
class UnknownCseProviderTypeError(EmrError, ParamValidationError):
-
"""
The provided EMRFS client-side encryption provider type is not supported.
:ivar provider_type: the provider_type provided.
"""
- fmt = ('aws: error: The client side encryption type "{provider_type}" is '
- 'not supported. You must specify either KMS or Custom')
+ fmt = (
+ 'aws: error: The client side encryption type "{provider_type}" is '
+ 'not supported. You must specify either KMS or Custom'
+ )
-class UnknownEncryptionTypeError(EmrError, ParamValidationError):
+class UnknownEncryptionTypeError(EmrError, ParamValidationError):
"""
The provided encryption type is not supported.
:ivar provider_type: the provider_type provided.
"""
- fmt = ('aws: error: The encryption type "{encryption}" is invalid. '
- 'You must specify either ServerSide or ClientSide')
+ fmt = (
+ 'aws: error: The encryption type "{encryption}" is invalid. '
+ 'You must specify either ServerSide or ClientSide'
+ )
-class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError):
+class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError):
"""
Only one of SSE or Encryption can be configured.
@@ -321,25 +360,30 @@ class BothSseAndEncryptionConfiguredError(EmrError, ParamValidationError):
:ivar sse: Value for SSE
:ivar encryption: Value for encryption
"""
- fmt = ('aws: error: Both SSE={sse} and Encryption={encryption} are '
- 'configured for --emrfs. You must specify only one of the two.')
+ fmt = (
+ 'aws: error: Both SSE={sse} and Encryption={encryption} are '
+ 'configured for --emrfs. You must specify only one of the two.'
+ )
class InvalidBooleanConfigError(EmrError, ParamValidationError):
-
- fmt = ("aws: error: {config_value} for {config_key} in the config file is "
- "invalid. The value should be either 'True' or 'False'. Use "
- "'aws configure set {profile_var_name}.emr.{config_key}
- 'help_text': "Backup type. You can specify 'incremental' or "
- "'full'."},
- {'name': 'dir', 'required': True,
- 'help_text': helptext.HBASE_BACKUP_DIR},
- {'name': 'interval', 'required': True,
- 'help_text': 'The time between backups.'},
- {'name': 'unit', 'required': True,
- 'help_text': "The time unit for backup's time-interval. "
- "You can specify one of the following values:"
- " 'minutes', 'hours', or 'days'."},
- {'name': 'start-time',
- 'help_text': 'The time of the first backup in ISO format.'
- ' e.g. 2014-04-21T05:26:10Z. Default is now.'},
- {'name': 'consistent', 'action': 'store_true',
- 'help_text': 'Performs a consistent backup.'
- ' Pauses all write operations to the HBase cluster'
- ' during the backup process.'}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ },
+ {
+ 'name': 'type',
+ 'required': True,
+ 'help_text': "Backup type. You can specify 'incremental' or "
+ "'full'.",
+ },
+ {
+ 'name': 'dir',
+ 'required': True,
+ 'help_text': helptext.HBASE_BACKUP_DIR,
+ },
+ {
+ 'name': 'interval',
+ 'required': True,
+ 'help_text': 'The time between backups.',
+ },
+ {
+ 'name': 'unit',
+ 'required': True,
+ 'help_text': "The time unit for backup's time-interval. "
+ "You can specify one of the following values:"
+ " 'minutes', 'hours', or 'days'.",
+ },
+ {
+ 'name': 'start-time',
+ 'help_text': 'The time of the first backup in ISO format.'
+ ' e.g. 2014-04-21T05:26:10Z. Default is now.',
+ },
+ {
+ 'name': 'consistent',
+ 'action': 'store_true',
+ 'help_text': 'Performs a consistent backup.'
+ ' Pauses all write operations to the HBase cluster'
+ ' during the backup process.',
+ },
]
def _run_main_command(self, parsed_args, parsed_globals):
@@ -88,37 +115,54 @@ def _run_main_command(self, parsed_args, parsed_globals):
jar=constants.HBASE_JAR_PATH,
name=constants.HBASE_SCHEDULE_BACKUP_STEP_NAME,
action_on_failure=constants.CANCEL_AND_WAIT,
- args=args)
+ args=args,
+ )
steps.append(step_config)
- parameters = {'JobFlowId': parsed_args.cluster_id,
- 'Steps': steps}
- emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
- parameters, parsed_globals)
+ parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps}
+ emrutils.call_and_display_response(
+ self._session, 'AddJobFlowSteps', parameters, parsed_globals
+ )
return 0
def _check_type(self, type):
type = type.lower()
if type != constants.FULL and type != constants.INCREMENTAL:
- raise ParamValidationError('aws: error: invalid type. '
- 'type should be either ' +
- constants.FULL + ' or ' + constants.INCREMENTAL +
- '.')
+ raise ParamValidationError(
+ 'aws: error: invalid type. '
+ 'type should be either '
+ + constants.FULL
+ + ' or '
+ + constants.INCREMENTAL
+ + '.'
+ )
def _check_unit(self, unit):
unit = unit.lower()
- if (unit != constants.MINUTES and
- unit != constants.HOURS and
- unit != constants.DAYS):
+ if (
+ unit != constants.MINUTES
+ and unit != constants.HOURS
+ and unit != constants.DAYS
+ ):
raise ParamValidationError(
'aws: error: invalid unit. unit should be one of'
- ' the following values: ' + constants.MINUTES +
- ', ' + constants.HOURS + ' or ' + constants.DAYS + '.'
+ ' the following values: '
+ + constants.MINUTES
+ + ', '
+ + constants.HOURS
+ + ' or '
+ + constants.DAYS
+ + '.'
)
def _build_hbase_schedule_backup_args(self, parsed_args):
- args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP,
- constants.TRUE, constants.HBASE_BACKUP_DIR, parsed_args.dir]
+ args = [
+ constants.HBASE_MAIN,
+ constants.HBASE_SCHEDULED_BACKUP,
+ constants.TRUE,
+ constants.HBASE_BACKUP_DIR,
+ parsed_args.dir,
+ ]
type = parsed_args.type.lower()
unit = parsed_args.unit.lower()
@@ -151,17 +195,28 @@ def _build_hbase_schedule_backup_args(self, parsed_args):
class CreateHBaseBackup(Command):
NAME = 'create-hbase-backup'
- DESCRIPTION = ('Creates a HBase backup in S3. ' +
- helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS)
+ DESCRIPTION = (
+ 'Creates a HBase backup in S3. '
+ + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID},
- {'name': 'dir', 'required': True,
- 'help_text': helptext.HBASE_BACKUP_DIR},
- {'name': 'consistent', 'action': 'store_true',
- 'help_text': 'Performs a consistent backup. Pauses all write'
- ' operations to the HBase cluster during the backup'
- ' process.
+        {
+            'name': 'cluster-id',
+            'required': True,
+            'help_text': helptext.CLUSTER_ID,
+        },
+        {
+            'name': 'dir',
+            'required': True,
+            'help_text': helptext.HBASE_BACKUP_DIR,
+        },
+        {
+            'name': 'consistent',
+            'action': 'store_true',
+            'help_text': 'Performs a consistent backup. Pauses all write'
+            ' operations to the HBase cluster during the backup'
+            ' process.',
+        },
     ]

     def _run_main_command(self, parsed_args, parsed_globals):
@@ -172,19 +227,23 @@ def _run_main_command(self, parsed_args, parsed_globals):
             jar=constants.HBASE_JAR_PATH,
             name=constants.HBASE_BACKUP_STEP_NAME,
             action_on_failure=constants.CANCEL_AND_WAIT,
-            args=args)
+            args=args,
+        )
         steps.append(step_config)

-        parameters = {'JobFlowId': parsed_args.cluster_id,
-                      'Steps': steps}
-        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
-                                           parameters, parsed_globals)
+        parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps}
+        emrutils.call_and_display_response(
+            self._session, 'AddJobFlowSteps', parameters, parsed_globals
+        )
         return 0

     def _build_hbase_backup_args(self, parsed_args):
-        args = [constants.HBASE_MAIN,
-                constants.HBASE_BACKUP,
-                constants.HBASE_BACKUP_DIR, parsed_args.dir]
+        args = [
+            constants.HBASE_MAIN,
+            constants.HBASE_BACKUP,
+            constants.HBASE_BACKUP_DIR,
+            parsed_args.dir,
+        ]

         if parsed_args.consistent is True:
             args.append(constants.HBASE_BACKUP_CONSISTENT)
@@ -194,15 +253,26 @@ def _build_hbase_backup_args(self, parsed_args):

 class DisableHBaseBackups(Command):
     NAME = 'disable-hbase-backups'
-    DESCRIPTION = ('Add a step to disable automated HBase backups. ' +
-                   helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS)
+    DESCRIPTION = (
+        'Add a step to disable automated HBase backups. '
+        + helptext.AVAILABLE_ONLY_FOR_AMI_VERSIONS
+    )
     ARG_TABLE = [
-        {'name': 'cluster-id', 'required': True,
-         'help_text': helptext.CLUSTER_ID},
-        {'name': 'full', 'action': 'store_true',
-         'help_text': 'Disables full backup.'},
-        {'name': 'incremental', 'action': 'store_true',
-         'help_text': 'Disables incremental backup.'}
+        {
+            'name': 'cluster-id',
+            'required': True,
+            'help_text': helptext.CLUSTER_ID,
+        },
+        {
+            'name': 'full',
+            'action': 'store_true',
+            'help_text': 'Disables full backup.',
+        },
+        {
+            'name': 'incremental',
+            'action': 'store_true',
+            'help_text': 'Disables incremental backup.',
+        },
     ]

     def _run_main_command(self, parsed_args, parsed_globals):
@@ -214,22 +284,30 @@ def _run_main_command(self, parsed_args, parsed_globals):
             constants.HBASE_JAR_PATH,
             constants.HBASE_SCHEDULE_BACKUP_STEP_NAME,
             constants.CANCEL_AND_WAIT,
-            args)
+            args,
+        )
         steps.append(step_config)

-        parameters = {'JobFlowId': parsed_args.cluster_id,
-                      'Steps': steps}
-        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
-                                           parameters, parsed_globals)
+        parameters = {'JobFlowId': parsed_args.cluster_id, 'Steps': steps}
+        emrutils.call_and_display_response(
+            self._session, 'AddJobFlowSteps', parameters, parsed_globals
+        )
         return 0

     def _build_hbase_disable_backups_args(self, parsed_args):
-        args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP,
-                constants.FALSE]
+        args = [
+            constants.HBASE_MAIN,
+            constants.HBASE_SCHEDULED_BACKUP,
+            constants.FALSE,
+        ]
         if parsed_args.full is False and parsed_args.incremental is False:
-            error_message = 'Should specify at least one of --' +\
-                            constants.FULL + ' and --' +\
-                            constants.INCREMENTAL + '.'
+            error_message = (
+                'Should specify at least one of --'
+                + constants.FULL
+                + ' and --'
+                + constants.INCREMENTAL
+                + '.'
+            )
             raise ParamValidationError(error_message)
         if parsed_args.full is True:
             args.append(constants.HBASE_DISABLE_FULL_BACKUP)
diff --git a/awscli/customizations/emr/hbaseutils.py b/awscli/customizations/emr/hbaseutils.py
index 0376dda6a4eb..e3050717fe0f 100644
--- a/awscli/customizations/emr/hbaseutils.py
+++ b/awscli/customizations/emr/hbaseutils.py
@@ -15,9 +15,12 @@


 def build_hbase_restore_from_backup_args(dir, backup_version=None):
-    args = [constants.HBASE_MAIN,
-            constants.HBASE_RESTORE,
-            constants.HBASE_BACKUP_DIR, dir]
+    args = [
+        constants.HBASE_MAIN,
+        constants.HBASE_RESTORE,
+        constants.HBASE_BACKUP_DIR,
+        dir,
+    ]

     if backup_version is not None:
         args.append(constants.HBASE_BACKUP_VERSION_FOR_RESTORE)
diff --git a/awscli/customizations/emr/helptext.py b/awscli/customizations/emr/helptext.py
index cf8f587bfa7a..fed90bb8266b 100755
--- a/awscli/customizations/emr/helptext.py
+++ b/awscli/customizations/emr/helptext.py
@@ -11,8 +11,10 @@
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.

-from awscli.customizations.emr.createdefaultroles import EMR_ROLE_NAME
-from awscli.customizations.emr.createdefaultroles import EC2_ROLE_NAME
+from awscli.customizations.emr.createdefaultroles import (
+    EC2_ROLE_NAME,
+    EMR_ROLE_NAME,
+)

 TERMINATE_CLUSTERS = (
     'Shuts down one or more clusters, each specified by cluster ID. '
@@ -27,28 +29,33 @@
     'The command is asynchronous. Depending on the '
     'configuration of the cluster, it may take from 5 to 20 minutes for the '
     'cluster to terminate completely and release allocated resources such as '
-    'Amazon EC2 instances.')
+    'Amazon EC2 instances.'
+)

 CLUSTER_ID = (
     'A unique string that identifies a cluster. The '
    'create-cluster command returns this identifier. You can '
-    'use the list-clusters command to get cluster IDs.')
+    'use the list-clusters command to get cluster IDs.'
+)

HBASE_BACKUP_DIR = (
    'The Amazon S3 location of the Hbase backup. Example: '
    's3://mybucket/mybackup, where mybucket is the '
    'specified Amazon S3 bucket and mybackup is the specified backup '
    'location. The path argument must begin with s3://, which '
-    'refers to an Amazon S3 bucket.')
+    'refers to an Amazon S3 bucket.'
+)

HBASE_BACKUP_VERSION = (
    'The backup version to restore from. If not specified, the latest backup '
-    'in the specified location is used.')
+    'in the specified location is used.'
+)

# create-cluster options help text
CREATE_CLUSTER_DESCRIPTION = (
-    'Creates an Amazon EMR cluster with the specified configurations.')
+    'Creates an Amazon EMR cluster with the specified configurations.'
+)

DESCRIBE_CLUSTER_DESCRIPTION = (
    'Provides cluster-level details including status, hardware '
@@ -58,22 +65,24 @@
    'elasticmapreduce:ListBootstrapActions, '
    'elasticmapreduce:ListInstanceFleets, '
    'elasticmapreduce:DescribeCluster, '
-    'and elasticmapreduce:ListInstanceGroups.')
+    'and elasticmapreduce:ListInstanceGroups.'
+)

-CLUSTER_NAME = (
-    'The name of the cluster. If not provided, the default is "Development Cluster".')
+CLUSTER_NAME = 'The name of the cluster. If not provided, the default is "Development Cluster".'

LOG_URI = (
    'Specifies the location in Amazon S3 to which log files '
    'are periodically written. If a value is not provided, '
    'logs files are not written to Amazon S3 from the master node '
-    'and are lost if the master node terminates.')
+    'and are lost if the master node terminates.'
+)

LOG_ENCRYPTION_KMS_KEY_ID = (
    'Specifies the KMS Id utilized for log encryption. If a value is '
    'not provided, log files will be encrypted by default encryption method '
    'AES-256. This attribute is only available with EMR version 5.30.0 and later, '
-    'excluding EMR 6.0.0.')
+    'excluding EMR 6.0.0.'
+)

SERVICE_ROLE = (
    'Specifies an IAM service role, which Amazon EMR requires to call other AWS services '
@@ -82,28 +91,32 @@
    'To specify the default service role, as well as the default instance '
    'profile, use the --use-default-roles parameter. '
    'If the role and instance profile do not already exist, use the '
-    'aws emr create-default-roles command to create them.')
+    'aws emr create-default-roles command to create them.'
+)
AUTOSCALING_ROLE = (
    'Specify --auto-scaling-role EMR_AutoScaling_DefaultRole'
    ' if an automatic scaling policy is specified for an instance group'
    ' using the --instance-groups parameter. This default'
    ' IAM role allows the automatic scaling feature'
-    ' to launch and terminate Amazon EC2 instances during scaling operations.')
+    ' to launch and terminate Amazon EC2 instances during scaling operations.'
+)

USE_DEFAULT_ROLES = (
    'Specifies that the cluster should use the default'
    ' service role (EMR_DefaultRole) and instance profile (EMR_EC2_DefaultRole)'
    ' for permissions to access other AWS services. '
    'Make sure that the role and instance profile exist first. To create them,'
-    ' use the create-default-roles command.')
+    ' use the create-default-roles command.'
+)
AMI_VERSION = (
    'Applies only to Amazon EMR release versions earlier than 4.0. Use'
    ' --release-label for 4.0 and later. Specifies'
    ' the version of Amazon Linux Amazon Machine Image (AMI)'
    ' to use when launching Amazon EC2 instances in the cluster.'
-    ' For example, --ami-version 3.1.0.')
+    ' For example, --ami-version 3.1.0.'
+)
RELEASE_LABEL = (
    'Specifies the Amazon EMR release version, which determines'
@@ -115,12 +128,14 @@
    'https://docs.aws.amazon.com/emr/latest/ReleaseGuide '
    'Use --release-label only for Amazon EMR release version 4.0'
    ' and later. Use --ami-version for earlier versions.'
-    ' You cannot specify both a release label and AMI version.')
+    ' You cannot specify both a release label and AMI version.'
+)

OS_RELEASE_LABEL = (
    'Specifies a particular Amazon Linux release for all nodes in a cluster'
-    ' launch request. If a release is not specified, EMR uses the latest validated'
-    ' Amazon Linux release for cluster launch.')
+    ' launch request. If a release is not specified, EMR uses the latest validated'
+    ' Amazon Linux release for cluster launch.'
+)

CONFIGURATIONS = (
    'Specifies a JSON file that contains configuration classifications,'
@@ -134,7 +149,8 @@
    ' file for an application, such as yarn-site for YARN. For a list of'
    ' available configuration classifications and example JSON, see'
    ' the following topic in the Amazon EMR Release Guide: '
-    'https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html')
+    'https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html'
+)

INSTANCE_GROUPS = (
    'Specifies the number and type of Amazon EC2 instances'
@@ -163,7 +179,8 @@
    '[EbsConfiguration] - Specifies additional Amazon EBS storage volumes attached'
    ' to EC2 instances using an inline JSON structure.'
    '[AutoScalingPolicy] - Specifies an automatic scaling policy for the'
-    ' instance group using an inline JSON structure.')
+    ' instance group using an inline JSON structure.'
+)

INSTANCE_FLEETS = (
    'Applies only to Amazon EMR release version 5.0 and later. Specifies'
@@ -195,7 +212,8 @@
    'InstanceTypeConfigs - Specify up to five EC2 instance types to'
    ' use in the instance fleet, including details such as Spot price and Amazon EBS configuration.'
    ' When you use an On-Demand or Spot Instance allocation strategy,'
-    ' you can specify up to 30 instance types per instance fleet.')
+    ' you can specify up to 30 instance types per instance fleet.'
+)

INSTANCE_TYPE = (
    'Shortcut parameter as an alternative to --instance-groups.'
@@ -204,18 +222,21 @@
    ' the cluster consists of a single master node running on the EC2 instance type'
    ' specified. When used together with --instance-count,'
    ' one instance is used for the master node, and the remainder'
-    ' are used for the core node type.')
+    ' are used for the core node type.'
+)

INSTANCE_COUNT = (
    'Shortcut parameter as an alternative to --instance-groups'
    ' when used together with --instance-type. Specifies the'
    ' number of Amazon EC2 instances to create for a cluster.'
    ' One instance is used for the master node, and the remainder'
-    ' are used for the core node type.')
+    ' are used for the core node type.'
+)
ADDITIONAL_INFO = (
    'Specifies additional information during cluster creation.'
    ' To set development mode when starting your EMR cluster,'
-    ' set this parameter to {"clusterType":"development"}.')
+    ' set this parameter to {"clusterType":"development"}.'
+)
EC2_ATTRIBUTES = (
    'Configures cluster and Amazon EC2 instance configurations. Accepts'
@@ -227,10 +248,10 @@
    ' For example, us-west-1b. AvailabilityZone is used for uniform instance groups,'
    ' while AvailabilityZones (plural) is used for instance fleets.'
    'AvailabilityZones - Applies to clusters that use the instance fleet configuration.'
-    ' When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances'
+    ' When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances'
    ' in the optimal Availability Zone. AvailabilityZone is used for uniform instance groups,'
    ' while AvailabilityZones (plural) is used for instance fleets.'
-    'SubnetId - Applies to clusters that use the uniform instance group configuration.'
+    'SubnetId - Applies to clusters that use the uniform instance group configuration.'
    ' Specify the VPC subnet in which to create the cluster. SubnetId is used for uniform instance groups,'
    ' while SubnetIds (plural) is used for instance fleets.'
    'SubnetIds - Applies to clusters that use the instance fleet configuration.'
@@ -249,16 +270,19 @@
    'AdditionalMasterSecurityGroups - A list of additional Amazon EC2'
    ' security group IDs for the master node.'
    'AdditionalSlaveSecurityGroups - A list of additional Amazon EC2'
-    ' security group IDs for the slave nodes.')
+    ' security group IDs for the slave nodes.'
+)

AUTO_TERMINATE = (
    'Specifies whether the cluster should terminate after'
-    ' completing all the steps. Auto termination is off by default.')
+    ' completing all the steps. Auto termination is off by default.'
+)

TERMINATION_PROTECTED = (
    'Specifies whether to lock the cluster to prevent the'
    ' Amazon EC2 instances from being terminated by API call,'
-    ' user intervention, or an error.')
+    ' user intervention, or an error.'
+)

SCALE_DOWN_BEHAVIOR = (
    'Specifies the way that individual Amazon EC2 instances terminate'
@@ -276,7 +300,8 @@
    ' of the AWS account associated with the cluster. If a user'
    ' has the proper policy permissions set, they can also manage the cluster. '
    'Visibility is on by default. The --no-visible-to-all-users option'
-    ' is no longer supported. To restrict cluster visibility, use an IAM policy.')
+    ' is no longer supported. To restrict cluster visibility, use an IAM policy.'
+)

DEBUGGING = (
    'Specifies that the debugging tool is enabled for the cluster,'
@@ -284,7 +309,8 @@
    ' Turning debugging on requires that you specify --log-uri '
    ' because log files must be stored in Amazon S3 so that'
    ' Amazon EMR can index them for viewing in the console.'
-    ' Effective January 23, 2023, Amazon EMR will discontinue the debugging tool for all versions.')
+    ' Effective January 23, 2023, Amazon EMR will discontinue the debugging tool for all versions.'
+)

TAGS = (
    'A list of tags to associate with a cluster, which apply to'
@@ -294,7 +320,8 @@
    ' with a maximum of 256 characters. '
    'You can specify tags in key=value format or you can add a'
    ' tag without a value using only the key name, for example key.'
-    ' Use a space to separate multiple tags.')
+    ' Use a space to separate multiple tags.'
+)

BOOTSTRAP_ACTIONS = (
    'Specifies a list of bootstrap actions to run on each EC2 instance when'
@@ -317,7 +344,8 @@
    ' to pass to the bootstrap action script. Arguments can be'
    ' either a list of values ('
    'Specifies the applications to install on the cluster.'
@@ -329,7 +357,8 @@
    ' some applications take optional arguments for configuration.'
    ' Arguments should either be a comma-separated list of values'
    ' (Args=arg1,arg2,arg3)'
    ' or a list of key-value pairs, as well as optional values,'
-    ' enclosed in square brackets (Args=[arg1,arg2=arg2value,arg3]).')
+    ' enclosed in square brackets (Args=[arg1,arg2=arg2value,arg3]).'
+)
APPLICATIONS = (
    'Args=arg1,arg2,arg3) or a bracket-enclosed list of values'
-    ' and key-value pairs (Args=[arg1,arg2=arg3,arg4]).')
+    ' and key-value pairs (Args=[arg1,arg2=arg3,arg4]).'
+)

EMR_FS = (
    'Specifies EMRFS configuration options, such as consistent view'
@@ -340,13 +369,15 @@
    ' to configure EMRFS, and use security configurations'
    ' to configure encryption for EMRFS data in Amazon S3 instead.'
    ' For more information, see the following topic in the Amazon EMR Management Guide: '
-    'https://docs.aws.amazon.com/emr/latest/ManagementGuide/emrfs-configure-consistent-view.html')
+    'https://docs.aws.amazon.com/emr/latest/ManagementGuide/emrfs-configure-consistent-view.html'
+)

RESTORE_FROM_HBASE = (
    'Applies only when using Amazon EMR release versions earlier than 4.0.'
    ' Launches a new HBase cluster and populates it with'
    ' data from a previous backup of an HBase cluster. HBase'
-    ' must be installed using the --applications option.')
+    ' must be installed using the --applications option.'
+)
STEPS = (
    'Specifies a list of steps to be executed by the cluster. Steps run'
@@ -356,27 +387,32 @@
    ' or by specifying an inline JSON structure. Args supplied with steps'
    ' should be a comma-separated list of values (Args=arg1,arg2,arg3) or'
    ' a bracket-enclosed list of values and key-value'
-    ' pairs (Args=[arg1,arg2=value,arg4).')
+    ' pairs (Args=[arg1,arg2=value,arg4).'
+)

INSTALL_APPLICATIONS = (
    'The applications to be installed.'
    ' Takes the following parameters: '
-    'Name and Args.')
+    'Name and Args.'
+)
EBS_ROOT_VOLUME_SIZE = (
    'This option is available only with Amazon EMR version 4.x and later. Specifies the size,'
    ' in GiB, of the EBS root device volume of the Amazon Linux AMI'
-    ' that is used for each EC2 instance in the cluster.')
+    ' that is used for each EC2 instance in the cluster. '
+)

EBS_ROOT_VOLUME_IOPS = (
    'This option is available only with Amazon EMR version 6.15.0 and later. Specifies the IOPS,'
    ' of the EBS root device volume of the Amazon Linux AMI'
-    ' that is used for each EC2 instance in the cluster.')
+    ' that is used for each EC2 instance in the cluster. '
+)

EBS_ROOT_VOLUME_THROUGHPUT = (
    'This option is available only with Amazon EMR version 6.15.0 and later. Specifies the throughput,'
    ' in MiB/s, of the EBS root device volume of the Amazon Linux AMI'
-    ' that is used for each EC2 instance in the cluster.')
+    ' that is used for each EC2 instance in the cluster. '
+)

SECURITY_CONFIG = (
@@ -386,7 +422,8 @@
    ' the following topic in the Amazon EMR Management Guide:'
    'https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-encryption-enable-security-configuration.html '
    'Use list-security-configurations to get a list of available'
-    ' security configurations in the active account.')
+    ' security configurations in the active account.'
+)

CUSTOM_AMI_ID = (
    'Applies only to Amazon EMR release version 5.7.0 and later.'
@@ -396,7 +433,8 @@
    ' can also be used instead of bootstrap actions to customize'
    ' cluster node configurations. For more information, see'
    ' the following topic in the Amazon EMR Management Guide: '
-    'https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html')
+    'https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-custom-ami.html'
+)

REPO_UPGRADE_ON_BOOT = (
    'Applies only when a --custom-ami-id is'
@@ -405,24 +443,26 @@
    ' before other services start. You can set this parameter'
    ' using --repo-upgrade-on-boot NONE to'
    ' disable these updates. CAUTION: This creates additional'
-    ' security risks.')
+    ' security risks.'
+)
KERBEROS_ATTRIBUTES = (
-    'Specifies required cluster attributes for Kerberos when Kerberos authentication'
-    ' is enabled in the specified --security-configuration.'
-    ' Takes the following arguments: '
-    'Realm - Specifies the name of the Kerberos'
-    ' realm to which all nodes in a cluster belong. For example,'
-    ' Realm=EC2.INTERNAL.'
-    'KdcAdminPassword - Specifies the password used within the cluster'
-    ' for the kadmin service, which maintains Kerberos principals, password'
-    ' policies, and keytabs for the cluster.'
-    'CrossRealmTrustPrincipalPassword - Required when establishing a cross-realm trust'
-    ' with a KDC in a different realm. This is the cross-realm principal password,'
-    ' which must be identical across realms.'
-    'ADDomainJoinUser - Required when establishing trust with an Active Directory'
-    ' domain. This is the User logon name of an AD account with sufficient privileges to join resources to the domain.'
-    'ADDomainJoinPassword - The AD password for ADDomainJoinUser.'
+    'Specifies required cluster attributes for Kerberos when Kerberos authentication'
+    ' is enabled in the specified --security-configuration.'
+    ' Takes the following arguments: '
+    'Realm - Specifies the name of the Kerberos'
+    ' realm to which all nodes in a cluster belong. For example,'
+    ' Realm=EC2.INTERNAL.'
+    'KdcAdminPassword - Specifies the password used within the cluster'
+    ' for the kadmin service, which maintains Kerberos principals, password'
+    ' policies, and keytabs for the cluster.'
+    'CrossRealmTrustPrincipalPassword - Required when establishing a cross-realm trust'
+    ' with a KDC in a different realm. This is the cross-realm principal password,'
+    ' which must be identical across realms.'
+    'ADDomainJoinUser - Required when establishing trust with an Active Directory'
+    ' domain. This is the User logon name of an AD account with sufficient privileges to join resources to the domain.'
+    'ADDomainJoinPassword - The AD password for ADDomainJoinUser.'
+)

LIST_CLUSTERS_CLUSTER_STATES = (
    'WAITING, TERMINATING, TERMINATED,'
-    ' TERMINATED_WITH_ERRORS'
+    ' TERMINATED_WITH_ERRORS'
+)

LIST_CLUSTERS_STATE_FILTERS = (
    'Shortcut options for --cluster-states. The'
@@ -446,41 +487,50 @@
    ' are STARTING, BOOTSTRAPPING,'
    ' RUNNING, WAITING, or TERMINATING. '
    '--terminated - list only clusters that are TERMINATED. '
-    '--failed - list only clusters that are TERMINATED_WITH_ERRORS.'
+    '--failed - list only clusters that are TERMINATED_WITH_ERRORS.'
+)

LIST_CLUSTERS_CREATED_AFTER = (
    'List only those clusters created after the date and time'
    ' specified in the format yyyy-mm-ddThh:mm:ss. For example,'
-    ' --created-after 2017-07-04T00:01:30.')
+    ' --created-after 2017-07-04T00:01:30.'
+)
LIST_CLUSTERS_CREATED_BEFORE = (
    'List only those clusters created before the date and time'
    ' specified in the format yyyy-mm-ddThh:mm:ss. For example,'
-    ' --created-before 2017-07-04T00:01:30.')
+    ' --created-before 2017-07-04T00:01:30.'
+)

EMR_MANAGED_MASTER_SECURITY_GROUP = (
    'The identifier of the Amazon EC2 security group '
-    'for the master node.')
+    'for the master node.'
+)

EMR_MANAGED_SLAVE_SECURITY_GROUP = (
    'The identifier of the Amazon EC2 security group '
-    'for the slave nodes.')
+    'for the slave nodes.'
+)

SERVICE_ACCESS_SECURITY_GROUP = (
    'The identifier of the Amazon EC2 security group '
-    'for Amazon EMR to access clusters in VPC private subnets.')
+    'for Amazon EMR to access clusters in VPC private subnets.'
+)

ADDITIONAL_MASTER_SECURITY_GROUPS = (
    ' A list of additional Amazon EC2 security group IDs for '
-    'the master node')
+    'the master node'
+)

ADDITIONAL_SLAVE_SECURITY_GROUPS = (
    'A list of additional Amazon EC2 security group IDs for '
-    'the slave nodes.')
+    'the slave nodes.'
+)

AVAILABLE_ONLY_FOR_AMI_VERSIONS = (
    'This command is only available when using Amazon EMR versions'
-    'earlier than 4.0.')
+    'earlier than 4.0.'
+)
STEP_CONCURRENCY_LEVEL = (
    'This command specifies the step concurrency level of the cluster.'
@@ -498,10 +548,10 @@
)

PLACEMENT_GROUP_CONFIGS = (
-    'Placement group configuration for an Amazon EMR '
-    'cluster. The configuration specifies the EC2 placement group '
-    'strategy associated with each EMR Instance Role. '
-    'Currently, we support placement group only for MASTER '
+    'Placement group configuration for an Amazon EMR '
+    'cluster. The configuration specifies the EC2 placement group '
+    'strategy associated with each EMR Instance Role. '
+    'Currently, we support placement group only for MASTER '
    'role with SPREAD strategy by default. You can opt-in by '
    'passing --placement-group-configs InstanceRole=MASTER '
    'during cluster creation. '
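
The helptext.py changes above are purely mechanical: ruff-format moves each
string's closing `')` onto its own `+)` line. A minimal standalone sketch (not
part of the diff) showing that both spellings rely on Python's implicit
concatenation of adjacent string literals, so the resulting values are
identical:

    # sketch.py -- implicit string concatenation, as used throughout helptext.py
    OLD_STYLE = (
        'This command is only available when using Amazon EMR versions'
        'earlier than 4.0.')
    NEW_STYLE = (
        'This command is only available when using Amazon EMR versions'
        'earlier than 4.0.'
    )
    # Only the closing parenthesis moved; the value is unchanged.
    assert OLD_STYLE == NEW_STYLE
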
diff --git a/awscli/customizations/emr/installapplications.py b/awscli/customizations/emr/installapplications.py
index e73e57b9d22e..6212a6b14c99 100644
--- a/awscli/customizations/emr/installapplications.py
+++ b/awscli/customizations/emr/installapplications.py
@@ -12,41 +12,52 @@
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import applicationutils
-from awscli.customizations.emr import argumentschema
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import helptext
+from awscli.customizations.emr import (
+ applicationutils,
+ argumentschema,
+ constants,
+ emrutils,
+ helptext,
+)
from awscli.customizations.emr.command import Command
from awscli.customizations.exceptions import ParamValidationError
class InstallApplications(Command):
NAME = 'install-applications'
- DESCRIPTION = ('Installs applications on a running cluster. Currently only'
- ' Hive and Pig can be installed using this command, and'
- ' this command is only supported by AMI versions'
- ' (3.x and 2.x).')
+ DESCRIPTION = (
+ 'Installs applications on a running cluster. Currently only'
+ ' Hive and Pig can be installed using this command, and'
+ ' this command is only supported by AMI versions'
+ ' (3.x and 2.x).'
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID},
- {'name': 'applications', 'required': True,
- 'help_text': helptext.INSTALL_APPLICATIONS,
- 'schema': argumentschema.APPLICATIONS_SCHEMA},
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ },
+ {
+ 'name': 'applications',
+ 'required': True,
+ 'help_text': helptext.INSTALL_APPLICATIONS,
+ 'schema': argumentschema.APPLICATIONS_SCHEMA,
+ },
]
# Applications supported by the install-applications command.
supported_apps = ['HIVE', 'PIG']
def _run_main_command(self, parsed_args, parsed_globals):
-
parameters = {'JobFlowId': parsed_args.cluster_id}
self._check_for_supported_apps(parsed_args.applications)
parameters['Steps'] = applicationutils.build_applications(
- self.region, parsed_args.applications)[2]
+ self.region, parsed_args.applications
+ )[2]
- emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
- parameters, parsed_globals)
+ emrutils.call_and_display_response(
+ self._session, 'AddJobFlowSteps', parameters, parsed_globals
+ )
return 0
def _check_for_supported_apps(self, parsed_applications):
@@ -58,10 +69,12 @@ def _check_for_supported_apps(self, parsed_applications):
raise ParamValidationError(
"aws: error: " + app_config['Name'] + " cannot be"
" installed on a running cluster. 'Name' should be one"
- " of the following: " +
- ', '.join(self.supported_apps))
+ " of the following: " + ', '.join(self.supported_apps)
+ )
else:
raise ParamValidationError(
- "aws: error: Unknown application: " + app_config['Name'] +
- ". 'Name' should be one of the following: " +
- ', '.join(constants.APPLICATIONS))
+ "aws: error: Unknown application: "
+ + app_config['Name']
+ + ". 'Name' should be one of the following: "
+ + ', '.join(constants.APPLICATIONS)
+ )
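
For context, a minimal sketch (not part of the diff; the simplified names and
the abbreviated application list are assumptions) of the validation flow that
the reformatted error messages above belong to: known-but-unsupported
applications and unknown applications raise different errors.

    # sketch of the install-applications check; only HIVE and PIG are supported
    SUPPORTED_APPS = ['HIVE', 'PIG']
    KNOWN_APPS = SUPPORTED_APPS + ['HBASE', 'GANGLIA', 'IMPALA', 'SPARK']  # abbreviated

    def check_for_supported_apps(parsed_applications):
        for app_config in parsed_applications:
            name = app_config['Name'].upper()
            if name in KNOWN_APPS:
                if name not in SUPPORTED_APPS:
                    raise ValueError(
                        name + " cannot be installed on a running cluster. "
                        "'Name' should be one of the following: "
                        + ', '.join(SUPPORTED_APPS)
                    )
            else:
                raise ValueError(
                    'Unknown application: ' + app_config['Name']
                    + ". 'Name' should be one of the following: "
                    + ', '.join(KNOWN_APPS)
                )

    check_for_supported_apps([{'Name': 'Hive'}, {'Name': 'Pig'}])  # passes
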
diff --git a/awscli/customizations/emr/instancefleetsutils.py b/awscli/customizations/emr/instancefleetsutils.py
index 02d8f0b7a826..4c60599487d3 100644
--- a/awscli/customizations/emr/instancefleetsutils.py
+++ b/awscli/customizations/emr/instancefleetsutils.py
@@ -11,8 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import constants, exceptions
def validate_and_build_instance_fleets(parsed_instance_fleets):
@@ -31,41 +30,71 @@ def validate_and_build_instance_fleets(parsed_instance_fleets):
instance_fleet_config['Name'] = instance_fleet['Name']
else:
instance_fleet_config['Name'] = instance_fleet['InstanceFleetType']
- instance_fleet_config['InstanceFleetType'] = instance_fleet['InstanceFleetType']
+ instance_fleet_config['InstanceFleetType'] = instance_fleet[
+ 'InstanceFleetType'
+ ]
if 'TargetOnDemandCapacity' in keys:
- instance_fleet_config['TargetOnDemandCapacity'] = instance_fleet['TargetOnDemandCapacity']
+ instance_fleet_config['TargetOnDemandCapacity'] = instance_fleet[
+ 'TargetOnDemandCapacity'
+ ]
if 'TargetSpotCapacity' in keys:
- instance_fleet_config['TargetSpotCapacity'] = instance_fleet['TargetSpotCapacity']
+ instance_fleet_config['TargetSpotCapacity'] = instance_fleet[
+ 'TargetSpotCapacity'
+ ]
if 'InstanceTypeConfigs' in keys:
- instance_fleet_config['InstanceTypeConfigs'] = instance_fleet['InstanceTypeConfigs']
+ instance_fleet_config['InstanceTypeConfigs'] = instance_fleet[
+ 'InstanceTypeConfigs'
+ ]
if 'LaunchSpecifications' in keys:
- instanceFleetProvisioningSpecifications = instance_fleet['LaunchSpecifications']
+ instanceFleetProvisioningSpecifications = instance_fleet[
+ 'LaunchSpecifications'
+ ]
instance_fleet_config['LaunchSpecifications'] = {}
if 'SpotSpecification' in instanceFleetProvisioningSpecifications:
- instance_fleet_config['LaunchSpecifications']['SpotSpecification'] = \
- instanceFleetProvisioningSpecifications['SpotSpecification']
+ instance_fleet_config['LaunchSpecifications'][
+ 'SpotSpecification'
+ ] = instanceFleetProvisioningSpecifications[
+ 'SpotSpecification'
+ ]
- if 'OnDemandSpecification' in instanceFleetProvisioningSpecifications:
- instance_fleet_config['LaunchSpecifications']['OnDemandSpecification'] = \
- instanceFleetProvisioningSpecifications['OnDemandSpecification']
+ if (
+ 'OnDemandSpecification'
+ in instanceFleetProvisioningSpecifications
+ ):
+ instance_fleet_config['LaunchSpecifications'][
+ 'OnDemandSpecification'
+ ] = instanceFleetProvisioningSpecifications[
+ 'OnDemandSpecification'
+ ]
if 'ResizeSpecifications' in keys:
- instanceFleetResizeSpecifications = instance_fleet['ResizeSpecifications']
+ instanceFleetResizeSpecifications = instance_fleet[
+ 'ResizeSpecifications'
+ ]
instance_fleet_config['ResizeSpecifications'] = {}
if 'SpotResizeSpecification' in instanceFleetResizeSpecifications:
- instance_fleet_config['ResizeSpecifications']['SpotResizeSpecification'] = \
- instanceFleetResizeSpecifications['SpotResizeSpecification']
+ instance_fleet_config['ResizeSpecifications'][
+ 'SpotResizeSpecification'
+ ] = instanceFleetResizeSpecifications[
+ 'SpotResizeSpecification'
+ ]
+
+ if (
+ 'OnDemandResizeSpecification'
+ in instanceFleetResizeSpecifications
+ ):
+ instance_fleet_config['ResizeSpecifications'][
+ 'OnDemandResizeSpecification'
+ ] = instanceFleetResizeSpecifications[
+ 'OnDemandResizeSpecification'
+ ]
- if 'OnDemandResizeSpecification' in instanceFleetResizeSpecifications:
- instance_fleet_config['ResizeSpecifications']['OnDemandResizeSpecification'] = \
- instanceFleetResizeSpecifications['OnDemandResizeSpecification']
-
if 'Context' in keys:
instance_fleet_config['Context'] = instance_fleet['Context']
diff --git a/awscli/customizations/emr/instancegroupsutils.py b/awscli/customizations/emr/instancegroupsutils.py
index 258032fef502..470b5eb50bc5 100644
--- a/awscli/customizations/emr/instancegroupsutils.py
+++ b/awscli/customizations/emr/instancegroupsutils.py
@@ -11,8 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import constants, exceptions
def build_instance_groups(parsed_instance_groups):
@@ -44,7 +43,9 @@ def build_instance_groups(parsed_instance_groups):
ig_config['EbsConfiguration'] = instance_group['EbsConfiguration']
if 'AutoScalingPolicy' in keys:
- ig_config['AutoScalingPolicy'] = instance_group['AutoScalingPolicy']
+ ig_config['AutoScalingPolicy'] = instance_group[
+ 'AutoScalingPolicy'
+ ]
if 'Configurations' in keys:
ig_config['Configurations'] = instance_group['Configurations']
@@ -56,8 +57,7 @@ def build_instance_groups(parsed_instance_groups):
return instance_groups
-def _build_instance_group(
- instance_type, instance_count, instance_group_type):
+def _build_instance_group(instance_type, instance_count, instance_group_type):
ig_config = {}
ig_config['InstanceType'] = instance_type
ig_config['InstanceCount'] = instance_count
@@ -68,13 +68,14 @@ def _build_instance_group(
def validate_and_build_instance_groups(
- instance_groups, instance_type, instance_count):
- if (instance_groups is None and instance_type is None):
+ instance_groups, instance_type, instance_count
+):
+ if instance_groups is None and instance_type is None:
raise exceptions.MissingRequiredInstanceGroupsError
- if (instance_groups is not None and
- (instance_type is not None or
- instance_count is not None)):
+ if instance_groups is not None and (
+ instance_type is not None or instance_count is not None
+ ):
raise exceptions.InstanceGroupsValidationError
if instance_groups is not None:
@@ -84,13 +85,15 @@ def validate_and_build_instance_groups(
master_ig = _build_instance_group(
instance_type=instance_type,
instance_count=1,
- instance_group_type="MASTER")
+ instance_group_type="MASTER",
+ )
instance_groups.append(master_ig)
if instance_count is not None and int(instance_count) > 1:
core_ig = _build_instance_group(
instance_type=instance_type,
instance_count=int(instance_count) - 1,
- instance_group_type="CORE")
+ instance_group_type="CORE",
+ )
instance_groups.append(core_ig)
return instance_groups
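
A minimal sketch (not part of the diff; the instance type is a made-up
example) of what the --instance-type/--instance-count shortcut handled by
validate_and_build_instance_groups produces: one MASTER group with a single
instance, and the remaining count as a CORE group.

    # sketch: the shortcut path when no --instance-groups are supplied
    def build_shortcut_groups(instance_type, instance_count):
        groups = [{'InstanceType': instance_type,
                   'InstanceCount': 1,
                   'InstanceGroupType': 'MASTER'}]
        if instance_count is not None and int(instance_count) > 1:
            groups.append({'InstanceType': instance_type,
                           'InstanceCount': int(instance_count) - 1,
                           'InstanceGroupType': 'CORE'})
        return groups

    # e.g. --instance-type m5.xlarge --instance-count 3
    print(build_shortcut_groups('m5.xlarge', 3))
    # -> one MASTER group with 1 instance and one CORE group with 2 instances
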
diff --git a/awscli/customizations/emr/listclusters.py b/awscli/customizations/emr/listclusters.py
index 04b69f3f57ae..cec17e9c78e7 100644
--- a/awscli/customizations/emr/listclusters.py
+++ b/awscli/customizations/emr/listclusters.py
@@ -13,41 +13,46 @@
from awscli.arguments import CustomArgument
-from awscli.customizations.emr import helptext
-from awscli.customizations.emr import exceptions
-from awscli.customizations.emr import constants
+from awscli.customizations.emr import constants, exceptions, helptext
def modify_list_clusters_argument(argument_table, **kwargs):
- argument_table['cluster-states'] = \
- ClusterStatesArgument(
- name='cluster-states',
- help_text=helptext.LIST_CLUSTERS_CLUSTER_STATES,
- nargs='+')
- argument_table['active'] = \
- ActiveStateArgument(
- name='active', help_text=helptext.LIST_CLUSTERS_STATE_FILTERS,
- action='store_true', group_name='states_filter')
- argument_table['terminated'] = \
- TerminatedStateArgument(
- name='terminated',
- action='store_true', group_name='states_filter')
- argument_table['failed'] = \
- FailedStateArgument(
- name='failed', action='store_true', group_name='states_filter')
+ argument_table['cluster-states'] = ClusterStatesArgument(
+ name='cluster-states',
+ help_text=helptext.LIST_CLUSTERS_CLUSTER_STATES,
+ nargs='+',
+ )
+ argument_table['active'] = ActiveStateArgument(
+ name='active',
+ help_text=helptext.LIST_CLUSTERS_STATE_FILTERS,
+ action='store_true',
+ group_name='states_filter',
+ )
+ argument_table['terminated'] = TerminatedStateArgument(
+ name='terminated', action='store_true', group_name='states_filter'
+ )
+ argument_table['failed'] = FailedStateArgument(
+ name='failed', action='store_true', group_name='states_filter'
+ )
argument_table['created-before'] = CreatedBefore(
- name='created-before', help_text=helptext.LIST_CLUSTERS_CREATED_BEFORE,
- cli_type_name='timestamp')
+ name='created-before',
+ help_text=helptext.LIST_CLUSTERS_CREATED_BEFORE,
+ cli_type_name='timestamp',
+ )
argument_table['created-after'] = CreatedAfter(
- name='created-after', help_text=helptext.LIST_CLUSTERS_CREATED_AFTER,
- cli_type_name='timestamp')
+ name='created-after',
+ help_text=helptext.LIST_CLUSTERS_CREATED_AFTER,
+ cli_type_name='timestamp',
+ )
class ClusterStatesArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is not None:
- if (parameters.get('ClusterStates') is not None and
- len(parameters.get('ClusterStates')) > 0):
+ if (
+ parameters.get('ClusterStates') is not None
+ and len(parameters.get('ClusterStates')) > 0
+ ):
raise exceptions.ClusterStatesFilterValidationError()
parameters['ClusterStates'] = value
@@ -55,8 +60,10 @@ def add_to_params(self, parameters, value):
class ActiveStateArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is True:
- if (parameters.get('ClusterStates') is not None and
- len(parameters.get('ClusterStates')) > 0):
+ if (
+ parameters.get('ClusterStates') is not None
+ and len(parameters.get('ClusterStates')) > 0
+ ):
raise exceptions.ClusterStatesFilterValidationError()
parameters['ClusterStates'] = constants.LIST_CLUSTERS_ACTIVE_STATES
@@ -64,18 +71,23 @@ def add_to_params(self, parameters, value):
class TerminatedStateArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is True:
- if (parameters.get('ClusterStates') is not None and
- len(parameters.get('ClusterStates')) > 0):
+ if (
+ parameters.get('ClusterStates') is not None
+ and len(parameters.get('ClusterStates')) > 0
+ ):
raise exceptions.ClusterStatesFilterValidationError()
- parameters['ClusterStates'] = \
+ parameters['ClusterStates'] = (
constants.LIST_CLUSTERS_TERMINATED_STATES
+ )
class FailedStateArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is True:
- if (parameters.get('ClusterStates') is not None and
- len(parameters.get('ClusterStates')) > 0):
+ if (
+ parameters.get('ClusterStates') is not None
+ and len(parameters.get('ClusterStates')) > 0
+ ):
raise exceptions.ClusterStatesFilterValidationError()
parameters['ClusterStates'] = constants.LIST_CLUSTERS_FAILED_STATES
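
A minimal sketch (not part of the diff) of the guard shared by the four filter
arguments above: whichever filter runs first populates ClusterStates, and any
second filter finds the list non-empty and raises the validation error.

    # sketch: mutual exclusion between --cluster-states, --active,
    # --terminated and --failed (state lists taken from the help text above)
    ACTIVE_STATES = ['STARTING', 'BOOTSTRAPPING', 'RUNNING', 'WAITING', 'TERMINATING']
    TERMINATED_STATES = ['TERMINATED']

    def apply_state_filter(parameters, states):
        if parameters.get('ClusterStates'):
            raise ValueError('You can specify only one of the cluster state filters.')
        parameters['ClusterStates'] = states

    params = {}
    apply_state_filter(params, ACTIVE_STATES)  # the first filter wins
    try:
        apply_state_filter(params, TERMINATED_STATES)
    except ValueError as err:
        print(err)  # a second filter is rejected
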
diff --git a/awscli/customizations/emr/modifyclusterattributes.py b/awscli/customizations/emr/modifyclusterattributes.py
index 888dce8489d7..c5e6035c5a64 100644
--- a/awscli/customizations/emr/modifyclusterattributes.py
+++ b/awscli/customizations/emr/modifyclusterattributes.py
@@ -11,103 +11,176 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import exceptions
-from awscli.customizations.emr import helptext
+from awscli.customizations.emr import emrutils, exceptions, helptext
from awscli.customizations.emr.command import Command
class ModifyClusterAttr(Command):
NAME = 'modify-cluster-attributes'
- DESCRIPTION = ("Modifies the cluster attributes 'visible-to-all-users', "
- " 'termination-protected' and 'unhealthy-node-replacement'.")
+ DESCRIPTION = (
+ "Modifies the cluster attributes 'visible-to-all-users', "
+ " 'termination-protected' and 'unhealthy-node-replacement'."
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': helptext.CLUSTER_ID},
- {'name': 'visible-to-all-users', 'required': False, 'action':
- 'store_true', 'group_name': 'visible',
- 'help_text': helptext.VISIBILITY},
- {'name': 'no-visible-to-all-users', 'required': False, 'action':
- 'store_true', 'group_name': 'visible',
- 'help_text': helptext.VISIBILITY},
- {'name': 'termination-protected', 'required': False, 'action':
- 'store_true', 'group_name': 'terminate',
- 'help_text': 'Set termination protection on or off'},
- {'name': 'no-termination-protected', 'required': False, 'action':
- 'store_true', 'group_name': 'terminate',
- 'help_text': 'Set termination protection on or off'},
- {'name': 'auto-terminate', 'required': False, 'action':
- 'store_true', 'group_name': 'auto_terminate',
- 'help_text': 'Set cluster auto terminate after completing all the steps on or off'},
- {'name': 'no-auto-terminate', 'required': False, 'action':
- 'store_true', 'group_name': 'auto_terminate',
- 'help_text': 'Set cluster auto terminate after completing all the steps on or off'},
- {'name': 'unhealthy-node-replacement', 'required': False, 'action':
- 'store_true', 'group_name': 'UnhealthyReplacement',
- 'help_text': 'Set Unhealthy Node Replacement on or off'},
- {'name': 'no-unhealthy-node-replacement', 'required': False, 'action':
- 'store_true', 'group_name': 'UnhealthyReplacement',
- 'help_text': 'Set Unhealthy Node Replacement on or off'},
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': helptext.CLUSTER_ID,
+ },
+ {
+ 'name': 'visible-to-all-users',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'visible',
+ 'help_text': helptext.VISIBILITY,
+ },
+ {
+ 'name': 'no-visible-to-all-users',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'visible',
+ 'help_text': helptext.VISIBILITY,
+ },
+ {
+ 'name': 'termination-protected',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'terminate',
+ 'help_text': 'Set termination protection on or off',
+ },
+ {
+ 'name': 'no-termination-protected',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'terminate',
+ 'help_text': 'Set termination protection on or off',
+ },
+ {
+ 'name': 'auto-terminate',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'auto_terminate',
+ 'help_text': 'Set cluster auto terminate after completing all the steps on or off',
+ },
+ {
+ 'name': 'no-auto-terminate',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'auto_terminate',
+ 'help_text': 'Set cluster auto terminate after completing all the steps on or off',
+ },
+ {
+ 'name': 'unhealthy-node-replacement',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'UnhealthyReplacement',
+ 'help_text': 'Set Unhealthy Node Replacement on or off',
+ },
+ {
+ 'name': 'no-unhealthy-node-replacement',
+ 'required': False,
+ 'action': 'store_true',
+ 'group_name': 'UnhealthyReplacement',
+ 'help_text': 'Set Unhealthy Node Replacement on or off',
+ },
]
def _run_main_command(self, args, parsed_globals):
-
- if (args.visible_to_all_users and args.no_visible_to_all_users):
+ if args.visible_to_all_users and args.no_visible_to_all_users:
raise exceptions.MutualExclusiveOptionError(
option1='--visible-to-all-users',
- option2='--no-visible-to-all-users')
- if (args.termination_protected and args.no_termination_protected):
+ option2='--no-visible-to-all-users',
+ )
+ if args.termination_protected and args.no_termination_protected:
raise exceptions.MutualExclusiveOptionError(
option1='--termination-protected',
- option2='--no-termination-protected')
- if (args.auto_terminate and args.no_auto_terminate):
+ option2='--no-termination-protected',
+ )
+ if args.auto_terminate and args.no_auto_terminate:
raise exceptions.MutualExclusiveOptionError(
- option1='--auto-terminate',
- option2='--no-auto-terminate')
- if (args.unhealthy_node_replacement and args.no_unhealthy_node_replacement):
+ option1='--auto-terminate', option2='--no-auto-terminate'
+ )
+ if (
+ args.unhealthy_node_replacement
+ and args.no_unhealthy_node_replacement
+ ):
raise exceptions.MutualExclusiveOptionError(
option1='--unhealthy-node-replacement',
- option2='--no-unhealthy-node-replacement')
- if not(args.termination_protected or args.no_termination_protected or
- args.visible_to_all_users or args.no_visible_to_all_users or
- args.auto_terminate or args.no_auto_terminate or
- args.unhealthy_node_replacement or args.no_unhealthy_node_replacement):
+ option2='--no-unhealthy-node-replacement',
+ )
+ if not (
+ args.termination_protected
+ or args.no_termination_protected
+ or args.visible_to_all_users
+ or args.no_visible_to_all_users
+ or args.auto_terminate
+ or args.no_auto_terminate
+ or args.unhealthy_node_replacement
+ or args.no_unhealthy_node_replacement
+ ):
raise exceptions.MissingClusterAttributesError()
- if (args.visible_to_all_users or args.no_visible_to_all_users):
- visible = (args.visible_to_all_users and
- not args.no_visible_to_all_users)
- parameters = {'JobFlowIds': [args.cluster_id],
- 'VisibleToAllUsers': visible}
- emrutils.call_and_display_response(self._session,
- 'SetVisibleToAllUsers',
- parameters, parsed_globals)
+ if args.visible_to_all_users or args.no_visible_to_all_users:
+ visible = (
+ args.visible_to_all_users and not args.no_visible_to_all_users
+ )
+ parameters = {
+ 'JobFlowIds': [args.cluster_id],
+ 'VisibleToAllUsers': visible,
+ }
+ emrutils.call_and_display_response(
+ self._session,
+ 'SetVisibleToAllUsers',
+ parameters,
+ parsed_globals,
+ )
+
+ if args.termination_protected or args.no_termination_protected:
+ protected = (
+ args.termination_protected
+ and not args.no_termination_protected
+ )
+ parameters = {
+ 'JobFlowIds': [args.cluster_id],
+ 'TerminationProtected': protected,
+ }
+ emrutils.call_and_display_response(
+ self._session,
+ 'SetTerminationProtection',
+ parameters,
+ parsed_globals,
+ )
- if (args.termination_protected or args.no_termination_protected):
- protected = (args.termination_protected and
- not args.no_termination_protected)
- parameters = {'JobFlowIds': [args.cluster_id],
- 'TerminationProtected': protected}
- emrutils.call_and_display_response(self._session,
- 'SetTerminationProtection',
- parameters, parsed_globals)
+ if args.auto_terminate or args.no_auto_terminate:
+ auto_terminate = args.auto_terminate and not args.no_auto_terminate
+ parameters = {
+ 'JobFlowIds': [args.cluster_id],
+ 'KeepJobFlowAliveWhenNoSteps': not auto_terminate,
+ }
+ emrutils.call_and_display_response(
+ self._session,
+ 'SetKeepJobFlowAliveWhenNoSteps',
+ parameters,
+ parsed_globals,
+ )
- if (args.auto_terminate or args.no_auto_terminate):
- auto_terminate = (args.auto_terminate and
- not args.no_auto_terminate)
- parameters = {'JobFlowIds': [args.cluster_id],
- 'KeepJobFlowAliveWhenNoSteps': not auto_terminate}
- emrutils.call_and_display_response(self._session,
- 'SetKeepJobFlowAliveWhenNoSteps',
- parameters, parsed_globals)
-
- if (args.unhealthy_node_replacement or args.no_unhealthy_node_replacement):
- protected = (args.unhealthy_node_replacement and
- not args.no_unhealthy_node_replacement)
- parameters = {'JobFlowIds': [args.cluster_id],
- 'UnhealthyNodeReplacement': protected}
- emrutils.call_and_display_response(self._session,
- 'SetUnhealthyNodeReplacement',
- parameters, parsed_globals)
+ if (
+ args.unhealthy_node_replacement
+ or args.no_unhealthy_node_replacement
+ ):
+ protected = (
+ args.unhealthy_node_replacement
+ and not args.no_unhealthy_node_replacement
+ )
+ parameters = {
+ 'JobFlowIds': [args.cluster_id],
+ 'UnhealthyNodeReplacement': protected,
+ }
+ emrutils.call_and_display_response(
+ self._session,
+ 'SetUnhealthyNodeReplacement',
+ parameters,
+ parsed_globals,
+ )
return 0
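
A minimal sketch (not part of the diff) of how each --foo/--no-foo pair above
resolves to a boolean before its EMR API call: both flags together raise,
neither flag skips the call, and otherwise the positive flag wins.

    # sketch: resolving one of the paired boolean options
    def resolve_flag_pair(on_flag, off_flag, option1, option2):
        if on_flag and off_flag:
            raise ValueError(
                'You cannot specify both %s and %s options together.'
                % (option1, option2)
            )
        if not (on_flag or off_flag):
            return None  # attribute left untouched; no API call is made
        return on_flag and not off_flag

    print(resolve_flag_pair(True, False,
                            '--termination-protected',
                            '--no-termination-protected'))  # True
    print(resolve_flag_pair(False, False,
                            '--termination-protected',
                            '--no-termination-protected'))  # None
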
diff --git a/awscli/customizations/emr/ssh.py b/awscli/customizations/emr/ssh.py
index ae4cb71ceb17..3f2d3edbfa30 100644
--- a/awscli/customizations/emr/ssh.py
+++ b/awscli/customizations/emr/ssh.py
@@ -15,25 +15,33 @@
import subprocess
import tempfile
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import sshutils
+from awscli.customizations.emr import constants, emrutils, sshutils
from awscli.customizations.emr.command import Command
-KEY_PAIR_FILE_HELP_TEXT = '\nA value for the variable Key Pair File ' \
- 'can be set in the AWS CLI config file using the ' \
+KEY_PAIR_FILE_HELP_TEXT = (
+ '\nA value for the variable Key Pair File '
+ 'can be set in the AWS CLI config file using the '
'"aws configure set emr.key_pair_file " command.\n'
+)
class Socks(Command):
NAME = 'socks'
- DESCRIPTION = ('Create a socks tunnel on port 8157 from your machine '
- 'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
+ DESCRIPTION = (
+ 'Create a socks tunnel on port 8157 from your machine '
+ 'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': 'Cluster Id of cluster you want to ssh into'},
- {'name': 'key-pair-file', 'required': True,
- 'help_text': 'Private key file to use for login'},
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': 'Cluster Id of cluster you want to ssh into',
+ },
+ {
+ 'name': 'key-pair-file',
+ 'required': True,
+ 'help_text': 'Private key file to use for login',
+ },
]
def _run_main_command(self, parsed_args, parsed_globals):
@@ -41,20 +49,36 @@ def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
- cluster_id=parsed_args.cluster_id)
+ cluster_id=parsed_args.cluster_id,
+ )
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
- if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
- command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
- 'ServerAliveInterval=10', '-ND', '8157', '-i',
- parsed_args.key_pair_file, constants.SSH_USER +
- '@' + master_dns]
+ if emrutils.which('ssh') or emrutils.which('ssh.exe'):
+ command = [
+ 'ssh',
+ '-o',
+ 'StrictHostKeyChecking=no',
+ '-o',
+ 'ServerAliveInterval=10',
+ '-ND',
+ '8157',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns,
+ ]
else:
- command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
- constants.SSH_USER + '@' + master_dns, '-N', '-D',
- '8157']
+ command = [
+ 'putty',
+ '-ssh',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns,
+ '-N',
+ '-D',
+ '8157',
+ ]
print(' '.join(command))
rc = subprocess.call(command)
@@ -66,35 +90,56 @@ def _run_main_command(self, parsed_args, parsed_globals):
class SSH(Command):
NAME = 'ssh'
- DESCRIPTION = ('SSH into master node of the cluster.\n%s' %
- KEY_PAIR_FILE_HELP_TEXT)
+ DESCRIPTION = (
+ 'SSH into master node of the cluster.\n%s' % KEY_PAIR_FILE_HELP_TEXT
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': 'Cluster Id of cluster you want to ssh into'},
- {'name': 'key-pair-file', 'required': True,
- 'help_text': 'Private key file to use for login'},
- {'name': 'command', 'help_text': 'Command to execute on Master Node'}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': 'Cluster Id of cluster you want to ssh into',
+ },
+ {
+ 'name': 'key-pair-file',
+ 'required': True,
+ 'help_text': 'Private key file to use for login',
+ },
+ {'name': 'command', 'help_text': 'Command to execute on Master Node'},
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
- cluster_id=parsed_args.cluster_id)
+ cluster_id=parsed_args.cluster_id,
+ )
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
- if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
- command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
- 'ServerAliveInterval=10', '-i',
- parsed_args.key_pair_file, constants.SSH_USER +
- '@' + master_dns, '-t']
+ if emrutils.which('ssh') or emrutils.which('ssh.exe'):
+ command = [
+ 'ssh',
+ '-o',
+ 'StrictHostKeyChecking=no',
+ '-o',
+ 'ServerAliveInterval=10',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns,
+ '-t',
+ ]
if parsed_args.command:
command.append(parsed_args.command)
else:
- command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
- constants.SSH_USER + '@' + master_dns, '-t']
+ command = [
+ 'putty',
+ '-ssh',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns,
+ '-t',
+ ]
if parsed_args.command:
f.write(parsed_args.command)
f.write('\nread -n1 -r -p "Command completed. Press any key."')
@@ -110,33 +155,57 @@ def _run_main_command(self, parsed_args, parsed_globals):
class Put(Command):
NAME = 'put'
- DESCRIPTION = ('Put file onto the master node.\n%s' %
- KEY_PAIR_FILE_HELP_TEXT)
+ DESCRIPTION = (
+ 'Put file onto the master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT
+ )
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': 'Cluster Id of cluster you want to put file onto'},
- {'name': 'key-pair-file', 'required': True,
- 'help_text': 'Private key file to use for login'},
- {'name': 'src', 'required': True,
- 'help_text': 'Source file path on local machine'},
- {'name': 'dest', 'help_text': 'Destination file path on remote host'}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': 'Cluster Id of cluster you want to put file onto',
+ },
+ {
+ 'name': 'key-pair-file',
+ 'required': True,
+ 'help_text': 'Private key file to use for login',
+ },
+ {
+ 'name': 'src',
+ 'required': True,
+ 'help_text': 'Source file path on local machine',
+ },
+ {'name': 'dest', 'help_text': 'Destination file path on remote host'},
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
- cluster_id=parsed_args.cluster_id)
+ cluster_id=parsed_args.cluster_id,
+ )
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
- if (emrutils.which('scp') or emrutils.which('scp.exe')):
- command = ['scp', '-r', '-o StrictHostKeyChecking=no',
- '-i', parsed_args.key_pair_file, parsed_args.src,
- constants.SSH_USER + '@' + master_dns]
+ if emrutils.which('scp') or emrutils.which('scp.exe'):
+ command = [
+ 'scp',
+ '-r',
+ '-o StrictHostKeyChecking=no',
+ '-i',
+ parsed_args.key_pair_file,
+ parsed_args.src,
+ constants.SSH_USER + '@' + master_dns,
+ ]
else:
- command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
- parsed_args.src, constants.SSH_USER + '@' + master_dns]
+ command = [
+ 'pscp',
+ '-scp',
+ '-r',
+ '-i',
+ parsed_args.key_pair_file,
+ parsed_args.src,
+ constants.SSH_USER + '@' + master_dns,
+ ]
# if the instance is not terminated
if parsed_args.dest:
@@ -150,33 +219,53 @@ def _run_main_command(self, parsed_args, parsed_globals):
class Get(Command):
NAME = 'get'
- DESCRIPTION = ('Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
+ DESCRIPTION = 'Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT
ARG_TABLE = [
- {'name': 'cluster-id', 'required': True,
- 'help_text': 'Cluster Id of cluster you want to get file from'},
- {'name': 'key-pair-file', 'required': True,
- 'help_text': 'Private key file to use for login'},
- {'name': 'src', 'required': True,
- 'help_text': 'Source file path on remote host'},
- {'name': 'dest', 'help_text': 'Destination file path on your machine'}
+ {
+ 'name': 'cluster-id',
+ 'required': True,
+ 'help_text': 'Cluster Id of cluster you want to get file from',
+ },
+ {
+ 'name': 'key-pair-file',
+ 'required': True,
+ 'help_text': 'Private key file to use for login',
+ },
+ {
+ 'name': 'src',
+ 'required': True,
+ 'help_text': 'Source file path on remote host',
+ },
+ {'name': 'dest', 'help_text': 'Destination file path on your machine'},
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
- cluster_id=parsed_args.cluster_id)
+ cluster_id=parsed_args.cluster_id,
+ )
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
- if (emrutils.which('scp') or emrutils.which('scp.exe')):
- command = ['scp', '-r', '-o StrictHostKeyChecking=no', '-i',
- parsed_args.key_pair_file, constants.SSH_USER + '@' +
- master_dns + ':' + parsed_args.src]
+ if emrutils.which('scp') or emrutils.which('scp.exe'):
+ command = [
+ 'scp',
+ '-r',
+ '-o StrictHostKeyChecking=no',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns + ':' + parsed_args.src,
+ ]
else:
- command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
- constants.SSH_USER + '@' + master_dns + ':' +
- parsed_args.src]
+ command = [
+ 'pscp',
+ '-scp',
+ '-r',
+ '-i',
+ parsed_args.key_pair_file,
+ constants.SSH_USER + '@' + master_dns + ':' + parsed_args.src,
+ ]
if parsed_args.dest:
command.append(parsed_args.dest)
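
For context, the argv lists assembled above (for ssh, putty, scp, and pscp) are passed to the subprocess machinery as lists rather than shell strings, which sidesteps quoting problems in key-file paths. A minimal sketch of that execution pattern, with a placeholder key path and host:

    import subprocess

    # Placeholders; in the CLI these come from parsed_args and the
    # cluster's resolved master public DNS name.
    command = [
        'ssh',
        '-o', 'StrictHostKeyChecking=no',
        '-o', 'ServerAliveInterval=10',
        '-i', '/path/to/key.pem',
        'hadoop@ec2-198-51-100-1.compute-1.amazonaws.com',
        '-t',
    ]
    # A list argv means no shell is involved, so spaces in the key
    # path need no extra quoting.
    rc = subprocess.call(command)
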
diff --git a/awscli/customizations/emr/sshutils.py b/awscli/customizations/emr/sshutils.py
index 443f64b472d0..81d8b3fa9626 100644
--- a/awscli/customizations/emr/sshutils.py
+++ b/awscli/customizations/emr/sshutils.py
@@ -13,9 +13,7 @@
import logging
-from awscli.customizations.emr import exceptions
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import constants
+from awscli.customizations.emr import constants, emrutils, exceptions
from botocore.exceptions import WaiterError
LOG = logging.getLogger(__name__)
@@ -32,7 +30,8 @@ def validate_and_find_master_dns(session, parsed_globals, cluster_id):
Throw MasterDNSNotAvailableError or ClusterTerminatedError.
"""
cluster_state = emrutils.get_cluster_state(
- session, parsed_globals, cluster_id)
+ session, parsed_globals, cluster_id
+ )
if cluster_state in constants.TERMINATED_STATES:
raise exceptions.ClusterTerminatedError
@@ -48,21 +47,27 @@ def validate_and_find_master_dns(session, parsed_globals, cluster_id):
raise exceptions.MasterDNSNotAvailableError
return emrutils.find_master_dns(
- session=session, cluster_id=cluster_id,
- parsed_globals=parsed_globals)
+ session=session, cluster_id=cluster_id, parsed_globals=parsed_globals
+ )
def validate_ssh_with_key_file(key_file):
- if (emrutils.which('putty.exe') or emrutils.which('ssh') or
- emrutils.which('ssh.exe')) is None:
+ if (
+ emrutils.which('putty.exe')
+ or emrutils.which('ssh')
+ or emrutils.which('ssh.exe')
+ ) is None:
raise exceptions.SSHNotFoundError
else:
check_ssh_key_format(key_file)
def validate_scp_with_key_file(key_file):
- if (emrutils.which('pscp.exe') or emrutils.which('scp') or
- emrutils.which('scp.exe')) is None:
+ if (
+ emrutils.which('pscp.exe')
+ or emrutils.which('scp')
+ or emrutils.which('scp.exe')
+ ) is None:
raise exceptions.SCPNotFoundError
else:
check_scp_key_format(key_file)
@@ -70,8 +75,10 @@ def validate_scp_with_key_file(key_file):
def check_scp_key_format(key_file):
# If only pscp is present and the file format is incorrect
- if (emrutils.which('pscp.exe') is not None and
- (emrutils.which('scp.exe') or emrutils.which('scp')) is None):
+ if (
+ emrutils.which('pscp.exe') is not None
+ and (emrutils.which('scp.exe') or emrutils.which('scp')) is None
+ ):
if check_command_key_format(key_file, ['ppk']) is False:
raise exceptions.WrongPuttyKeyError
else:
@@ -80,8 +87,10 @@ def check_scp_key_format(key_file):
def check_ssh_key_format(key_file):
# If only putty is present and the file format is incorrect
- if (emrutils.which('putty.exe') is not None and
- (emrutils.which('ssh.exe') or emrutils.which('ssh')) is None):
+ if (
+ emrutils.which('putty.exe') is not None
+ and (emrutils.which('ssh.exe') or emrutils.which('ssh')) is None
+ ):
if check_command_key_format(key_file, ['ppk']) is False:
raise exceptions.WrongPuttyKeyError
else:
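
The reflowed which() chains read more clearly once you recall that `or` evaluates to its first truthy operand (here, a path string) or to the final operand (None), so a single `is None` test covers all three lookups. The stdlib equivalent of the pattern:

    from shutil import which

    # `or` yields the first non-None path, else None from the last call.
    found = which('putty.exe') or which('ssh') or which('ssh.exe')
    if found is None:
        raise RuntimeError('no SSH client found on PATH')
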
diff --git a/awscli/customizations/emr/steputils.py b/awscli/customizations/emr/steputils.py
index 3a9e6b99bfa2..e6b343b7a91a 100644
--- a/awscli/customizations/emr/steputils.py
+++ b/awscli/customizations/emr/steputils.py
@@ -11,9 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import constants
-from awscli.customizations.emr import exceptions
+from awscli.customizations.emr import constants, emrutils, exceptions
def build_step_config_list(parsed_step_list, region, release_label):
@@ -29,23 +27,24 @@ def build_step_config_list(parsed_step_list, region, release_label):
step_config = build_custom_jar_step(parsed_step=step)
elif step_type == constants.STREAMING:
step_config = build_streaming_step(
- parsed_step=step, release_label=release_label)
+ parsed_step=step, release_label=release_label
+ )
elif step_type == constants.HIVE:
step_config = build_hive_step(
- parsed_step=step, region=region,
- release_label=release_label)
+ parsed_step=step, region=region, release_label=release_label
+ )
elif step_type == constants.PIG:
step_config = build_pig_step(
- parsed_step=step, region=region,
- release_label=release_label)
+ parsed_step=step, region=region, release_label=release_label
+ )
elif step_type == constants.IMPALA:
step_config = build_impala_step(
- parsed_step=step, region=region,
- release_label=release_label)
+ parsed_step=step, region=region, release_label=release_label
+ )
elif step_type == constants.SPARK:
step_config = build_spark_step(
- parsed_step=step, region=region,
- release_label=release_label)
+ parsed_step=step, region=region, release_label=release_label
+ )
else:
raise exceptions.UnknownStepTypeError(step_type=step_type)
@@ -57,14 +56,17 @@ def build_step_config_list(parsed_step_list, region, release_label):
def build_custom_jar_step(parsed_step):
name = _apply_default_value(
arg=parsed_step.get('Name'),
- value=constants.DEFAULT_CUSTOM_JAR_STEP_NAME)
+ value=constants.DEFAULT_CUSTOM_JAR_STEP_NAME,
+ )
action_on_failure = _apply_default_value(
arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
emrutils.check_required_field(
structure=constants.CUSTOM_JAR_STEP_CONFIG,
name='Jar',
- value=parsed_step.get('Jar'))
+ value=parsed_step.get('Jar'),
+ )
return emrutils.build_step(
jar=parsed_step.get('Jar'),
args=parsed_step.get('Args'),
@@ -72,22 +74,25 @@ def build_custom_jar_step(parsed_step):
action_on_failure=action_on_failure,
main_class=parsed_step.get('MainClass'),
properties=emrutils.parse_key_value_string(
- parsed_step.get('Properties')))
+ parsed_step.get('Properties')
+ ),
+ )
def build_streaming_step(parsed_step, release_label):
name = _apply_default_value(
arg=parsed_step.get('Name'),
- value=constants.DEFAULT_STREAMING_STEP_NAME)
+ value=constants.DEFAULT_STREAMING_STEP_NAME,
+ )
action_on_failure = _apply_default_value(
arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
args = parsed_step.get('Args')
emrutils.check_required_field(
- structure=constants.STREAMING_STEP_CONFIG,
- name='Args',
- value=args)
+ structure=constants.STREAMING_STEP_CONFIG, name='Args', value=args
+ )
emrutils.check_empty_string_list(name='Args', value=args)
args_list = []
@@ -100,30 +105,30 @@ def build_streaming_step(parsed_step, release_label):
args_list += args
return emrutils.build_step(
- jar=jar,
- args=args_list,
- name=name,
- action_on_failure=action_on_failure)
+ jar=jar, args=args_list, name=name, action_on_failure=action_on_failure
+ )
def build_hive_step(parsed_step, release_label, region=None):
args = parsed_step.get('Args')
emrutils.check_required_field(
- structure=constants.HIVE_STEP_CONFIG, name='Args', value=args)
+ structure=constants.HIVE_STEP_CONFIG, name='Args', value=args
+ )
emrutils.check_empty_string_list(name='Args', value=args)
name = _apply_default_value(
- arg=parsed_step.get('Name'),
- value=constants.DEFAULT_HIVE_STEP_NAME)
- action_on_failure = \
- _apply_default_value(
- arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ arg=parsed_step.get('Name'), value=constants.DEFAULT_HIVE_STEP_NAME
+ )
+ action_on_failure = _apply_default_value(
+ arg=parsed_step.get('ActionOnFailure'),
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
return emrutils.build_step(
jar=_get_runner_jar(release_label, region),
args=_build_hive_args(args, release_label, region),
name=name,
- action_on_failure=action_on_failure)
+ action_on_failure=action_on_failure,
+ )
def _build_hive_args(args, release_label, region):
@@ -131,8 +136,11 @@ def _build_hive_args(args, release_label, region):
if release_label:
args_list.append(constants.HIVE_SCRIPT_COMMAND)
else:
- args_list.append(emrutils.build_s3_link(
- relative_path=constants.HIVE_SCRIPT_PATH, region=region))
+ args_list.append(
+ emrutils.build_s3_link(
+ relative_path=constants.HIVE_SCRIPT_PATH, region=region
+ )
+ )
args_list.append(constants.RUN_HIVE_SCRIPT)
@@ -149,20 +157,23 @@ def _build_hive_args(args, release_label, region):
def build_pig_step(parsed_step, release_label, region=None):
args = parsed_step.get('Args')
emrutils.check_required_field(
- structure=constants.PIG_STEP_CONFIG, name='Args', value=args)
+ structure=constants.PIG_STEP_CONFIG, name='Args', value=args
+ )
emrutils.check_empty_string_list(name='Args', value=args)
name = _apply_default_value(
- arg=parsed_step.get('Name'),
- value=constants.DEFAULT_PIG_STEP_NAME)
+ arg=parsed_step.get('Name'), value=constants.DEFAULT_PIG_STEP_NAME
+ )
action_on_failure = _apply_default_value(
arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
return emrutils.build_step(
jar=_get_runner_jar(release_label, region),
args=_build_pig_args(args, release_label, region),
name=name,
- action_on_failure=action_on_failure)
+ action_on_failure=action_on_failure,
+ )
def _build_pig_args(args, release_label, region):
@@ -170,8 +181,11 @@ def _build_pig_args(args, release_label, region):
if release_label:
args_list.append(constants.PIG_SCRIPT_COMMAND)
else:
- args_list.append(emrutils.build_s3_link(
- relative_path=constants.PIG_SCRIPT_PATH, region=region))
+ args_list.append(
+ emrutils.build_s3_link(
+ relative_path=constants.PIG_SCRIPT_PATH, region=region
+ )
+ )
args_list.append(constants.RUN_PIG_SCRIPT)
@@ -189,43 +203,51 @@ def build_impala_step(parsed_step, release_label, region=None):
if release_label:
raise exceptions.UnknownStepTypeError(step_type=constants.IMPALA)
name = _apply_default_value(
- arg=parsed_step.get('Name'),
- value=constants.DEFAULT_IMPALA_STEP_NAME)
+ arg=parsed_step.get('Name'), value=constants.DEFAULT_IMPALA_STEP_NAME
+ )
action_on_failure = _apply_default_value(
arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
args_list = [
emrutils.build_s3_link(
- relative_path=constants.IMPALA_INSTALL_PATH, region=region),
- constants.RUN_IMPALA_SCRIPT]
+ relative_path=constants.IMPALA_INSTALL_PATH, region=region
+ ),
+ constants.RUN_IMPALA_SCRIPT,
+ ]
args = parsed_step.get('Args')
emrutils.check_required_field(
- structure=constants.IMPALA_STEP_CONFIG, name='Args', value=args)
+ structure=constants.IMPALA_STEP_CONFIG, name='Args', value=args
+ )
args_list += args
return emrutils.build_step(
jar=emrutils.get_script_runner(region),
args=args_list,
name=name,
- action_on_failure=action_on_failure)
+ action_on_failure=action_on_failure,
+ )
def build_spark_step(parsed_step, release_label, region=None):
name = _apply_default_value(
- arg=parsed_step.get('Name'),
- value=constants.DEFAULT_SPARK_STEP_NAME)
+ arg=parsed_step.get('Name'), value=constants.DEFAULT_SPARK_STEP_NAME
+ )
action_on_failure = _apply_default_value(
arg=parsed_step.get('ActionOnFailure'),
- value=constants.DEFAULT_FAILURE_ACTION)
+ value=constants.DEFAULT_FAILURE_ACTION,
+ )
args = parsed_step.get('Args')
emrutils.check_required_field(
- structure=constants.SPARK_STEP_CONFIG, name='Args', value=args)
+ structure=constants.SPARK_STEP_CONFIG, name='Args', value=args
+ )
return emrutils.build_step(
jar=_get_runner_jar(release_label, region),
args=_build_spark_args(args, release_label, region),
name=name,
- action_on_failure=action_on_failure)
+ action_on_failure=action_on_failure,
+ )
def _build_spark_args(args, release_label, region):
@@ -247,5 +269,8 @@ def _apply_default_value(arg, value):
def _get_runner_jar(release_label, region):
- return constants.COMMAND_RUNNER if release_label \
+ return (
+ constants.COMMAND_RUNNER
+ if release_label
else emrutils.get_script_runner(region)
+ )
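
Every branch above funnels into emrutils.build_step, whose output follows the EMR API's StepConfig shape. A sketch with illustrative values only (none taken from this diff):

    # Roughly what a built Hive step config looks like under the
    # EMR StepConfig shape; all values here are illustrative.
    step_config = {
        'Name': 'Hive program',
        'ActionOnFailure': 'CONTINUE',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',  # or a regional script-runner jar
            'Args': ['hive-script', '--run-hive-script',
                     '--args', '-f', 's3://example-bucket/query.q'],
        },
    }
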
diff --git a/awscli/customizations/emr/terminateclusters.py b/awscli/customizations/emr/terminateclusters.py
index b3d7234dc2bb..a4d28b9f14e8 100644
--- a/awscli/customizations/emr/terminateclusters.py
+++ b/awscli/customizations/emr/terminateclusters.py
@@ -12,23 +12,26 @@
# language governing permissions and limitations under the License.
-from awscli.customizations.emr import emrutils
-from awscli.customizations.emr import helptext
+from awscli.customizations.emr import emrutils, helptext
from awscli.customizations.emr.command import Command
class TerminateClusters(Command):
NAME = 'terminate-clusters'
DESCRIPTION = helptext.TERMINATE_CLUSTERS
- ARG_TABLE = [{
- 'name': 'cluster-ids', 'nargs': '+', 'required': True,
- 'help_text': '<p>A list of clusters to terminate.</p>',
- 'schema': {'type': 'array', 'items': {'type': 'string'}},
- }]
+ ARG_TABLE = [
+ {
+ 'name': 'cluster-ids',
+ 'nargs': '+',
+ 'required': True,
+ 'help_text': '<p>A list of clusters to terminate.</p>',
+ 'schema': {'type': 'array', 'items': {'type': 'string'}},
+ }
+ ]
def _run_main_command(self, parsed_args, parsed_globals):
parameters = {'JobFlowIds': parsed_args.cluster_ids}
- emrutils.call_and_display_response(self._session,
- 'TerminateJobFlows', parameters,
- parsed_globals)
+ emrutils.call_and_display_response(
+ self._session, 'TerminateJobFlows', parameters, parsed_globals
+ )
return 0
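
This customization is a thin rename: --cluster-ids is forwarded as the JobFlowIds parameter of the EMR TerminateJobFlows API. A roughly equivalent direct call, assuming configured credentials and placeholder cluster ids:

    import botocore.session

    emr = botocore.session.get_session().create_client(
        'emr', region_name='us-east-1'
    )
    # --cluster-ids j-XXX j-YYY maps onto JobFlowIds.
    emr.terminate_job_flows(
        JobFlowIds=['j-1AAAAAAAAAAAA', 'j-2BBBBBBBBBBBB']
    )
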
diff --git a/awscli/customizations/emrcontainers/__init__.py b/awscli/customizations/emrcontainers/__init__.py
index dc93cf5c1c3d..9fb1e96dc80b 100644
--- a/awscli/customizations/emrcontainers/__init__.py
+++ b/awscli/customizations/emrcontainers/__init__.py
@@ -11,8 +11,9 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.emrcontainers.update_role_trust_policy \
- import UpdateRoleTrustPolicyCommand
+from awscli.customizations.emrcontainers.update_role_trust_policy import (
+ UpdateRoleTrustPolicyCommand,
+)
def initialize(cli):
@@ -28,4 +29,5 @@ def inject_commands(command_table, session, **kwargs):
Used to inject new high level commands into the command list.
"""
command_table['update-role-trust-policy'] = UpdateRoleTrustPolicyCommand(
- session)
+ session
+ )
diff --git a/awscli/customizations/emrcontainers/constants.py b/awscli/customizations/emrcontainers/constants.py
index a8e23e95f941..9be41aff5ea4 100644
--- a/awscli/customizations/emrcontainers/constants.py
+++ b/awscli/customizations/emrcontainers/constants.py
@@ -14,24 +14,28 @@
# Declare all the constants used by Lifecycle in this file
# Lifecycle role names
-TRUST_POLICY_STATEMENT_FORMAT = '{ \
+TRUST_POLICY_STATEMENT_FORMAT = (
+ '{ \
"Effect": "Allow", \
"Principal": { \
- "Federated": "arn:%(AWS_PARTITION)s:iam::%(AWS_ACCOUNT_ID)s:oidc-provider/' \
- '%(OIDC_PROVIDER)s" \
+ "Federated": "arn:%(AWS_PARTITION)s:iam::%(AWS_ACCOUNT_ID)s:oidc-provider/'
+ '%(OIDC_PROVIDER)s" \
}, \
"Action": "sts:AssumeRoleWithWebIdentity", \
"Condition": { \
"StringLike": { \
- "%(OIDC_PROVIDER)s:sub": "system:serviceaccount:%(NAMESPACE)s' \
- ':emr-containers-sa-*-*-%(AWS_ACCOUNT_ID)s-' \
- '%(BASE36_ENCODED_ROLE_NAME)s" \
+ "%(OIDC_PROVIDER)s:sub": "system:serviceaccount:%(NAMESPACE)s'
+ ':emr-containers-sa-*-*-%(AWS_ACCOUNT_ID)s-'
+ '%(BASE36_ENCODED_ROLE_NAME)s" \
} \
} \
}'
+)
-TRUST_POLICY_STATEMENT_ALREADY_EXISTS = "Trust policy statement already " \
- "exists for role %s. No changes " \
- "were made!"
+TRUST_POLICY_STATEMENT_ALREADY_EXISTS = (
+ "Trust policy statement already "
+ "exists for role %s. No changes "
+ "were made!"
+)
TRUST_POLICY_UPDATE_SUCCESSFUL = "Successfully updated trust policy of role %s"
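
TRUST_POLICY_STATEMENT_FORMAT depends on two Python behaviors that the reformatting preserves: adjacent string literals concatenate at compile time, and %-formatting with a mapping fills each %(NAME)s field by key. A small sketch of the same pattern with placeholder values:

    import json

    # Adjacent literals concatenate into one template string.
    template = (
        '{ "Federated": "arn:%(AWS_PARTITION)s:iam::%(AWS_ACCOUNT_ID)s:'
        'oidc-provider/%(OIDC_PROVIDER)s" }'
    )
    # %-formatting with a dict fills each %(NAME)s placeholder by key.
    statement = json.loads(template % {
        'AWS_PARTITION': 'aws',            # placeholder
        'AWS_ACCOUNT_ID': '123456789012',  # placeholder
        'OIDC_PROVIDER': 'oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE',
    })
    print(statement['Federated'])
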
diff --git a/awscli/customizations/emrcontainers/eks.py b/awscli/customizations/emrcontainers/eks.py
index 148785193489..d3d9d80fb28b 100644
--- a/awscli/customizations/emrcontainers/eks.py
+++ b/awscli/customizations/emrcontainers/eks.py
@@ -24,8 +24,13 @@ def get_oidc_issuer_id(self, cluster_name):
name=cluster_name
)
- oidc_issuer = self.cluster_info[cluster_name].get("cluster", {}).get(
- "identity", {}).get("oidc", {}).get("issuer", "")
+ oidc_issuer = (
+ self.cluster_info[cluster_name]
+ .get("cluster", {})
+ .get("identity", {})
+ .get("oidc", {})
+ .get("issuer", "")
+ )
return oidc_issuer.split('https://')[1]
@@ -36,7 +41,8 @@ def get_account_id(self, cluster_name):
name=cluster_name
)
- cluster_arn = self.cluster_info[cluster_name].get("cluster", {}).get(
- "arn", "")
+ cluster_arn = (
+ self.cluster_info[cluster_name].get("cluster", {}).get("arn", "")
+ )
return cluster_arn.split(':')[4]
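
The chained .get({}) calls are defensive: each missing key substitutes an empty dict, so a partial describe-cluster response yields an empty string instead of raising KeyError. A minimal sketch with a made-up response:

    # Hypothetical, partial EKS describe-cluster response.
    response = {'cluster': {'identity': {}}}

    issuer = (
        response.get('cluster', {})
        .get('identity', {})
        .get('oidc', {})    # missing key -> {} keeps the chain alive
        .get('issuer', '')  # final default when nothing was found
    )
    assert issuer == ''
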
diff --git a/awscli/customizations/emrcontainers/iam.py b/awscli/customizations/emrcontainers/iam.py
index 141a40536135..92cf0f14bb59 100644
--- a/awscli/customizations/emrcontainers/iam.py
+++ b/awscli/customizations/emrcontainers/iam.py
@@ -26,6 +26,5 @@ def get_assume_role_policy(self, role_name):
def update_assume_role_policy(self, role_name, assume_role_policy):
"""Method to update trust policy of given role name"""
return self.iam_client.update_assume_role_policy(
- RoleName=role_name,
- PolicyDocument=json.dumps(assume_role_policy)
+ RoleName=role_name, PolicyDocument=json.dumps(assume_role_policy)
)
diff --git a/awscli/customizations/emrcontainers/update_role_trust_policy.py b/awscli/customizations/emrcontainers/update_role_trust_policy.py
index 036382c9ec16..191c5b59259e 100644
--- a/awscli/customizations/emrcontainers/update_role_trust_policy.py
+++ b/awscli/customizations/emrcontainers/update_role_trust_policy.py
@@ -15,14 +15,15 @@
import logging
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.emrcontainers.constants \
- import TRUST_POLICY_STATEMENT_FORMAT, \
- TRUST_POLICY_STATEMENT_ALREADY_EXISTS, \
- TRUST_POLICY_UPDATE_SUCCESSFUL
from awscli.customizations.emrcontainers.base36 import Base36
+from awscli.customizations.emrcontainers.constants import (
+ TRUST_POLICY_STATEMENT_ALREADY_EXISTS,
+ TRUST_POLICY_STATEMENT_FORMAT,
+ TRUST_POLICY_UPDATE_SUCCESSFUL,
+)
from awscli.customizations.emrcontainers.eks import EKS
from awscli.customizations.emrcontainers.iam import IAM
-from awscli.customizations.utils import uni_print, get_policy_arn_suffix
+from awscli.customizations.utils import get_policy_arn_suffix, uni_print
LOG = logging.getLogger(__name__)
@@ -71,48 +72,56 @@ class UpdateRoleTrustPolicyCommand(BasicCommand):
NAME = 'update-role-trust-policy'
DESCRIPTION = BasicCommand.FROM_FILE(
- 'emr-containers',
- 'update-role-trust-policy',
- '_description.rst'
+ 'emr-containers', 'update-role-trust-policy', '_description.rst'
)
ARG_TABLE = [
{
'name': 'cluster-name',
- 'help_text': ("Specify the name of the Amazon EKS cluster with "
- "which the IAM Role would be used."),
- 'required': True
+ 'help_text': (
+ "Specify the name of the Amazon EKS cluster with "
+ "which the IAM Role would be used."
+ ),
+ 'required': True,
},
{
'name': 'namespace',
- 'help_text': ("Specify the namespace from the Amazon EKS cluster "
- "with which the IAM Role would be used."),
- 'required': True
+ 'help_text': (
+ "Specify the namespace from the Amazon EKS cluster "
+ "with which the IAM Role would be used."
+ ),
+ 'required': True,
},
{
'name': 'role-name',
- 'help_text': ("Specify the IAM Role name that you want to use"
- "with Amazon EMR on EKS."),
- 'required': True
+ 'help_text': (
+ "Specify the IAM Role name that you want to use"
+ "with Amazon EMR on EKS."
+ ),
+ 'required': True,
},
{
'name': 'iam-endpoint',
'no_paramfile': True,
- 'help_text': ("The IAM endpoint to call for updating the role "
- "trust policy. This is optional and should only be"
- "specified when a custom endpoint should be called"
- "for IAM operations."),
- 'required': False
+ 'help_text': (
+ "The IAM endpoint to call for updating the role "
+ "trust policy. This is optional and should only be"
+ "specified when a custom endpoint should be called"
+ "for IAM operations."
+ ),
+ 'required': False,
},
{
'name': 'dry-run',
'action': 'store_true',
'default': False,
- 'help_text': ("Print the merged trust policy document to"
- "stdout instead of updating the role trust"
- "policy directly."),
- 'required': False
- }
+ 'help_text': (
+ "Print the merged trust policy document to"
+ "stdout instead of updating the role trust"
+ "policy directly."
+ ),
+ 'required': False,
+ },
]
def _run_main(self, parsed_args, parsed_globals):
@@ -136,42 +145,55 @@ def _update_role_trust_policy(self, parsed_globals):
base36 = Base36()
- eks_client = EKS(self._session.create_client(
- 'eks',
- region_name=self._region,
- verify=parsed_globals.verify_ssl
- ))
+ eks_client = EKS(
+ self._session.create_client(
+ 'eks',
+ region_name=self._region,
+ verify=parsed_globals.verify_ssl,
+ )
+ )
account_id = eks_client.get_account_id(self._cluster_name)
oidc_provider = eks_client.get_oidc_issuer_id(self._cluster_name)
base36_encoded_role_name = base36.encode(self._role_name)
LOG.debug('Base36 encoded role name: %s', base36_encoded_role_name)
- trust_policy_statement = json.loads(TRUST_POLICY_STATEMENT_FORMAT % {
- "AWS_ACCOUNT_ID": account_id,
- "OIDC_PROVIDER": oidc_provider,
- "NAMESPACE": self._namespace,
- "BASE36_ENCODED_ROLE_NAME": base36_encoded_role_name,
- "AWS_PARTITION": get_policy_arn_suffix(self._region)
- })
-
- LOG.debug('Computed Trust Policy Statement:\n%s', json.dumps(
- trust_policy_statement, indent=2))
- iam_client = IAM(self._session.create_client(
- 'iam',
- region_name=self._region,
- endpoint_url=self._endpoint_url,
- verify=parsed_globals.verify_ssl
- ))
+ trust_policy_statement = json.loads(
+ TRUST_POLICY_STATEMENT_FORMAT
+ % {
+ "AWS_ACCOUNT_ID": account_id,
+ "OIDC_PROVIDER": oidc_provider,
+ "NAMESPACE": self._namespace,
+ "BASE36_ENCODED_ROLE_NAME": base36_encoded_role_name,
+ "AWS_PARTITION": get_policy_arn_suffix(self._region),
+ }
+ )
+
+ LOG.debug(
+ 'Computed Trust Policy Statement:\n%s',
+ json.dumps(trust_policy_statement, indent=2),
+ )
+ iam_client = IAM(
+ self._session.create_client(
+ 'iam',
+ region_name=self._region,
+ endpoint_url=self._endpoint_url,
+ verify=parsed_globals.verify_ssl,
+ )
+ )
assume_role_document = iam_client.get_assume_role_policy(
- self._role_name)
- matches = check_if_statement_exists(trust_policy_statement,
- assume_role_document)
+ self._role_name
+ )
+ matches = check_if_statement_exists(
+ trust_policy_statement, assume_role_document
+ )
if not matches:
- LOG.debug('Role %s does not have the required trust policy ',
- self._role_name)
+ LOG.debug(
+ 'Role %s does not have the required trust policy ',
+ self._role_name,
+ )
existing_statements = assume_role_document.get("Statement")
if existing_statements is None:
@@ -183,8 +205,9 @@ def _update_role_trust_policy(self, parsed_globals):
return json.dumps(assume_role_document, indent=2)
else:
LOG.debug('Updating trust policy of role %s', self._role_name)
- iam_client.update_assume_role_policy(self._role_name,
- assume_role_document)
+ iam_client.update_assume_role_policy(
+ self._role_name, assume_role_document
+ )
return TRUST_POLICY_UPDATE_SUCCESSFUL % self._role_name
else:
return TRUST_POLICY_STATEMENT_ALREADY_EXISTS % self._role_name
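
The merge step partly visible above boils down to appending the computed statement to the document's Statement list, creating the list when absent. The idea in isolation, with toy documents:

    # Toy trust-policy documents; real ones come from IAM.
    document = {'Version': '2012-10-17'}
    new_statement = {
        'Effect': 'Allow',
        'Action': 'sts:AssumeRoleWithWebIdentity',
    }

    statements = document.get('Statement')
    if statements is None:
        # No Statement block yet: create one holding the new entry.
        document['Statement'] = [new_statement]
    else:
        statements.append(new_statement)
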
diff --git a/awscli/customizations/flatten.py b/awscli/customizations/flatten.py
index a7b893fa077c..5b1348c8311b 100644
--- a/awscli/customizations/flatten.py
+++ b/awscli/customizations/flatten.py
@@ -30,15 +30,26 @@ class FlattenedArgument(CustomArgument):
Supports both an object and a list of objects, in which case the flattened
parameters will hydrate a list with a single object in it.
"""
- def __init__(self, name, container, prop, help_text='', required=None,
- type=None, hydrate=None, hydrate_value=None):
+
+ def __init__(
+ self,
+ name,
+ container,
+ prop,
+ help_text='',
+ required=None,
+ type=None,
+ hydrate=None,
+ hydrate_value=None,
+ ):
self.type = type
self._container = container
self._property = prop
self._hydrate = hydrate
self._hydrate_value = hydrate_value
- super(FlattenedArgument, self).__init__(name=name, help_text=help_text,
- required=required)
+ super(FlattenedArgument, self).__init__(
+ name=name, help_text=help_text, required=required
+ )
@property
def cli_type_name(self):
@@ -151,6 +162,7 @@ def my_hydrate(params, container, cli_type, key, value):
ensure that a list of one or more objects is hydrated rather than a
single object.
"""
+
def __init__(self, service_name, configs):
self.configs = configs
self.service_name = service_name
@@ -163,9 +175,10 @@ def register(self, cli):
# Flatten each configured operation when they are built
service = self.service_name
for operation in self.configs:
- cli.register('building-argument-table.{0}.{1}'.format(service,
- operation),
- self.flatten_args)
+ cli.register(
+ 'building-argument-table.{0}.{1}'.format(service, operation),
+ self.flatten_args,
+ )
def flatten_args(self, command, argument_table, **kwargs):
# For each argument with a bag of parameters
@@ -173,10 +186,15 @@ def flatten_args(self, command, argument_table, **kwargs):
argument_from_table = argument_table[name]
overwritten = False
- LOG.debug('Flattening {0} argument {1} into {2}'.format(
- command.name, name,
- ', '.join([v['name'] for k, v in argument['flatten'].items()])
- ))
+ LOG.debug(
+ 'Flattening {0} argument {1} into {2}'.format(
+ command.name,
+ name,
+ ', '.join(
+ [v['name'] for k, v in argument['flatten'].items()]
+ ),
+ )
+ )
# For each parameter to flatten out
for sub_argument, new_config in argument['flatten'].items():
@@ -200,8 +218,9 @@ def flatten_args(self, command, argument_table, **kwargs):
overwritten = True
# Delete the original argument?
- if not overwritten and ('keep' not in argument or
- not argument['keep']):
+ if not overwritten and (
+ 'keep' not in argument or not argument['keep']
+ ):
del argument_table[name]
def _find_nested_arg(self, argument, name):
@@ -239,7 +258,9 @@ def _merge_member_config(self, argument, name, config):
config['help_text'] = member.documentation
if 'required' not in config:
- config['required'] = member_name in argument.required_members
+ config['required'] = (
+ member_name in argument.required_members
+ )
if 'type' not in config:
config['type'] = member.type_name
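
The keys this code consumes ('flatten', each entry's 'name', and the optional 'keep') imply a config shape along the lines of the following schematic; the operation and argument names are illustrative, not from this diff:

    # Schematic FlattenArguments config inferred from the keys used above.
    configs = {
        'create-widget': {                  # operation name
            'widget-config': {              # nested argument to flatten
                'keep': False,              # drop the original argument
                'flatten': {
                    'Size': {'name': 'widget-size'},
                    'Color': {'name': 'widget-color'},
                },
            },
        },
    }
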
diff --git a/awscli/customizations/gamelift/__init__.py b/awscli/customizations/gamelift/__init__.py
index 6a4857a9e69b..e7a3022e7adc 100644
--- a/awscli/customizations/gamelift/__init__.py
+++ b/awscli/customizations/gamelift/__init__.py
@@ -10,8 +10,8 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.gamelift.uploadbuild import UploadBuildCommand
from awscli.customizations.gamelift.getlog import GetGameSessionLogCommand
+from awscli.customizations.gamelift.uploadbuild import UploadBuildCommand
def register_gamelift_commands(event_emitter):
diff --git a/awscli/customizations/gamelift/getlog.py b/awscli/customizations/gamelift/getlog.py
index 3bded0e9dc13..15339089d588 100644
--- a/awscli/customizations/gamelift/getlog.py
+++ b/awscli/customizations/gamelift/getlog.py
@@ -21,30 +21,38 @@ class GetGameSessionLogCommand(BasicCommand):
NAME = 'get-game-session-log'
DESCRIPTION = 'Download a compressed log file for a game session.'
ARG_TABLE = [
- {'name': 'game-session-id', 'required': True,
- 'help_text': 'The game session ID'},
- {'name': 'save-as', 'required': True,
- 'help_text': 'The filename to which the file should be saved (.zip)'}
+ {
+ 'name': 'game-session-id',
+ 'required': True,
+ 'help_text': 'The game session ID',
+ },
+ {
+ 'name': 'save-as',
+ 'required': True,
+ 'help_text': 'The filename to which the file should be saved (.zip)',
+ },
]
def _run_main(self, args, parsed_globals):
client = self._session.create_client(
- 'gamelift', region_name=parsed_globals.region,
+ 'gamelift',
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
# Retrieve a signed url.
response = client.get_game_session_log_url(
- GameSessionId=args.game_session_id)
+ GameSessionId=args.game_session_id
+ )
url = response['PreSignedUrl']
# Retrieve the content from the presigned url and save it locally.
contents = urlopen(url)
sys.stdout.write(
- 'Downloading log archive for game session %s...\r' %
- args.game_session_id
+ 'Downloading log archive for game session %s...\r'
+ % args.game_session_id
)
with open(args.save_as, 'wb') as f:
@@ -53,6 +61,7 @@ def _run_main(self, args, parsed_globals):
sys.stdout.write(
'Successfully downloaded log archive for game '
- 'session %s to %s\n' % (args.game_session_id, args.save_as))
+ 'session %s to %s\n' % (args.game_session_id, args.save_as)
+ )
return 0
diff --git a/awscli/customizations/gamelift/uploadbuild.py b/awscli/customizations/gamelift/uploadbuild.py
index 369317c5ffd9..b71ec860b4f1 100644
--- a/awscli/customizations/gamelift/uploadbuild.py
+++ b/awscli/customizations/gamelift/uploadbuild.py
@@ -10,43 +10,56 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import threading
import contextlib
import os
-import tempfile
import sys
+import tempfile
+import threading
import zipfile
-from s3transfer import S3Transfer
-
from awscli.customizations.commands import BasicCommand
from awscli.customizations.s3.utils import human_readable_size
+from s3transfer import S3Transfer
class UploadBuildCommand(BasicCommand):
NAME = 'upload-build'
DESCRIPTION = 'Upload a new build to AWS GameLift.'
ARG_TABLE = [
- {'name': 'name', 'required': True,
- 'help_text': 'The name of the build'},
- {'name': 'build-version', 'required': True,
- 'help_text': 'The version of the build'},
- {'name': 'build-root', 'required': True,
- 'help_text':
- 'The path to the directory containing the build to upload'},
- {'name': 'server-sdk-version', 'required': False,
- 'help_text':
- 'The version of the GameLift server SDK used to '
- 'create the game server'},
- {'name': 'operating-system', 'required': False,
- 'help_text': 'The operating system the build runs on'}
+ {
+ 'name': 'name',
+ 'required': True,
+ 'help_text': 'The name of the build',
+ },
+ {
+ 'name': 'build-version',
+ 'required': True,
+ 'help_text': 'The version of the build',
+ },
+ {
+ 'name': 'build-root',
+ 'required': True,
+ 'help_text': 'The path to the directory containing the build to upload',
+ },
+ {
+ 'name': 'server-sdk-version',
+ 'required': False,
+ 'help_text': 'The version of the GameLift server SDK used to '
+ 'create the game server',
+ },
+ {
+ 'name': 'operating-system',
+ 'required': False,
+ 'help_text': 'The operating system the build runs on',
+ },
]
def _run_main(self, args, parsed_globals):
gamelift_client = self._session.create_client(
- 'gamelift', region_name=parsed_globals.region,
+ 'gamelift',
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
# Validate a build directory
if not validate_directory(args.build_root):
@@ -60,7 +73,7 @@ def _run_main(self, args, parsed_globals):
# Create a build based on the operating system given.
create_build_kwargs = {
'Name': args.name,
- 'Version': args.build_version
+ 'Version': args.build_version,
}
if args.operating_system:
create_build_kwargs['OperatingSystem'] = args.operating_system
@@ -70,8 +83,7 @@ def _run_main(self, args, parsed_globals):
build_id = response['Build']['BuildId']
# Retrieve a set of credentials and the s3 bucket and key.
- response = gamelift_client.request_upload_credentials(
- BuildId=build_id)
+ response = gamelift_client.request_upload_credentials(BuildId=build_id)
upload_credentials = response['UploadCredentials']
bucket = response['StorageLocation']['Bucket']
key = response['StorageLocation']['Key']
@@ -82,11 +94,12 @@ def _run_main(self, args, parsed_globals):
secret_key = upload_credentials['SecretAccessKey']
session_token = upload_credentials['SessionToken']
s3_client = self._session.create_client(
- 's3', aws_access_key_id=access_key,
+ 's3',
+ aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name=parsed_globals.region,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
s3_transfer_mgr = S3Transfer(s3_client)
@@ -95,11 +108,13 @@ def _run_main(self, args, parsed_globals):
fd, temporary_zipfile = tempfile.mkstemp('%s.zip' % build_id)
zip_directory(temporary_zipfile, args.build_root)
s3_transfer_mgr.upload_file(
- temporary_zipfile, bucket, key,
+ temporary_zipfile,
+ bucket,
+ key,
callback=ProgressPercentage(
temporary_zipfile,
- label='Uploading ' + args.build_root + ':'
- )
+ label='Uploading ' + args.build_root + ':',
+ ),
)
finally:
os.close(fd)
@@ -107,7 +122,8 @@ def _run_main(self, args, parsed_globals):
sys.stdout.write(
'Successfully uploaded %s to AWS GameLift\n'
- 'Build ID: %s\n' % (args.build_root, build_id))
+ 'Build ID: %s\n' % (args.build_root, build_id)
+ )
return 0
@@ -120,8 +136,7 @@ def zip_directory(zipfile_name, source_root):
for root, dirs, files in os.walk(source_root):
for filename in files:
full_path = os.path.join(root, filename)
- relative_path = os.path.relpath(
- full_path, source_root)
+ relative_path = os.path.relpath(full_path, source_root)
zf.write(full_path, relative_path)
@@ -156,9 +171,12 @@ def __call__(self, bytes_amount):
if self._size > 0:
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
- "\r%s %s / %s (%.2f%%)" % (
- self._label, human_readable_size(self._seen_so_far),
- human_readable_size(self._size), percentage
+ "\r%s %s / %s (%.2f%%)"
+ % (
+ self._label,
+ human_readable_size(self._seen_so_far),
+ human_readable_size(self._size),
+ percentage,
)
)
sys.stdout.flush()
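
ProgressPercentage works because S3Transfer calls the callback with the byte count of each chunk, possibly from multiple threads, hence the lock around the running total. A pared-down sketch of the same wiring, assuming configured credentials and placeholder bucket/key names:

    import boto3
    from s3transfer import S3Transfer

    def progress(bytes_amount):
        # Invoked repeatedly by s3transfer with per-chunk byte counts.
        print('transferred another %d bytes' % bytes_amount)

    client = boto3.client('s3')
    S3Transfer(client).upload_file(
        'build.zip', 'example-bucket', 'builds/build.zip',
        callback=progress,
    )
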
diff --git a/awscli/customizations/generatecliskeleton.py b/awscli/customizations/generatecliskeleton.py
index 3f0f12dd6588..552badf3dba7 100644
--- a/awscli/customizations/generatecliskeleton.py
+++ b/awscli/customizations/generatecliskeleton.py
@@ -13,15 +13,15 @@
import json
import sys
-from botocore import xform_name
-from botocore.stub import Stubber
-from botocore.utils import ArgumentGenerator
from ruamel.yaml import YAML
from awscli.clidriver import CLIOperationCaller
from awscli.customizations.arguments import OverrideRequiredArgsArgument
from awscli.customizations.utils import get_shape_doc_overview
from awscli.utils import json_encoder
+from botocore import xform_name
+from botocore.stub import Stubber
+from botocore.utils import ArgumentGenerator
def register_generate_cli_skeleton(cli):
@@ -33,7 +33,8 @@ def add_generate_skeleton(session, operation_model, argument_table, **kwargs):
# is designated by the argument name `outfile`.
if 'outfile' not in argument_table:
generate_cli_skeleton_argument = GenerateCliSkeletonArgument(
- session, operation_model)
+ session, operation_model
+ )
generate_cli_skeleton_argument.add_to_arg_table(argument_table)
@@ -44,6 +45,7 @@ class GenerateCliSkeletonArgument(OverrideRequiredArgsArgument):
command from taking place. Instead, it will generate a JSON skeleton and
print it to standard output.
"""
+
ARG_DATA = {
'name': 'generate-cli-skeleton',
'help_text': (
@@ -86,17 +88,18 @@ def override_required_args(self, argument_table, args, **kwargs):
except IndexError:
pass
super(GenerateCliSkeletonArgument, self).override_required_args(
- argument_table, args, **kwargs)
+ argument_table, args, **kwargs
+ )
- def generate_skeleton(self, call_parameters, parsed_args,
- parsed_globals, **kwargs):
+ def generate_skeleton(
+ self, call_parameters, parsed_args, parsed_globals, **kwargs
+ ):
if not getattr(parsed_args, 'generate_cli_skeleton', None):
return
arg_value = parsed_args.generate_cli_skeleton
return getattr(
- self, '_generate_%s_skeleton' % arg_value.replace('-', '_'))(
- call_parameters=call_parameters, parsed_globals=parsed_globals
- )
+ self, '_generate_%s_skeleton' % arg_value.replace('-', '_')
+ )(call_parameters=call_parameters, parsed_globals=parsed_globals)
def _generate_yaml_input_skeleton(self, **kwargs):
input_shape = self._operation_model.input_shape
@@ -120,13 +123,14 @@ def _generate_input_skeleton(self, **kwargs):
outfile.write('\n')
return 0
- def _generate_output_skeleton(self, call_parameters, parsed_globals,
- **kwargs):
+ def _generate_output_skeleton(
+ self, call_parameters, parsed_globals, **kwargs
+ ):
service_name = self._operation_model.service_model.service_name
operation_name = self._operation_model.name
return StubbedCLIOperationCaller(self._session).invoke(
- service_name, operation_name, call_parameters,
- parsed_globals)
+ service_name, operation_name, call_parameters, parsed_globals
+ )
class StubbedCLIOperationCaller(CLIOperationCaller):
@@ -135,16 +139,20 @@ class StubbedCLIOperationCaller(CLIOperationCaller):
It generates a fake response and uses the response and provided parameters
to make a stubbed client call for an operation command.
"""
- def _make_client_call(self, client, operation_name, parameters,
- parsed_globals):
+
+ def _make_client_call(
+ self, client, operation_name, parameters, parsed_globals
+ ):
method_name = xform_name(operation_name)
operation_model = client.meta.service_model.operation_model(
- operation_name)
+ operation_name
+ )
fake_response = {}
if operation_model.output_shape:
argument_generator = ArgumentGenerator(use_member_names=True)
fake_response = argument_generator.generate_skeleton(
- operation_model.output_shape)
+ operation_model.output_shape
+ )
with Stubber(client) as stubber:
stubber.add_response(method_name, fake_response)
return getattr(client, method_name)(**parameters)
@@ -153,13 +161,14 @@ def _make_client_call(self, client, operation_name, parameters,
class _Bytes(object):
@classmethod
def represent(cls, dumper, data):
- return dumper.represent_scalar(u'tag:yaml.org,2002:binary', '')
+ return dumper.represent_scalar('tag:yaml.org,2002:binary', '')
class YAMLArgumentGenerator(ArgumentGenerator):
def __init__(self, use_member_names=False, yaml=None):
super(YAMLArgumentGenerator, self).__init__(
- use_member_names=use_member_names)
+ use_member_names=use_member_names
+ )
self._yaml = yaml
if self._yaml is None:
self._yaml = YAML()
@@ -181,14 +190,17 @@ def _generate_type_structure(self, shape, stack):
skeleton = self._yaml.map()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
- member_shape, stack, name=member_name)
+ member_shape, stack, name=member_name
+ )
is_required = member_name in shape.required_members
self._add_member_comments(
- skeleton, member_name, member_shape, is_required)
+ skeleton, member_name, member_shape, is_required
+ )
return skeleton
- def _add_member_comments(self, skeleton, member_name, member_shape,
- is_required):
+ def _add_member_comments(
+ self, skeleton, member_name, member_shape, is_required
+ ):
comment_components = []
if is_required:
comment_components.append('[REQUIRED]')
@@ -208,6 +220,6 @@ def _generate_type_map(self, shape, stack):
# YAML has support for ordered maps, so don't use ordereddicts
# because that isn't necessary and it makes the output harder to
# understand and read.
- return dict(super(YAMLArgumentGenerator, self)._generate_type_map(
- shape, stack
- ))
+ return dict(
+ super(YAMLArgumentGenerator, self)._generate_type_map(shape, stack)
+ )
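
The stubbing pattern above is plain botocore: ArgumentGenerator fabricates a response matching the operation's output shape, and Stubber queues it so the real client method returns it without any network I/O. A minimal standalone sketch:

    import botocore.session
    from botocore.stub import Stubber
    from botocore.utils import ArgumentGenerator

    client = botocore.session.get_session().create_client(
        's3', region_name='us-east-1'
    )
    op_model = client.meta.service_model.operation_model('ListBuckets')
    fake = ArgumentGenerator(use_member_names=True).generate_skeleton(
        op_model.output_shape
    )
    with Stubber(client) as stubber:
        stubber.add_response('list_buckets', fake)
        response = client.list_buckets()  # returns the fabricated skeleton
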
diff --git a/awscli/customizations/globalargs.py b/awscli/customizations/globalargs.py
index 3a84223f93ce..056b09a5fe33 100644
--- a/awscli/customizations/globalargs.py
+++ b/awscli/customizations/globalargs.py
@@ -10,29 +10,38 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import sys
import os
+import sys
-from botocore.client import Config
-from botocore.endpoint import DEFAULT_TIMEOUT
-from botocore.handlers import disable_signing
import jmespath
from awscli.compat import urlparse
from awscli.customizations.exceptions import ParamValidationError
+from botocore.client import Config
+from botocore.endpoint import DEFAULT_TIMEOUT
+from botocore.handlers import disable_signing
def register_parse_global_args(cli):
- cli.register('top-level-args-parsed', resolve_types,
- unique_id='resolve-types')
- cli.register('top-level-args-parsed', no_sign_request,
- unique_id='no-sign')
- cli.register('top-level-args-parsed', resolve_verify_ssl,
- unique_id='resolve-verify-ssl')
- cli.register('top-level-args-parsed', resolve_cli_read_timeout,
- unique_id='resolve-cli-read-timeout')
- cli.register('top-level-args-parsed', resolve_cli_connect_timeout,
- unique_id='resolve-cli-connect-timeout')
+ cli.register(
+ 'top-level-args-parsed', resolve_types, unique_id='resolve-types'
+ )
+ cli.register('top-level-args-parsed', no_sign_request, unique_id='no-sign')
+ cli.register(
+ 'top-level-args-parsed',
+ resolve_verify_ssl,
+ unique_id='resolve-verify-ssl',
+ )
+ cli.register(
+ 'top-level-args-parsed',
+ resolve_cli_read_timeout,
+ unique_id='resolve-cli-read-timeout',
+ )
+ cli.register(
+ 'top-level-args-parsed',
+ resolve_cli_connect_timeout,
+ unique_id='resolve-cli-connect-timeout',
+ )
def resolve_types(parsed_args, **kwargs):
@@ -94,7 +103,9 @@ def no_sign_request(parsed_args, session, **kwargs):
# Register this first to override other handlers.
emitter = session.get_component('event_emitter')
emitter.register_first(
- 'choose-signer', disable_signing, unique_id='disable-signing',
+ 'choose-signer',
+ disable_signing,
+ unique_id='disable-signing',
)
diff --git a/awscli/customizations/history/__init__.py b/awscli/customizations/history/__init__.py
index 68da5710c323..3a21c48d82e3 100644
--- a/awscli/customizations/history/__init__.py
+++ b/awscli/customizations/history/__init__.py
@@ -10,37 +10,39 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import logging
import os
import sys
-import logging
-
-from botocore.history import get_global_history_recorder
-from botocore.exceptions import ProfileNotFound
from awscli.compat import sqlite3
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.history.constants import HISTORY_FILENAME_ENV_VAR
-from awscli.customizations.history.constants import DEFAULT_HISTORY_FILENAME
-from awscli.customizations.history.db import DatabaseConnection
-from awscli.customizations.history.db import DatabaseRecordWriter
-from awscli.customizations.history.db import RecordBuilder
-from awscli.customizations.history.db import DatabaseHistoryHandler
-from awscli.customizations.history.show import ShowCommand
+from awscli.customizations.history.constants import (
+ DEFAULT_HISTORY_FILENAME,
+ HISTORY_FILENAME_ENV_VAR,
+)
+from awscli.customizations.history.db import (
+ DatabaseConnection,
+ DatabaseHistoryHandler,
+ DatabaseRecordWriter,
+ RecordBuilder,
+)
from awscli.customizations.history.list import ListCommand
-
+from awscli.customizations.history.show import ShowCommand
+from botocore.exceptions import ProfileNotFound
+from botocore.history import get_global_history_recorder
LOG = logging.getLogger(__name__)
HISTORY_RECORDER = get_global_history_recorder()
def register_history_mode(event_handlers):
- event_handlers.register(
- 'session-initialized', attach_history_handler)
+ event_handlers.register('session-initialized', attach_history_handler)
def register_history_commands(event_handlers):
event_handlers.register(
- "building-command-table.main", add_history_commands)
+ "building-command-table.main", add_history_commands
+ )
def attach_history_handler(session, parsed_args, **kwargs):
@@ -48,7 +50,8 @@ def attach_history_handler(session, parsed_args, **kwargs):
LOG.debug('Enabling CLI history')
history_filename = os.environ.get(
- HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME)
+ HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME
+ )
if not os.path.isdir(os.path.dirname(history_filename)):
os.makedirs(os.path.dirname(history_filename))
@@ -98,7 +101,7 @@ class HistoryCommand(BasicCommand):
)
SUBCOMMANDS = [
{'name': 'show', 'command_class': ShowCommand},
- {'name': 'list', 'command_class': ListCommand}
+ {'name': 'list', 'command_class': ListCommand},
]
def _run_main(self, parsed_args, parsed_globals):
diff --git a/awscli/customizations/history/commands.py b/awscli/customizations/history/commands.py
index 42c8de1af8c8..41973aa67940 100644
--- a/awscli/customizations/history/commands.py
+++ b/awscli/customizations/history/commands.py
@@ -13,14 +13,16 @@
import os
from awscli.compat import is_windows
-from awscli.utils import is_a_tty
-from awscli.utils import OutputStreamFactory
-
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.history.db import DatabaseConnection
-from awscli.customizations.history.constants import HISTORY_FILENAME_ENV_VAR
-from awscli.customizations.history.constants import DEFAULT_HISTORY_FILENAME
-from awscli.customizations.history.db import DatabaseRecordReader
+from awscli.customizations.history.constants import (
+ DEFAULT_HISTORY_FILENAME,
+ HISTORY_FILENAME_ENV_VAR,
+)
+from awscli.customizations.history.db import (
+ DatabaseConnection,
+ DatabaseRecordReader,
+)
+from awscli.utils import OutputStreamFactory, is_a_tty
class HistorySubcommand(BasicCommand):
@@ -29,8 +31,9 @@ def __init__(self, session, db_reader=None, output_stream_factory=None):
self._db_reader = db_reader
self._output_stream_factory = output_stream_factory
if output_stream_factory is None:
- self._output_stream_factory = \
+ self._output_stream_factory = (
self._get_default_output_stream_factory()
+ )
def _get_default_output_stream_factory(self):
return OutputStreamFactory(self._session)
@@ -45,7 +48,8 @@ def _close_history_db(self):
def _get_history_db_filename(self):
filename = os.environ.get(
- HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME)
+ HISTORY_FILENAME_ENV_VAR, DEFAULT_HISTORY_FILENAME
+ )
if not os.path.exists(filename):
raise RuntimeError(
'Could not locate history. Make sure cli_history is set to '
diff --git a/awscli/customizations/history/constants.py b/awscli/customizations/history/constants.py
index 486e274f612b..48558bb05b1c 100644
--- a/awscli/customizations/history/constants.py
+++ b/awscli/customizations/history/constants.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
import os
-
HISTORY_FILENAME_ENV_VAR = 'AWS_CLI_HISTORY_FILE'
DEFAULT_HISTORY_FILENAME = os.path.expanduser(
- os.path.join('~', '.aws', 'cli', 'history', 'history.db'))
+ os.path.join('~', '.aws', 'cli', 'history', 'history.db')
+)
diff --git a/awscli/customizations/history/db.py b/awscli/customizations/history/db.py
index bdb96d1dc4bd..2b1e270779c0 100644
--- a/awscli/customizations/history/db.py
+++ b/awscli/customizations/history/db.py
@@ -10,20 +10,16 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import uuid
-import time
-import json
import datetime
-import threading
+import json
import logging
-from awscli.compat import collections_abc
+import threading
+import time
+import uuid
+from awscli.compat import binary_type, collections_abc, sqlite3
from botocore.history import BaseHistoryHandler
-from awscli.compat import sqlite3
-from awscli.compat import binary_type
-
-
LOG = logging.getLogger(__name__)
@@ -41,7 +37,8 @@ class DatabaseConnection(object):
def __init__(self, db_filename):
self._connection = sqlite3.connect(
- db_filename, check_same_thread=False, isolation_level=None)
+ db_filename, check_same_thread=False, isolation_level=None
+ )
self._ensure_database_setup()
def close(self):
@@ -92,8 +89,9 @@ def _remove_non_unicode_stings(self, obj):
if isinstance(obj, str):
obj = self._try_decode_bytes(obj)
elif isinstance(obj, dict):
- obj = dict((k, self._remove_non_unicode_stings(v)) for k, v
- in obj.items())
+ obj = dict(
+ (k, self._remove_non_unicode_stings(v)) for k, v in obj.items()
+ )
elif isinstance(obj, (list, tuple)):
obj = [self._remove_non_unicode_stings(o) for o in obj]
return obj
@@ -152,26 +150,30 @@ def write_record(self, record):
def _create_db_record(self, record):
event_type = record['event_type']
- json_serialized_payload = json.dumps(record['payload'],
- cls=PayloadSerializer)
+ json_serialized_payload = json.dumps(
+ record['payload'], cls=PayloadSerializer
+ )
db_record = (
record['command_id'],
record.get('request_id'),
record['source'],
event_type,
record['timestamp'],
- json_serialized_payload
+ json_serialized_payload,
)
return db_record
class DatabaseRecordReader(object):
_ORDERING = 'ORDER BY timestamp'
- _GET_LAST_ID_RECORDS = """
+ _GET_LAST_ID_RECORDS = (
+ """
SELECT * FROM records
WHERE id =
(SELECT id FROM records WHERE timestamp =
- (SELECT max(timestamp) FROM records)) %s;""" % _ORDERING
+ (SELECT max(timestamp) FROM records)) %s;"""
+ % _ORDERING
+ )
_GET_RECORDS_BY_ID = 'SELECT * from records where id = ? %s' % _ORDERING
_GET_ALL_RECORDS = (
'SELECT a.id AS id_a, '
@@ -220,7 +222,8 @@ def iter_all_records(self):
class RecordBuilder(object):
_REQUEST_LIFECYCLE_EVENTS = set(
- ['API_CALL', 'HTTP_REQUEST', 'HTTP_RESPONSE', 'PARSED_RESPONSE'])
+ ['API_CALL', 'HTTP_REQUEST', 'HTTP_RESPONSE', 'PARSED_RESPONSE']
+ )
_START_OF_REQUEST_LIFECYCLE_EVENT = 'API_CALL'
def __init__(self):
@@ -254,7 +257,7 @@ def build_record(self, event_type, payload, source):
'event_type': event_type,
'payload': payload,
'source': source,
- 'timestamp': int(time.time() * 1000)
+ 'timestamp': int(time.time() * 1000),
}
request_id = self._get_request_id(event_type)
if request_id:
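
Two connection options at the top of this file carry the threading story: isolation_level=None puts sqlite3 in autocommit mode, and check_same_thread=False lets the handler write from whichever thread the history event fires on, serialized by the writer's lock. The pattern in isolation:

    import sqlite3
    import threading

    # isolation_level=None -> autocommit; check_same_thread=False ->
    # the connection may be used outside the thread that created it.
    conn = sqlite3.connect(
        ':memory:', check_same_thread=False, isolation_level=None
    )
    conn.execute('CREATE TABLE records (payload TEXT)')

    lock = threading.Lock()

    def write(payload):
        # sqlite3 objects are not safe for unsynchronized concurrent
        # use, so serialize writers explicitly.
        with lock:
            conn.execute('INSERT INTO records VALUES (?)', (payload,))

    write('{"event": "demo"}')
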
diff --git a/awscli/customizations/history/list.py b/awscli/customizations/history/list.py
index 81ebbf208652..67601133b30d 100644
--- a/awscli/customizations/history/list.py
+++ b/awscli/customizations/history/list.py
@@ -10,11 +10,11 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import json
import datetime
+import json
-from awscli.utils import OutputStreamFactory
from awscli.customizations.history.commands import HistorySubcommand
+from awscli.utils import OutputStreamFactory
class ListCommand(HistorySubcommand):
@@ -26,12 +26,7 @@ class ListCommand(HistorySubcommand):
'``history show`` with the command_id to see more details about '
'a particular entry.'
)
- _COL_WIDTHS = {
- 'id_a': 38,
- 'timestamp': 24,
- 'args': 50,
- 'rc': 0
- }
+ _COL_WIDTHS = {'id_a': 38, 'timestamp': 24, 'args': 50, 'rc': 0}
def _get_default_output_stream_factory(self):
return OutputStreamFactory(self._session, default_less_flags='SR')
@@ -45,7 +40,8 @@ def _run_main(self, parsed_args, parsed_globals):
raise RuntimeError(
'No commands were found in your history. Make sure you have '
'enabled history mode by adding "cli_history = enabled" '
- 'to the config file.')
+ 'to the config file.'
+ )
with self._output_stream_factory.get_output_stream() as stream:
formatter = TextFormatter(self._COL_WIDTHS, stream)
@@ -60,6 +56,7 @@ class RecordAdapter(object):
If there are no records we can just exit early.
"""
+
def __init__(self, records):
self._records = records
self._next = None
@@ -88,27 +85,28 @@ def __init__(self, col_widths, output_stream):
def _format_time(self, timestamp):
command_time = datetime.datetime.fromtimestamp(timestamp / 1000)
formatted = datetime.datetime.strftime(
- command_time, '%Y-%m-%d %I:%M:%S %p')
+ command_time, '%Y-%m-%d %I:%M:%S %p'
+ )
return formatted
def _format_args(self, args, arg_width):
json_value = json.loads(args)
formatted = ' '.join(json_value[:2])
if len(formatted) >= arg_width:
- formatted = '%s...' % formatted[:arg_width-4]
+ formatted = '%s...' % formatted[: arg_width - 4]
return formatted
def _format_record(self, record):
fmt_string = "{0:<%s}{1:<%s}{2:<%s}{3}\n" % (
self._col_widths['id_a'],
self._col_widths['timestamp'],
- self._col_widths['args']
+ self._col_widths['args'],
)
record_line = fmt_string.format(
record['id_a'],
self._format_time(record['timestamp']),
self._format_args(record['args'], self._col_widths['args']),
- record['rc']
+ record['rc'],
)
return record_line
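
TextFormatter builds its row template in two stages: %-formatting bakes the column widths into a str.format template, which is then applied per record. The same two-stage pattern in isolation:

    widths = {'id': 10, 'time': 12}
    # Stage 1: embed widths -> '{0:<10}{1:<12}{2}\n'
    fmt = "{0:<%s}{1:<%s}{2}\n" % (widths['id'], widths['time'])
    # Stage 2: fill in one record's values.
    line = fmt.format('abc123', '2024-01-01', 0)
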
diff --git a/awscli/customizations/history/show.py b/awscli/customizations/history/show.py
index 93e49aed568b..b9dda2577508 100644
--- a/awscli/customizations/history/show.py
+++ b/awscli/customizations/history/show.py
@@ -13,15 +13,15 @@
import datetime
import json
import sys
-import xml.parsers.expat
import xml.dom.minidom
+import xml.parsers.expat
import colorama
-from awscli.table import COLORAMA_KWARGS
+from awscli.customizations.exceptions import ParamValidationError
from awscli.customizations.history.commands import HistorySubcommand
from awscli.customizations.history.filters import RegexFilter
-from awscli.customizations.exceptions import ParamValidationError
+from awscli.table import COLORAMA_KWARGS
class Formatter(object):
@@ -46,7 +46,8 @@ def __init__(self, output=None, include=None, exclude=None):
self._output = sys.stdout
if include and exclude:
raise ParamValidationError(
- 'Either input or exclude can be provided but not both')
+ 'Either input or exclude can be provided but not both'
+ )
self._include = include
self._exclude = exclude
@@ -80,97 +81,73 @@ class DetailedFormatter(Formatter):
_SECTIONS = {
'CLI_VERSION': {
'title': 'AWS CLI command entered',
- 'values': [
- {'description': 'with AWS CLI version'}
- ]
- },
- 'CLI_ARGUMENTS': {
- 'values': [
- {'description': 'with arguments'}
- ]
+ 'values': [{'description': 'with AWS CLI version'}],
},
+ 'CLI_ARGUMENTS': {'values': [{'description': 'with arguments'}]},
'API_CALL': {
'title': 'API call made',
'values': [
- {
- 'description': 'to service',
- 'payload_key': 'service'
- },
- {
- 'description': 'using operation',
- 'payload_key': 'operation'
- },
+ {'description': 'to service', 'payload_key': 'service'},
+ {'description': 'using operation', 'payload_key': 'operation'},
{
'description': 'with parameters',
'payload_key': 'params',
- 'value_format': 'dictionary'
- }
- ]
+ 'value_format': 'dictionary',
+ },
+ ],
},
'HTTP_REQUEST': {
'title': 'HTTP request sent',
'values': [
- {
- 'description': 'to URL',
- 'payload_key': 'url'
- },
- {
- 'description': 'with method',
- 'payload_key': 'method'
- },
+ {'description': 'to URL', 'payload_key': 'url'},
+ {'description': 'with method', 'payload_key': 'method'},
{
'description': 'with headers',
'payload_key': 'headers',
'value_format': 'dictionary',
- 'filters': [_SIG_FILTER]
+ 'filters': [_SIG_FILTER],
},
{
'description': 'with body',
'payload_key': 'body',
- 'value_format': 'http_body'
- }
-
- ]
+ 'value_format': 'http_body',
+ },
+ ],
},
'HTTP_RESPONSE': {
'title': 'HTTP response received',
'values': [
{
'description': 'with status code',
- 'payload_key': 'status_code'
+ 'payload_key': 'status_code',
},
{
'description': 'with headers',
'payload_key': 'headers',
- 'value_format': 'dictionary'
+ 'value_format': 'dictionary',
},
{
'description': 'with body',
'payload_key': 'body',
- 'value_format': 'http_body'
- }
- ]
+ 'value_format': 'http_body',
+ },
+ ],
},
'PARSED_RESPONSE': {
'title': 'HTTP response parsed',
'values': [
- {
- 'description': 'parsed to',
- 'value_format': 'dictionary'
- }
- ]
+ {'description': 'parsed to', 'value_format': 'dictionary'}
+ ],
},
'CLI_RC': {
'title': 'AWS CLI command exited',
- 'values': [
- {'description': 'with return code'}
- ]
+ 'values': [{'description': 'with return code'}],
},
}
_COMPONENT_COLORS = {
'title': colorama.Style.BRIGHT,
- 'description': colorama.Fore.CYAN
+ 'description': colorama.Fore.CYAN,
}
def __init__(self, output=None, include=None, exclude=None, colorize=True):
@@ -225,7 +202,8 @@ def _format_section_title(self, title, event_record):
formatted_timestamp = self._format_description('at time')
formatted_timestamp += self._format_value(
- event_record['timestamp'], event_record, value_format='timestamp')
+ event_record['timestamp'], event_record, value_format='timestamp'
+ )
return '\n' + formatted_title + formatted_timestamp
@@ -233,19 +211,20 @@ def _get_api_num(self, event_record):
request_id = event_record['request_id']
if request_id:
if request_id not in self._request_id_to_api_num:
- self._request_id_to_api_num[
- request_id] = self._num_api_calls
+ self._request_id_to_api_num[request_id] = self._num_api_calls
self._num_api_calls += 1
return self._request_id_to_api_num[request_id]
def _format_description(self, value_description):
return self._color_if_configured(
- value_description + ': ', 'description')
+ value_description + ': ', 'description'
+ )
def _format_value(self, value, event_record, value_format=None):
if value_format:
formatted_value = self._value_pformatter.pformat(
- value, value_format, event_record)
+ value, value_format, event_record
+ )
else:
formatted_value = str(value)
return formatted_value + '\n'
@@ -263,7 +242,8 @@ def pformat(self, value, value_format, event_record):
def _pformat_timestamp(self, event_timestamp, event_record=None):
return datetime.datetime.fromtimestamp(
- event_timestamp/1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
+ event_timestamp / 1000.0
+ ).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def _pformat_dictionary(self, obj, event_record=None):
return json.dumps(obj=obj, sort_keys=True, indent=4)
@@ -295,7 +275,7 @@ def _get_pretty_xml(self, body):
# is called.
stripped_body = self._strip_whitespace(body)
xml_dom = xml.dom.minidom.parseString(stripped_body)
- return xml_dom.toprettyxml(indent=' '*4, newl='\n')
+ return xml_dom.toprettyxml(indent=' ' * 4, newl='\n')
def _get_pretty_json(self, body):
# The json body is loaded so it can be dumped in a format that
@@ -312,9 +292,7 @@ def _is_xml(self, body):
def _strip_whitespace(self, xml_string):
xml_dom = xml.dom.minidom.parseString(xml_string)
- return ''.join(
- [line.strip() for line in xml_dom.toxml().splitlines()]
- )
+ return ''.join([line.strip() for line in xml_dom.toxml().splitlines()])
def _is_json_structure(self, body):
if body.startswith('{'):
@@ -333,43 +311,57 @@ class ShowCommand(HistorySubcommand):
'If this command is ran without any positional arguments, it will '
'display the events for the last CLI command ran.'
)
- FORMATTERS = {
- 'detailed': DetailedFormatter
- }
+ FORMATTERS = {'detailed': DetailedFormatter}
ARG_TABLE = [
- {'name': 'command_id', 'nargs': '?', 'default': 'latest',
- 'positional_arg': True,
- 'help_text': (
- 'The ID of the CLI command to show. If this positional argument '
- 'is omitted, it will show the last the CLI command ran.')},
- {'name': 'include', 'nargs': '+',
- 'help_text': (
- 'Specifies which events to **only** include when showing the '
- 'CLI command. This argument is mutually exclusive with '
- '``--exclude``.')},
- {'name': 'exclude', 'nargs': '+',
- 'help_text': (
- 'Specifies which events to exclude when showing the '
- 'CLI command. This argument is mutually exclusive with '
- '``--include``.')},
- {'name': 'format', 'choices': FORMATTERS.keys(),
- 'default': 'detailed', 'help_text': (
- 'Specifies which format to use in showing the events for '
- 'the specified CLI command. The following formats are '
- 'supported:\n\n'
- '<ul>'
- '<li> detailed - This the default format. It prints out a '
- 'detailed overview of the CLI command ran. It displays all '
- 'of the key events in the command lifecycle where each '
- 'important event has a title and its important values '
- 'underneath. The events are ordered by timestamp and events of '
- 'the same API call are associated together with the '
- '[``api_id``] notation where events that share the same '
- '``api_id`` belong to the lifecycle of the same API call.'
- '</li>'
- '</ul>'
- )
- }
+ {
+ 'name': 'command_id',
+ 'nargs': '?',
+ 'default': 'latest',
+ 'positional_arg': True,
+ 'help_text': (
+ 'The ID of the CLI command to show. If this positional argument '
+ 'is omitted, it will show the last the CLI command ran.'
+ ),
+ },
+ {
+ 'name': 'include',
+ 'nargs': '+',
+ 'help_text': (
+ 'Specifies which events to **only** include when showing the '
+ 'CLI command. This argument is mutually exclusive with '
+ '``--exclude``.'
+ ),
+ },
+ {
+ 'name': 'exclude',
+ 'nargs': '+',
+ 'help_text': (
+ 'Specifies which events to exclude when showing the '
+ 'CLI command. This argument is mutually exclusive with '
+ '``--include``.'
+ ),
+ },
+ {
+ 'name': 'format',
+ 'choices': FORMATTERS.keys(),
+ 'default': 'detailed',
+ 'help_text': (
+ 'Specifies which format to use in showing the events for '
+ 'the specified CLI command. The following formats are '
+ 'supported:\n\n'
+ '<ul>'
+ '<li> detailed - This the default format. It prints out a '
+ 'detailed overview of the CLI command ran. It displays all '
+ 'of the key events in the command lifecycle where each '
+ 'important event has a title and its important values '
+ 'underneath. The events are ordered by timestamp and events of '
+ 'the same API call are associated together with the '
+ '[``api_id``] notation where events that share the same '
+ '``api_id`` belong to the lifecycle of the same API call.'
+ '</li>'
+ '</ul>'
+ ),
+ },
]
def _run_main(self, parsed_args, parsed_globals):
@@ -378,7 +370,8 @@ def _run_main(self, parsed_args, parsed_globals):
self._validate_args(parsed_args)
with self._output_stream_factory.get_output_stream() as stream:
formatter = self._get_formatter(
- parsed_args, parsed_globals, stream)
+ parsed_args, parsed_globals, stream
+ )
for record in self._get_record_iterator(parsed_args):
formatter.display(record)
finally:
@@ -388,18 +381,20 @@ def _run_main(self, parsed_args, parsed_globals):
def _validate_args(self, parsed_args):
if parsed_args.exclude and parsed_args.include:
raise ParamValidationError(
- 'Either --exclude or --include can be provided but not both')
+ 'Either --exclude or --include can be provided but not both'
+ )
def _get_formatter(self, parsed_args, parsed_globals, output_stream):
format_type = parsed_args.format
formatter_kwargs = {
'include': parsed_args.include,
'exclude': parsed_args.exclude,
- 'output': output_stream
+ 'output': output_stream,
}
if format_type == 'detailed':
formatter_kwargs['colorize'] = self._should_use_color(
- parsed_globals)
+ parsed_globals
+ )
return self.FORMATTERS[format_type](**formatter_kwargs)
def _get_record_iterator(self, parsed_args):
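
A note on the `http_body` handling reformatted above: `DetailedFormatter` pretty-prints a body by trying JSON first and falling back to XML via `xml.dom.minidom`, stripping inter-line whitespace before re-indenting so `toprettyxml()` does not emit doubled blank lines. A condensed, standalone sketch of that strategy (not the class itself):

```python
import json
import xml.dom.minidom
import xml.parsers.expat

def pretty_body(body):
    # JSON branch: only attempted for bodies that look like JSON objects.
    if body.startswith('{'):
        try:
            return json.dumps(json.loads(body), sort_keys=True, indent=4)
        except ValueError:
            pass
    # XML branch: normalize whitespace, then re-indent.
    try:
        stripped = ''.join(
            line.strip()
            for line in xml.dom.minidom.parseString(body).toxml().splitlines()
        )
        dom = xml.dom.minidom.parseString(stripped)
        return dom.toprettyxml(indent=' ' * 4, newl='\n')
    except xml.parsers.expat.ExpatError:
        # Neither JSON nor XML: return the body unchanged.
        return body

print(pretty_body('{"b": 1, "a": 2}'))
print(pretty_body('<resp><id>1</id></resp>'))
```
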
diff --git a/awscli/customizations/iamvirtmfa.py b/awscli/customizations/iamvirtmfa.py
index c0ee3582d6b4..ce40c41c3003 100644
--- a/awscli/customizations/iamvirtmfa.py
+++ b/awscli/customizations/iamvirtmfa.py
@@ -22,22 +22,27 @@
to the specified file. It will also remove the two bootstrap data
fields from the response.
"""
-import base64
-from awscli.customizations.arguments import StatefulArgument
-from awscli.customizations.arguments import resolve_given_outfile_path
-from awscli.customizations.arguments import is_parsed_result_successful
+import base64
+from awscli.customizations.arguments import (
+ StatefulArgument,
+ is_parsed_result_successful,
+ resolve_given_outfile_path,
+)
CHOICES = ('QRCodePNG', 'Base32StringSeed')
-OUTPUT_HELP = ('The output path and file name where the bootstrap '
- 'information will be stored.')
-BOOTSTRAP_HELP = ('Method to use to seed the virtual MFA. '
- 'Valid values are: %s | %s' % CHOICES)
+OUTPUT_HELP = (
+ 'The output path and file name where the bootstrap '
+ 'information will be stored.'
+)
+BOOTSTRAP_HELP = (
+ 'Method to use to seed the virtual MFA. '
+ 'Valid values are: %s | %s' % CHOICES
+)
class FileArgument(StatefulArgument):
-
def add_to_params(self, parameters, value):
# Validate the file here so we can raise an error prior
# calling the service.
@@ -46,19 +51,24 @@ def add_to_params(self, parameters, value):
class IAMVMFAWrapper(object):
-
def __init__(self, event_handler):
self._event_handler = event_handler
self._outfile = FileArgument(
- 'outfile', help_text=OUTPUT_HELP, required=True)
+ 'outfile', help_text=OUTPUT_HELP, required=True
+ )
self._method = StatefulArgument(
- 'bootstrap-method', help_text=BOOTSTRAP_HELP,
- choices=CHOICES, required=True)
+ 'bootstrap-method',
+ help_text=BOOTSTRAP_HELP,
+ choices=CHOICES,
+ required=True,
+ )
self._event_handler.register(
'building-argument-table.iam.create-virtual-mfa-device',
- self._add_options)
+ self._add_options,
+ )
self._event_handler.register(
- 'after-call.iam.CreateVirtualMFADevice', self._save_file)
+ 'after-call.iam.CreateVirtualMFADevice', self._save_file
+ )
def _add_options(self, argument_table, **kwargs):
argument_table['outfile'] = self._outfile
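
For context, the `after-call.iam.CreateVirtualMFADevice` handler registered above (`_save_file`) writes the bootstrap data to the user's outfile and strips both bootstrap fields from the response, as the module docstring describes. A hedged sketch of that shape; the response layout and the base64-decoding step are assumptions, not the exact IAM payload handling:

```python
import base64

def save_bootstrap_data(parsed, outfile, method='Base32StringSeed'):
    # Assumed response shape: the chosen bootstrap field arrives
    # base64-encoded under parsed['VirtualMFADevice'].
    device = parsed['VirtualMFADevice']
    with open(outfile, 'wb') as f:
        f.write(base64.b64decode(device[method]))
    # Drop both bootstrap fields so they are not echoed in CLI output.
    for choice in ('QRCodePNG', 'Base32StringSeed'):
        device.pop(choice, None)

parsed = {'VirtualMFADevice': {'Base32StringSeed': base64.b64encode(b'SEED')}}
save_bootstrap_data(parsed, 'mfa-seed.txt')
print(parsed)  # bootstrap fields removed
```
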
diff --git a/awscli/customizations/iot.py b/awscli/customizations/iot.py
index 7703014335b1..f4e4b9770513 100644
--- a/awscli/customizations/iot.py
+++ b/awscli/customizations/iot.py
@@ -22,6 +22,7 @@
- ``--public-key-outfile``: keyPair.PublicKey
- ``--private-key-outfile``: keyPair.PrivateKey
"""
+
from awscli.customizations.arguments import QueryOutFileArgument
@@ -34,19 +35,34 @@ def register_create_keys_and_cert_arguments(session, argument_table, **kwargs):
"""
after_event = 'after-call.iot.CreateKeysAndCertificate'
argument_table['certificate-pem-outfile'] = QueryOutFileArgument(
- session=session, name='certificate-pem-outfile',
- query='certificatePem', after_call_event=after_event, perm=0o600)
+ session=session,
+ name='certificate-pem-outfile',
+ query='certificatePem',
+ after_call_event=after_event,
+ perm=0o600,
+ )
argument_table['public-key-outfile'] = QueryOutFileArgument(
- session=session, name='public-key-outfile', query='keyPair.PublicKey',
- after_call_event=after_event, perm=0o600)
+ session=session,
+ name='public-key-outfile',
+ query='keyPair.PublicKey',
+ after_call_event=after_event,
+ perm=0o600,
+ )
argument_table['private-key-outfile'] = QueryOutFileArgument(
- session=session, name='private-key-outfile',
- query='keyPair.PrivateKey', after_call_event=after_event, perm=0o600)
+ session=session,
+ name='private-key-outfile',
+ query='keyPair.PrivateKey',
+ after_call_event=after_event,
+ perm=0o600,
+ )
def register_create_keys_from_csr_arguments(session, argument_table, **kwargs):
"""Add certificate-pem-outfile to create-certificate-from-csr"""
argument_table['certificate-pem-outfile'] = QueryOutFileArgument(
- session=session, name='certificate-pem-outfile',
+ session=session,
+ name='certificate-pem-outfile',
query='certificatePem',
- after_call_event='after-call.iot.CreateCertificateFromCsr', perm=0o600)
+ after_call_event='after-call.iot.CreateCertificateFromCsr',
+ perm=0o600,
+ )
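
Each `QueryOutFileArgument` above pairs a JMESPath-style `query` with `perm=0o600`. A sketch of what such an argument plausibly does once its `after_call_event` fires; `write_query_result`, the jmespath evaluation, and the sample response are illustrative, not the real helper:

```python
import os

import jmespath  # evaluates 'keyPair.PublicKey'-style queries

def write_query_result(parsed, query, outfile, perm=0o600):
    # Pull one value out of the parsed response ...
    value = jmespath.search(query, parsed)
    # ... and write it with restrictive permissions, since these are
    # private keys and certificates.
    fd = os.open(outfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, perm)
    with os.fdopen(fd, 'w') as f:
        f.write(value)

parsed = {'keyPair': {'PublicKey': '-----BEGIN PUBLIC KEY-----\n...'}}
write_query_result(parsed, 'keyPair.PublicKey', 'public.pem.key')
```
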
diff --git a/awscli/customizations/iot_data.py b/awscli/customizations/iot_data.py
index 62c02ee126dd..2b29c94a9579 100644
--- a/awscli/customizations/iot_data.py
+++ b/awscli/customizations/iot_data.py
@@ -14,7 +14,8 @@
def register_custom_endpoint_note(event_emitter):
event_emitter.register_last(
- 'doc-description.iot-data', add_custom_endpoint_url_note)
+ 'doc-description.iot-data', add_custom_endpoint_url_note
+ )
def add_custom_endpoint_url_note(help_command, **kwargs):
diff --git a/awscli/customizations/lightsail/__init__.py b/awscli/customizations/lightsail/__init__.py
index aa9f33389398..19c1aaad5b81 100644
--- a/awscli/customizations/lightsail/__init__.py
+++ b/awscli/customizations/lightsail/__init__.py
@@ -11,8 +11,9 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.lightsail.push_container_image \
- import PushContainerImage
+from awscli.customizations.lightsail.push_container_image import (
+ PushContainerImage,
+)
def initialize(cli):
diff --git a/awscli/customizations/lightsail/push_container_image.py b/awscli/customizations/lightsail/push_container_image.py
index 9a1640945b53..4c605489f8ce 100644
--- a/awscli/customizations/lightsail/push_container_image.py
+++ b/awscli/customizations/lightsail/push_container_image.py
@@ -14,6 +14,7 @@
import json
import logging
import subprocess
+
import awscli
from awscli.compat import ignore_user_entered_signals
from awscli.customizations.commands import BasicCommand
@@ -24,11 +25,12 @@
ERROR_MESSAGE = (
'The Lightsail Control (lightsailctl) plugin was not found. ',
'To download and install it, see ',
- 'https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software'
+ 'https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-install-software',
)
INPUT_VERSION = '1'
+
class PushContainerImage(BasicCommand):
NAME = 'push-container-image'
@@ -38,18 +40,11 @@ class PushContainerImage(BasicCommand):
{
'name': 'service-name',
'help_text': helptext.SERVICENAME,
- 'required': True
+ 'required': True,
},
- {
- 'name': 'image',
- 'help_text': helptext.IMAGE,
- 'required': True
- },
- {
- 'name': 'label',
- 'help_text': helptext.LABEL,
- 'required': True
- }]
+ {'name': 'image', 'help_text': helptext.IMAGE, 'required': True},
+ {'name': 'label', 'help_text': helptext.LABEL, 'required': True},
+ ]
def _run_main(self, parsed_args, parsed_globals):
payload = self._get_input_request(parsed_args, parsed_globals)
@@ -65,24 +60,24 @@ def _run_main(self, parsed_args, parsed_globals):
subprocess.run(
['lightsailctl', '--plugin', '--input-stdin'],
input=json.dumps(payload).encode('utf-8'),
- check=True)
+ check=True,
+ )
return 0
except OSError as ex:
if ex.errno == errno.ENOENT:
- logger.debug('lightsailctl not found',
- exc_info=True)
+ logger.debug('lightsailctl not found', exc_info=True)
raise ValueError(''.join(ERROR_MESSAGE))
def _get_input_request(self, parsed_args, parsed_globals):
input_request = {
- 'inputVersion' : INPUT_VERSION,
+ 'inputVersion': INPUT_VERSION,
'operation': 'PushContainerImage',
}
payload = dict(
service=parsed_args.service_name,
image=parsed_args.image,
- label=parsed_args.label
+ label=parsed_args.label,
)
configuration = {}
@@ -105,22 +100,25 @@ def _get_input_request(self, parsed_args, parsed_globals):
if parsed_globals.profile:
configuration['profile'] = parsed_globals.profile
elif self._session.get_config_variable('profile'):
- configuration['profile'] = \
- self._session.get_config_variable('profile')
+ configuration['profile'] = self._session.get_config_variable(
+ 'profile'
+ )
if parsed_globals.region:
configuration['region'] = parsed_globals.region
elif self._session.get_config_variable('region'):
- configuration['region'] = \
- self._session.get_config_variable('region')
+ configuration['region'] = self._session.get_config_variable(
+ 'region'
+ )
configuration['doNotSignRequest'] = not parsed_globals.sign_request
if parsed_globals.ca_bundle:
configuration['caBundle'] = parsed_globals.ca_bundle
elif self._session.get_config_variable('ca_bundle'):
- configuration['caBundle'] = \
- self._session.get_config_variable('ca_bundle')
+ configuration['caBundle'] = self._session.get_config_variable(
+ 'ca_bundle'
+ )
if parsed_globals.read_timeout is not None:
configuration['readTimeout'] = parsed_globals.read_timeout
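
The plugin hand-off reformatted above reduces to serializing one JSON document and piping it to the external `lightsailctl` binary on stdin, with a `ValueError` if the binary is missing. Condensed from the code above, with invented payload values:

```python
import errno
import json
import subprocess

payload = {
    'inputVersion': '1',
    'operation': 'PushContainerImage',
    'payload': {'service': 'my-svc', 'image': 'app:latest', 'label': 'app'},
    'configuration': {'region': 'us-west-2', 'doNotSignRequest': False},
}
try:
    # The plugin reads the whole request from stdin.
    subprocess.run(
        ['lightsailctl', '--plugin', '--input-stdin'],
        input=json.dumps(payload).encode('utf-8'),
        check=True,
    )
except OSError as ex:
    if ex.errno == errno.ENOENT:
        raise ValueError('lightsailctl plugin not found on PATH')
    raise
```
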
diff --git a/awscli/customizations/logs/__init__.py b/awscli/customizations/logs/__init__.py
index c59af9c8492c..6cdb6292c1c5 100644
--- a/awscli/customizations/logs/__init__.py
+++ b/awscli/customizations/logs/__init__.py
@@ -10,13 +10,15 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.logs.tail import TailCommand
from awscli.customizations.logs.startlivetail import StartLiveTailCommand
+from awscli.customizations.logs.tail import TailCommand
def register_logs_commands(event_emitter):
event_emitter.register('building-command-table.logs', inject_tail_command)
- event_emitter.register('building-command-table.logs', inject_start_live_tail_command)
+ event_emitter.register(
+ 'building-command-table.logs', inject_start_live_tail_command
+ )
def inject_tail_command(command_table, session, **kwargs):
diff --git a/awscli/customizations/logs/startlivetail.py b/awscli/customizations/logs/startlivetail.py
index 907e752996d1..eb79f87cf188 100644
--- a/awscli/customizations/logs/startlivetail.py
+++ b/awscli/customizations/logs/startlivetail.py
@@ -10,30 +10,30 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from enum import Enum
-from functools import partial
-from threading import Thread
import asyncio
-import colorama
import contextlib
import json
import re
import signal
import sys
import time
+from enum import Enum
+from functools import partial
+from threading import Thread
+import colorama
from prompt_toolkit.application import Application, get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import (
ANSI,
- to_formatted_text,
fragment_list_to_text,
+ to_formatted_text,
)
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import Layout, Window, WindowAlign
-from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.containers import HSplit, VSplit
+from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.processors import Processor, Transformation
@@ -42,7 +42,6 @@
from awscli.customizations.exceptions import ParamValidationError
from awscli.utils import is_a_tty
-
DESCRIPTION = (
"Starts a Live Tail streaming session for one or more log groups. "
"A Live Tail session provides a near real-time streaming of log events "
@@ -824,7 +823,9 @@ def exit(self):
self._application.exit()
async def _run_ui(self):
- self._application.create_background_task(self._log_events_printer.run())
+ self._application.create_background_task(
+ self._log_events_printer.run()
+ )
self._application.create_background_task(self._render_metadata())
self._application.create_background_task(self._trim_buffers())
diff --git a/awscli/customizations/logs/tail.py b/awscli/customizations/logs/tail.py
index cb3151003270..de806db4bd88 100644
--- a/awscli/customizations/logs/tail.py
+++ b/awscli/customizations/logs/tail.py
@@ -10,23 +10,22 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from collections import defaultdict
-from datetime import datetime, timedelta
import json
import re
import time
+from collections import defaultdict
+from datetime import datetime, timedelta
-from botocore.utils import parse_timestamp, datetime2timestamp
-from dateutil import tz
import colorama
+from dateutil import tz
from awscli.compat import get_stdout_text_writer
-from awscli.utils import is_a_tty
from awscli.customizations.commands import BasicCommand
+from awscli.utils import is_a_tty
+from botocore.utils import datetime2timestamp, parse_timestamp
class BaseLogEventsFormatter(object):
-
_TIMESTAMP_COLOR = colorama.Fore.GREEN
_STREAM_NAME_COLOR = colorama.Fore.CYAN
@@ -56,13 +55,14 @@ class ShortLogEventsFormatter(BaseLogEventsFormatter):
def display_log_event(self, log_event):
log_event = '%s %s' % (
self._format_timestamp(log_event['timestamp']),
- log_event['message']
+ log_event['message'],
)
self._write_log_event(log_event)
def _format_timestamp(self, timestamp):
return self._color_if_configured(
- timestamp.strftime("%Y-%m-%dT%H:%M:%S"), self._TIMESTAMP_COLOR)
+ timestamp.strftime("%Y-%m-%dT%H:%M:%S"), self._TIMESTAMP_COLOR
+ )
class DetailedLogEventsFormatter(BaseLogEventsFormatter):
@@ -70,15 +70,15 @@ def display_log_event(self, log_event):
log_event = '%s %s %s' % (
self._format_timestamp(log_event['timestamp']),
self._color_if_configured(
- log_event['logStreamName'], self._STREAM_NAME_COLOR),
- log_event['message']
+ log_event['logStreamName'], self._STREAM_NAME_COLOR
+ ),
+ log_event['message'],
)
self._write_log_event(log_event)
def _format_timestamp(self, timestamp):
return self._color_if_configured(
- timestamp.isoformat(timespec='microseconds'),
- self._TIMESTAMP_COLOR
+ timestamp.isoformat(timespec='microseconds'), self._TIMESTAMP_COLOR
)
@@ -87,8 +87,9 @@ def display_log_event(self, log_event):
log_event = '%s %s %s' % (
self._format_timestamp(log_event['timestamp']),
self._color_if_configured(
- log_event['logStreamName'], self._STREAM_NAME_COLOR),
- self._format_pretty_json(log_event['message'])
+ log_event['logStreamName'], self._STREAM_NAME_COLOR
+ ),
+ self._format_pretty_json(log_event['message']),
)
self._write_log_event(log_event)
@@ -102,7 +103,8 @@ def _format_pretty_json(self, log_message):
def _format_timestamp(self, timestamp):
return self._color_if_configured(
- timestamp.isoformat(), self._TIMESTAMP_COLOR)
+ timestamp.isoformat(), self._TIMESTAMP_COLOR
+ )
class TailCommand(BasicCommand):
@@ -139,7 +141,7 @@ class TailCommand(BasicCommand):
'display logs starting five minutes in the past. '
'Note that multiple units are **not** supported '
'(i.e. ``5h30m``)'
- )
+ ),
},
{
'name': 'follow',
@@ -149,7 +151,7 @@ class TailCommand(BasicCommand):
'Whether to continuously poll for new logs. By default, the '
'command will exit once there are no more logs to display. '
'To exit from this mode, use Control-C.'
- )
+ ),
},
{
'name': 'format',
@@ -169,7 +171,7 @@ class TailCommand(BasicCommand):
'<li> json - Pretty print any messages that are entirely JSON.'
'</li>'
'</ul>'
- )
+ ),
},
{
'name': 'filter-pattern',
@@ -179,7 +181,7 @@ class TailCommand(BasicCommand):
'latest/logs/FilterAndPatternSyntax.html">Filter and '
'Pattern Syntax</a> for details. If not provided, all '
'the events are matched'
- )
+ ),
},
{
'name': 'log-stream-names',
@@ -188,7 +190,7 @@ class TailCommand(BasicCommand):
'The list of stream names to filter logs by. This parameter '
'cannot be specified when ``--log-stream-name-prefix`` is '
'also specified.'
- )
+ ),
},
{
'name': 'log-stream-name-prefix',
@@ -197,10 +199,8 @@ class TailCommand(BasicCommand):
'with names beginning with this prefix will be returned. This '
'parameter cannot be specified when ``log-stream-names`` is '
'also specified.'
- )
+ ),
},
-
-
]
_FORMAT_TO_FORMATTER_CLS = {
'detailed': DetailedLogEventsFormatter,
@@ -210,17 +210,21 @@ class TailCommand(BasicCommand):
def _run_main(self, parsed_args, parsed_globals):
logs_client = self._session.create_client(
- 'logs', region_name=parsed_globals.region,
+ 'logs',
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
logs_generator = self._get_log_events_generator(
- logs_client, parsed_args.follow)
+ logs_client, parsed_args.follow
+ )
log_events = logs_generator.iter_log_events(
- parsed_args.group_name, start=parsed_args.since,
+ parsed_args.group_name,
+ start=parsed_args.since,
filter_pattern=parsed_args.filter_pattern,
log_stream_names=parsed_args.log_stream_names,
- log_stream_name_prefix=parsed_args.log_stream_name_prefix)
+ log_stream_name_prefix=parsed_args.log_stream_name_prefix,
+ )
self._output_log_events(parsed_args, parsed_globals, log_events)
return 0
@@ -234,7 +238,8 @@ def _get_log_events_generator(self, logs_client, follow):
def _output_log_events(self, parsed_args, parsed_globals, log_events):
output = get_stdout_text_writer()
logs_formatter = self._FORMAT_TO_FORMATTER_CLS[parsed_args.format](
- output, colorize=self._should_use_color(parsed_globals))
+ output, colorize=self._should_use_color(parsed_globals)
+ )
for event in log_events:
logs_formatter.display_log_event(event)
@@ -286,11 +291,21 @@ def __init__(self, client, timestamp_utils):
self._client = client
self._timestamp_utils = timestamp_utils
- def iter_log_events(self, group_name, start=None, filter_pattern=None,
- log_stream_names=None, log_stream_name_prefix=None):
+ def iter_log_events(
+ self,
+ group_name,
+ start=None,
+ filter_pattern=None,
+ log_stream_names=None,
+ log_stream_name_prefix=None,
+ ):
filter_logs_events_kwargs = self._get_filter_logs_events_kwargs(
- group_name, start, filter_pattern, log_stream_names,
- log_stream_name_prefix)
+ group_name,
+ start,
+ filter_pattern,
+ log_stream_names,
+ log_stream_name_prefix,
+ )
log_events = self._filter_log_events(filter_logs_events_kwargs)
for log_event in log_events:
self._convert_event_timestamps(log_event)
@@ -299,14 +314,15 @@ def iter_log_events(self, group_name, start=None, filter_pattern=None,
def _filter_log_events(self, filter_logs_events_kwargs):
raise NotImplementedError('_filter_log_events()')
- def _get_filter_logs_events_kwargs(self, group_name, start,
- filter_pattern,
- log_stream_names,
- log_stream_name_prefix):
- kwargs = {
- 'logGroupName': group_name,
- 'interleaved': True
- }
+ def _get_filter_logs_events_kwargs(
+ self,
+ group_name,
+ start,
+ filter_pattern,
+ log_stream_names,
+ log_stream_name_prefix,
+ ):
+ kwargs = {'logGroupName': group_name, 'interleaved': True}
if start is not None:
kwargs['startTime'] = self._timestamp_utils.to_epoch_millis(start)
if filter_pattern is not None:
@@ -319,9 +335,11 @@ def _get_filter_logs_events_kwargs(self, group_name, start,
def _convert_event_timestamps(self, event):
event['ingestionTime'] = self._timestamp_utils.to_datetime(
- event['ingestionTime'])
+ event['ingestionTime']
+ )
event['timestamp'] = self._timestamp_utils.to_datetime(
- event['timestamp'])
+ event['timestamp']
+ )
class NoFollowLogEventsGenerator(BaseLogEventsGenerator):
@@ -356,32 +374,39 @@ def _get_latest_events_and_timestamp(self, event_ids_per_timestamp):
# Keep only ids of the events with the newest timestamp
newest_timestamp = max(event_ids_per_timestamp.keys())
event_ids_per_timestamp = defaultdict(
- set, {newest_timestamp: event_ids_per_timestamp[newest_timestamp]}
+ set,
+ {newest_timestamp: event_ids_per_timestamp[newest_timestamp]},
)
return event_ids_per_timestamp
- def _reset_filter_log_events_params(self, fle_kwargs, event_ids_per_timestamp):
+ def _reset_filter_log_events_params(
+ self, fle_kwargs, event_ids_per_timestamp
+ ):
# Remove nextToken and update startTime for the next request
# with the timestamp of the newest event
if event_ids_per_timestamp:
- fle_kwargs['startTime'] = max(
- event_ids_per_timestamp.keys()
- )
+ fle_kwargs['startTime'] = max(event_ids_per_timestamp.keys())
fle_kwargs.pop('nextToken', None)
def _do_filter_log_events(self, filter_logs_events_kwargs):
event_ids_per_timestamp = defaultdict(set)
while True:
response = self._client.filter_log_events(
- **filter_logs_events_kwargs)
+ **filter_logs_events_kwargs
+ )
for event in response['events']:
# For the case where we've hit the last page, we will be
# reusing the newest timestamp of the received events to keep polling.
# This means it is possible that duplicate log events with same timestamp
# are returned back which we do not want to yield again.
# We only want to yield log events that we have not seen.
- if event['eventId'] not in event_ids_per_timestamp[event['timestamp']]:
- event_ids_per_timestamp[event['timestamp']].add(event['eventId'])
+ if (
+ event['eventId']
+ not in event_ids_per_timestamp[event['timestamp']]
+ ):
+ event_ids_per_timestamp[event['timestamp']].add(
+ event['eventId']
+ )
yield event
event_ids_per_timestamp = self._get_latest_events_and_timestamp(
event_ids_per_timestamp
@@ -390,7 +415,6 @@ def _do_filter_log_events(self, filter_logs_events_kwargs):
filter_logs_events_kwargs['nextToken'] = response['nextToken']
else:
self._reset_filter_log_events_params(
- filter_logs_events_kwargs,
- event_ids_per_timestamp
+ filter_logs_events_kwargs, event_ids_per_timestamp
)
self._sleep(self._TIME_TO_SLEEP)
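
The comment block inside `_do_filter_log_events` explains the dedupe invariant: each poll restarts from the newest timestamp already seen, so events at exactly that timestamp can come back again, and remembering their ids (and only theirs) suppresses duplicates without unbounded memory. A toy model of that loop, driven by in-memory pages instead of `filter_log_events` responses:

```python
from collections import defaultdict

def dedupe(pages):
    seen = defaultdict(set)  # timestamp -> event ids already yielded
    for events in pages:  # each item models one filter_log_events page
        for event in events:
            if event['eventId'] not in seen[event['timestamp']]:
                seen[event['timestamp']].add(event['eventId'])
                yield event
        if seen:
            # Keep only ids at the newest timestamp; older ones can
            # never be returned again.
            newest = max(seen.keys())
            seen = defaultdict(set, {newest: seen[newest]})

pages = [
    [{'eventId': 'a', 'timestamp': 1}, {'eventId': 'b', 'timestamp': 2}],
    [{'eventId': 'b', 'timestamp': 2}, {'eventId': 'c', 'timestamp': 2}],
]
print([e['eventId'] for e in dedupe(pages)])  # ['a', 'b', 'c']
```
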
diff --git a/awscli/customizations/opsworks.py b/awscli/customizations/opsworks.py
index e91d47896fd9..1b02f3120094 100644
--- a/awscli/customizations/opsworks.py
+++ b/awscli/customizations/opsworks.py
@@ -22,13 +22,11 @@
import tempfile
import textwrap
-from botocore.exceptions import ClientError
-
-from awscli.compat import urlopen, ensure_text_type
+from awscli.compat import ensure_text_type, urlopen
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.utils import create_client_from_parsed_globals
from awscli.customizations.exceptions import ParamValidationError
-
+from awscli.customizations.utils import create_client_from_parsed_globals
+from botocore.exceptions import ClientError
LOG = logging.getLogger(__name__)
@@ -41,8 +39,9 @@
INSTANCE_ID_RE = re.compile(r"^i-[0-9a-f]+$")
IP_ADDRESS_RE = re.compile(r"^\d+\.\d+\.\d+\.\d+$")
-IDENTITY_URL = \
+IDENTITY_URL = (
"http://169.254.169.254/latest/dynamic/instance-identity/document"
+)
REMOTE_SCRIPT = """
set -e
@@ -78,49 +77,83 @@ class OpsWorksRegister(BasicCommand):
""").strip()
ARG_TABLE = [
- {'name': 'stack-id', 'required': True,
- 'help_text': """A stack ID. The instance will be registered with the
- given stack."""},
- {'name': 'infrastructure-class', 'required': True,
- 'choices': ['ec2', 'on-premises'],
- 'help_text': """Specifies whether to register an EC2 instance (`ec2`)
- or an on-premises instance (`on-premises`)."""},
- {'name': 'override-hostname', 'dest': 'hostname',
- 'help_text': """The instance hostname. If not provided, the current
- hostname of the machine will be used."""},
- {'name': 'override-private-ip', 'dest': 'private_ip',
- 'help_text': """An IP address. If you set this parameter, the given IP
+ {
+ 'name': 'stack-id',
+ 'required': True,
+ 'help_text': """A stack ID. The instance will be registered with the
+ given stack.""",
+ },
+ {
+ 'name': 'infrastructure-class',
+ 'required': True,
+ 'choices': ['ec2', 'on-premises'],
+ 'help_text': """Specifies whether to register an EC2 instance (`ec2`)
+ or an on-premises instance (`on-premises`).""",
+ },
+ {
+ 'name': 'override-hostname',
+ 'dest': 'hostname',
+ 'help_text': """The instance hostname. If not provided, the current
+ hostname of the machine will be used.""",
+ },
+ {
+ 'name': 'override-private-ip',
+ 'dest': 'private_ip',
+ 'help_text': """An IP address. If you set this parameter, the given IP
address will be used as the private IP address within
OpsWorks. Otherwise the private IP address will be
determined automatically. Not to be used with EC2
- instances."""},
- {'name': 'override-public-ip', 'dest': 'public_ip',
- 'help_text': """An IP address. If you set this parameter, the given IP
+ instances.""",
+ },
+ {
+ 'name': 'override-public-ip',
+ 'dest': 'public_ip',
+ 'help_text': """An IP address. If you set this parameter, the given IP
address will be used as the public IP address within
OpsWorks. Otherwise the public IP address will be
determined automatically. Not to be used with EC2
- instances."""},
- {'name': 'override-ssh', 'dest': 'ssh',
- 'help_text': """If you set this parameter, the given command will be
- used to connect to the machine."""},
- {'name': 'ssh-username', 'dest': 'username',
- 'help_text': """If provided, this username will be used to connect to
- the host."""},
- {'name': 'ssh-private-key', 'dest': 'private_key',
- 'help_text': """If provided, the given private key file will be used
- to connect to the machine."""},
- {'name': 'local', 'action': 'store_true',
- 'help_text': """If given, instead of a remote machine, the local
+ instances.""",
+ },
+ {
+ 'name': 'override-ssh',
+ 'dest': 'ssh',
+ 'help_text': """If you set this parameter, the given command will be
+ used to connect to the machine.""",
+ },
+ {
+ 'name': 'ssh-username',
+ 'dest': 'username',
+ 'help_text': """If provided, this username will be used to connect to
+ the host.""",
+ },
+ {
+ 'name': 'ssh-private-key',
+ 'dest': 'private_key',
+ 'help_text': """If provided, the given private key file will be used
+ to connect to the machine.""",
+ },
+ {
+ 'name': 'local',
+ 'action': 'store_true',
+ 'help_text': """If given, instead of a remote machine, the local
machine will be imported. Cannot be used together
- with `target`."""},
- {'name': 'use-instance-profile', 'action': 'store_true',
- 'help_text': """Use the instance profile instead of creating an IAM
- user."""},
- {'name': 'target', 'positional_arg': True, 'nargs': '?',
- 'synopsis': '[<target>]',
- 'help_text': """Either the EC2 instance ID or the hostname of the
+ with `target`.""",
+ },
+ {
+ 'name': 'use-instance-profile',
+ 'action': 'store_true',
+ 'help_text': """Use the instance profile instead of creating an IAM
+ user.""",
+ },
+ {
+ 'name': 'target',
+ 'positional_arg': True,
+ 'nargs': '?',
+ 'synopsis': '[<target>]',
+ 'help_text': """Either the EC2 instance ID or the hostname of the
instance or machine to be registered with OpsWorks.
- Cannot be used together with `--local`."""},
+ Cannot be used together with `--local`.""",
+ },
]
def __init__(self, session):
@@ -136,7 +169,8 @@ def __init__(self, session):
def _create_clients(self, args, parsed_globals):
self.iam = self._session.create_client('iam')
self.opsworks = create_client_from_parsed_globals(
- self._session, 'opsworks', parsed_globals)
+ self._session, 'opsworks', parsed_globals
+ )
def _run_main(self, args, parsed_globals):
self._create_clients(args, parsed_globals)
@@ -157,36 +191,45 @@ def prevalidate_arguments(self, args):
raise ParamValidationError("One of target or --local is required.")
elif args.target and args.local:
raise ParamValidationError(
- "Arguments target and --local are mutually exclusive.")
+ "Arguments target and --local are mutually exclusive."
+ )
if args.local and platform.system() != 'Linux':
raise ParamValidationError(
- "Non-Linux instances are not supported by AWS OpsWorks.")
+ "Non-Linux instances are not supported by AWS OpsWorks."
+ )
if args.ssh and (args.username or args.private_key):
raise ParamValidationError(
"Argument --override-ssh cannot be used together with "
- "--ssh-username or --ssh-private-key.")
+ "--ssh-username or --ssh-private-key."
+ )
if args.infrastructure_class == 'ec2':
if args.private_ip:
raise ParamValidationError(
- "--override-private-ip is not supported for EC2.")
+ "--override-private-ip is not supported for EC2."
+ )
if args.public_ip:
raise ParamValidationError(
- "--override-public-ip is not supported for EC2.")
+ "--override-public-ip is not supported for EC2."
+ )
- if args.infrastructure_class == 'on-premises' and \
- args.use_instance_profile:
+ if (
+ args.infrastructure_class == 'on-premises'
+ and args.use_instance_profile
+ ):
raise ParamValidationError(
- "--use-instance-profile is only supported for EC2.")
+ "--use-instance-profile is only supported for EC2."
+ )
if args.hostname:
if not HOSTNAME_RE.match(args.hostname):
raise ParamValidationError(
"Invalid hostname: '%s'. Hostnames must consist of "
"letters, digits and dashes only and must not start or "
- "end with a dash." % args.hostname)
+ "end with a dash." % args.hostname
+ )
def retrieve_stack(self, args):
"""
@@ -197,18 +240,20 @@ def retrieve_stack(self, args):
"""
LOG.debug("Retrieving stack and provisioning parameters")
- self._stack = self.opsworks.describe_stacks(
- StackIds=[args.stack_id]
- )['Stacks'][0]
- self._prov_params = \
+ self._stack = self.opsworks.describe_stacks(StackIds=[args.stack_id])[
+ 'Stacks'
+ ][0]
+ self._prov_params = (
self.opsworks.describe_stack_provisioning_parameters(
StackId=self._stack['StackId']
)
+ )
if args.infrastructure_class == 'ec2' and not args.local:
LOG.debug("Retrieving EC2 instance information")
ec2 = self._session.create_client(
- 'ec2', region_name=self._stack['Region'])
+ 'ec2', region_name=self._stack['Region']
+ )
# `desc_args` are arguments for the describe_instances call,
# whereas `conditions` is a list of lambdas for further filtering
@@ -234,9 +279,10 @@ def retrieve_stack(self, args):
# Cannot search for either private or public IP at the same
# time, thus filter afterwards
conditions.append(
- lambda instance:
- instance.get('PrivateIpAddress') == args.target or
- instance.get('PublicIpAddress') == args.target)
+ lambda instance: instance.get('PrivateIpAddress')
+ == args.target
+ or instance.get('PublicIpAddress') == args.target
+ )
# also use the given address to connect
self._use_address = args.target
else:
@@ -255,12 +301,16 @@ def retrieve_stack(self, args):
if not instances:
raise ValueError(
- "Did not find any instance matching %s." % args.target)
+ "Did not find any instance matching %s." % args.target
+ )
elif len(instances) > 1:
raise ValueError(
- "Found multiple instances matching %s: %s." % (
+ "Found multiple instances matching %s: %s."
+ % (
args.target,
- ", ".join(i['InstanceId'] for i in instances)))
+ ", ".join(i['InstanceId'] for i in instances),
+ )
+ )
self._ec2_instance = instances[0]
@@ -273,19 +323,24 @@ def validate_arguments(self, args):
instances = self.opsworks.describe_instances(
StackId=self._stack['StackId']
)['Instances']
- if any(args.hostname.lower() == instance['Hostname']
- for instance in instances):
+ if any(
+ args.hostname.lower() == instance['Hostname']
+ for instance in instances
+ ):
raise ValueError(
"Invalid hostname: '%s'. Hostnames must be unique within "
- "a stack." % args.hostname)
+ "a stack." % args.hostname
+ )
if args.infrastructure_class == 'ec2' and args.local:
# make sure the regions match
region = json.loads(
- ensure_text_type(urlopen(IDENTITY_URL).read()))['region']
+ ensure_text_type(urlopen(IDENTITY_URL).read())
+ )['region']
if region != self._stack['Region']:
raise ValueError(
- "The stack's and the instance's region must match.")
+ "The stack's and the instance's region must match."
+ )
def determine_details(self, args):
"""
@@ -306,12 +361,14 @@ def determine_details(self, args):
elif 'PrivateIpAddress' in self._ec2_instance:
LOG.warning(
"Instance does not have a public IP address. Trying "
- "to use the private address to connect.")
+ "to use the private address to connect."
+ )
self._use_address = self._ec2_instance['PrivateIpAddress']
else:
# Should never happen
raise ValueError(
- "The instance does not seem to have an IP address.")
+ "The instance does not seem to have an IP address."
+ )
elif args.infrastructure_class == 'on-premises':
self._use_address = args.target
@@ -344,7 +401,10 @@ def create_iam_entities(self, args):
self.iam.create_group(GroupName=group_name, Path=IAM_PATH)
LOG.debug("Created IAM group %s", group_name)
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists':
+ if (
+ e.response.get('Error', {}).get('Code')
+ == 'EntityAlreadyExists'
+ ):
LOG.debug("IAM group %s exists, continuing", group_name)
# group already exists, good
pass
@@ -355,17 +415,20 @@ def create_iam_entities(self, args):
LOG.debug("Creating an IAM user")
base_username = "OpsWorks-%s-%s" % (
shorten_name(clean_for_iam(self._stack['Name']), 25),
- shorten_name(clean_for_iam(self._name_for_iam), 25)
+ shorten_name(clean_for_iam(self._name_for_iam), 25),
)
for try_ in range(20):
username = base_username + ("+%s" % try_ if try_ else "")
try:
self.iam.create_user(UserName=username, Path=IAM_PATH)
except ClientError as e:
- if e.response.get('Error', {}).get('Code') == 'EntityAlreadyExists':
+ if (
+ e.response.get('Error', {}).get('Code')
+ == 'EntityAlreadyExists'
+ ):
LOG.debug(
"IAM user %s already exists, trying another name",
- username
+ username,
)
# user already exists, try the next one
pass
@@ -382,8 +445,7 @@ def create_iam_entities(self, args):
try:
self.iam.attach_user_policy(
- PolicyArn=IAM_POLICY_ARN,
- UserName=username
+ PolicyArn=IAM_POLICY_ARN, UserName=username
)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'AccessDenied':
@@ -391,32 +453,29 @@ def create_iam_entities(self, args):
"Unauthorized to attach policy %s to user %s. Trying "
"to put user policy",
IAM_POLICY_ARN,
- username
+ username,
)
self.iam.put_user_policy(
PolicyName=IAM_USER_POLICY_NAME,
PolicyDocument=self._iam_policy_document(
- self._stack['Arn'], IAM_USER_POLICY_TIMEOUT),
- UserName=username
+ self._stack['Arn'], IAM_USER_POLICY_TIMEOUT
+ ),
+ UserName=username,
)
LOG.debug(
- "Put policy %s to user %s",
- IAM_USER_POLICY_NAME,
- username
+ "Put policy %s to user %s", IAM_USER_POLICY_NAME, username
)
else:
raise
else:
LOG.debug(
- "Attached policy %s to user %s",
- IAM_POLICY_ARN,
- username
+ "Attached policy %s to user %s", IAM_POLICY_ARN, username
)
LOG.debug("Creating an access key")
- self.access_key = self.iam.create_access_key(
- UserName=username
- )['AccessKey']
+ self.access_key = self.iam.create_access_key(UserName=username)[
+ 'AccessKey'
+ ]
def setup_target_machine(self, args):
"""
@@ -425,12 +484,11 @@ def setup_target_machine(self, args):
"""
remote_script = REMOTE_SCRIPT % {
- 'agent_installer_url':
- self._prov_params['AgentInstallerUrl'],
- 'preconfig':
- self._to_ruby_yaml(self._pre_config_document(args)),
- 'assets_download_bucket':
- self._prov_params['Parameters']['assets_download_bucket']
+ 'agent_installer_url': self._prov_params['AgentInstallerUrl'],
+ 'preconfig': self._to_ruby_yaml(self._pre_config_document(args)),
+ 'assets_download_bucket': self._prov_params['Parameters'][
+ 'assets_download_bucket'
+ ],
}
if args.local:
@@ -482,13 +540,13 @@ def ssh(self, args, remote_script):
def _pre_config_document(self, args):
parameters = dict(
- stack_id=self._stack['StackId'],
- **self._prov_params["Parameters"]
+ stack_id=self._stack['StackId'], **self._prov_params["Parameters"]
)
if self.access_key:
parameters['access_key_id'] = self.access_key['AccessKeyId']
- parameters['secret_access_key'] = \
- self.access_key['SecretAccessKey']
+ parameters['secret_access_key'] = self.access_key[
+ 'SecretAccessKey'
+ ]
if self._use_hostname:
parameters['hostname'] = self._use_hostname
if args.private_ip:
@@ -510,20 +568,20 @@ def _iam_policy_document(arn, timeout=None):
valid_until = datetime.datetime.utcnow() + timeout
statement["Condition"] = {
"DateLessThan": {
- "aws:CurrentTime":
- valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
+ "aws:CurrentTime": valid_until.strftime(
+ "%Y-%m-%dT%H:%M:%SZ"
+ )
}
}
- policy_document = {
- "Statement": [statement],
- "Version": "2012-10-17"
- }
+ policy_document = {"Statement": [statement], "Version": "2012-10-17"}
return json.dumps(policy_document)
@staticmethod
def _to_ruby_yaml(parameters):
- return "\n".join(":%s: %s" % (k, json.dumps(v))
- for k, v in sorted(parameters.items()))
+ return "\n".join(
+ ":%s: %s" % (k, json.dumps(v))
+ for k, v in sorted(parameters.items())
+ )
def clean_for_iam(name):
@@ -542,4 +600,4 @@ def shorten_name(name, max_length):
if len(name) <= max_length:
return name
q, r = divmod(max_length - 3, 2)
- return name[:q + r] + "..." + name[-q:]
+ return name[: q + r] + "..." + name[-q:]
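
`shorten_name` above keeps the head and tail of an over-long name and replaces the middle with `...`, so the result lands exactly on `max_length`; `create_iam_entities` relies on this to keep the generated `OpsWorks-<stack>-<name>` user names within IAM limits. A quick demonstration:

```python
def shorten_name(name, max_length):
    # Same logic as above: head + "..." + tail, exactly max_length long.
    if len(name) <= max_length:
        return name
    q, r = divmod(max_length - 3, 2)
    return name[: q + r] + "..." + name[-q:]

# 11 leading chars + 3 dots + 11 trailing chars = 25.
print(shorten_name('a-very-long-opsworks-stack-name', 25))
print(len(shorten_name('a-very-long-opsworks-stack-name', 25)))  # 25
```
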
diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py
index 77b0f3362ec2..074a59875beb 100644
--- a/awscli/customizations/paginate.py
+++ b/awscli/customizations/paginate.py
@@ -23,17 +23,16 @@
* Add a ``--starting-token`` and a ``--max-items`` argument.
"""
+
import logging
import sys
from functools import partial
-from botocore import xform_name
-from botocore.exceptions import DataNotFoundError
-from botocore import model
-
from awscli.arguments import BaseCLIArgument
from awscli.customizations.exceptions import ParamValidationError
from awscli.customizations.utils import uni_print
+from botocore import model, xform_name
+from botocore.exceptions import DataNotFoundError
logger = logging.getLogger(__name__)
@@ -87,7 +86,8 @@ def get_paginator_config(session, service_name, operation_name):
return None
try:
operation_paginator_config = paginator_model.get_paginator(
- operation_name)
+ operation_name
+ )
except ValueError:
return None
return operation_paginator_config
@@ -100,15 +100,19 @@ def add_paging_description(help_command, **kwargs):
return
service_name = help_command.obj.service_model.service_name
paginator_config = get_paginator_config(
- help_command.session, service_name, help_command.obj.name)
+ help_command.session, service_name, help_command.obj.name
+ )
if not paginator_config:
return
help_command.doc.style.new_paragraph()
help_command.doc.writeln(
- ('``%s`` is a paginated operation. Multiple API calls may be issued '
- 'in order to retrieve the entire data set of results. You can '
- 'disable pagination by providing the ``--no-paginate`` argument.')
- % help_command.name)
+ (
+ '``%s`` is a paginated operation. Multiple API calls may be issued '
+ 'in order to retrieve the entire data set of results. You can '
+ 'disable pagination by providing the ``--no-paginate`` argument.'
+ )
+ % help_command.name
+ )
# Only include result key information if it is present.
if paginator_config.get('result_key'):
queries = paginator_config['result_key']
@@ -116,33 +120,48 @@ def add_paging_description(help_command, **kwargs):
queries = [queries]
queries = ", ".join([('``%s``' % s) for s in queries])
help_command.doc.writeln(
- ('When using ``--output text`` and the ``--query`` argument on a '
- 'paginated response, the ``--query`` argument must extract data '
- 'from the results of the following query expressions: %s')
- % queries)
+ (
+ 'When using ``--output text`` and the ``--query`` argument on a '
+ 'paginated response, the ``--query`` argument must extract data '
+ 'from the results of the following query expressions: %s'
+ )
+ % queries
+ )
-def unify_paging_params(argument_table, operation_model, event_name,
- session, **kwargs):
+def unify_paging_params(
+ argument_table, operation_model, event_name, session, **kwargs
+):
paginator_config = get_paginator_config(
- session, operation_model.service_model.service_name,
- operation_model.name)
+ session,
+ operation_model.service_model.service_name,
+ operation_model.name,
+ )
if paginator_config is None:
# We only apply these customizations to paginated responses.
return
- logger.debug("Modifying paging parameters for operation: %s",
- operation_model.name)
+ logger.debug(
+ "Modifying paging parameters for operation: %s", operation_model.name
+ )
_remove_existing_paging_arguments(argument_table, paginator_config)
- parsed_args_event = event_name.replace('building-argument-table.',
- 'operation-args-parsed.')
- call_parameters_event = event_name.replace('building-argument-table',
- 'calling-command')
+ parsed_args_event = event_name.replace(
+ 'building-argument-table.', 'operation-args-parsed.'
+ )
+ call_parameters_event = event_name.replace(
+ 'building-argument-table', 'calling-command'
+ )
shadowed_args = {}
- add_paging_argument(argument_table, 'starting-token',
- PageArgument('starting-token', STARTING_TOKEN_HELP,
- parse_type='string',
- serialized_name='StartingToken'),
- shadowed_args)
+ add_paging_argument(
+ argument_table,
+ 'starting-token',
+ PageArgument(
+ 'starting-token',
+ STARTING_TOKEN_HELP,
+ parse_type='string',
+ serialized_name='StartingToken',
+ ),
+ shadowed_args,
+ )
input_members = operation_model.input_shape.members
type_name = 'integer'
if 'limit_key' in paginator_config:
@@ -150,21 +169,38 @@ def unify_paging_params(argument_table, operation_model, event_name,
type_name = limit_key_shape.type_name
if type_name not in PageArgument.type_map:
raise TypeError(
- ('Unsupported pagination type {0} for operation {1}'
- ' and parameter {2}').format(
- type_name, operation_model.name,
- paginator_config['limit_key']))
- add_paging_argument(argument_table, 'page-size',
- PageArgument('page-size', PAGE_SIZE_HELP,
- parse_type=type_name,
- serialized_name='PageSize'),
- shadowed_args)
-
- add_paging_argument(argument_table, 'max-items',
- PageArgument('max-items', MAX_ITEMS_HELP,
- parse_type=type_name,
- serialized_name='MaxItems'),
- shadowed_args)
+ (
+ 'Unsupported pagination type {0} for operation {1}'
+ ' and parameter {2}'
+ ).format(
+ type_name,
+ operation_model.name,
+ paginator_config['limit_key'],
+ )
+ )
+ add_paging_argument(
+ argument_table,
+ 'page-size',
+ PageArgument(
+ 'page-size',
+ PAGE_SIZE_HELP,
+ parse_type=type_name,
+ serialized_name='PageSize',
+ ),
+ shadowed_args,
+ )
+
+ add_paging_argument(
+ argument_table,
+ 'max-items',
+ PageArgument(
+ 'max-items',
+ MAX_ITEMS_HELP,
+ parse_type=type_name,
+ serialized_name='MaxItems',
+ ),
+ shadowed_args,
+ )
# We will register two pagination handlers.
#
# The first is focused on analyzing the CLI arguments passed to see
@@ -179,13 +215,20 @@ def unify_paging_params(argument_table, operation_model, event_name,
# directly and this bypasses all of the CLI args processing.
session.register(
parsed_args_event,
- partial(check_should_enable_pagination,
- list(_get_all_cli_input_tokens(paginator_config)),
- shadowed_args, argument_table))
+ partial(
+ check_should_enable_pagination,
+ list(_get_all_cli_input_tokens(paginator_config)),
+ shadowed_args,
+ argument_table,
+ ),
+ )
session.register(
call_parameters_event,
- partial(check_should_enable_pagination_call_parameters,
- list(_get_all_input_tokens(paginator_config))))
+ partial(
+ check_should_enable_pagination_call_parameters,
+ list(_get_all_input_tokens(paginator_config)),
+ ),
+ )
def add_paging_argument(argument_table, arg_name, argument, shadowed_args):
@@ -199,17 +242,27 @@ def add_paging_argument(argument_table, arg_name, argument, shadowed_args):
argument_table[arg_name] = argument
-def check_should_enable_pagination(input_tokens, shadowed_args, argument_table,
- parsed_args, parsed_globals, **kwargs):
+def check_should_enable_pagination(
+ input_tokens,
+ shadowed_args,
+ argument_table,
+ parsed_args,
+ parsed_globals,
+ **kwargs,
+):
normalized_paging_args = ['start_token', 'max_items']
for token in input_tokens:
py_name = token.replace('-', '_')
- if getattr(parsed_args, py_name) is not None and \
- py_name not in normalized_paging_args:
+ if (
+ getattr(parsed_args, py_name) is not None
+ and py_name not in normalized_paging_args
+ ):
# The user has specified a manual (undocumented) pagination arg.
# We need to automatically turn pagination off.
- logger.debug("User has specified a manual pagination arg. "
- "Automatically setting --no-paginate.")
+ logger.debug(
+ "User has specified a manual pagination arg. "
+ "Automatically setting --no-paginate."
+ )
parsed_globals.paginate = False
if not parsed_globals.paginate:
@@ -229,12 +282,16 @@ def check_should_enable_pagination(input_tokens, shadowed_args, argument_table,
def ensure_paging_params_not_set(parsed_args, shadowed_args):
paging_params = ['starting_token', 'page_size', 'max_items']
shadowed_params = [p.replace('-', '_') for p in shadowed_args.keys()]
- params_used = [p for p in paging_params if
- p not in shadowed_params and getattr(parsed_args, p, None)]
+ params_used = [
+ p
+ for p in paging_params
+ if p not in shadowed_params and getattr(parsed_args, p, None)
+ ]
if len(params_used) > 0:
converted_params = ', '.join(
- ["--" + p.replace('_', '-') for p in params_used])
+ ["--" + p.replace('_', '-') for p in params_used]
+ )
raise ParamValidationError(
"Cannot specify --no-paginate along with pagination "
"arguments: %s" % converted_params
@@ -291,11 +348,14 @@ def _get_cli_name(param_objects, token_name):
# and would be missed by the processing above. This function gets
# called on the calling-command event.
def check_should_enable_pagination_call_parameters(
- input_tokens, call_parameters, parsed_args, parsed_globals, **kwargs):
+ input_tokens, call_parameters, parsed_args, parsed_globals, **kwargs
+):
for param in call_parameters:
if param in input_tokens:
- logger.debug("User has specified a manual pagination arg. "
- "Automatically setting --no-paginate.")
+ logger.debug(
+ "User has specified a manual pagination arg. "
+ "Automatically setting --no-paginate."
+ )
parsed_globals.paginate = False
@@ -317,7 +377,8 @@ def __init__(self, name, documentation, parse_type, serialized_name):
def _emit_non_positive_max_items_warning(self):
uni_print(
"warning: Non-positive values for --max-items may result in undefined behavior.\n",
- sys.stderr)
+ sys.stderr,
+ )
@property
def cli_name(self):
@@ -340,8 +401,11 @@ def documentation(self):
return self._documentation
def add_to_parser(self, parser):
- parser.add_argument(self.cli_name, dest=self.py_name,
- type=self.type_map[self._parse_type])
+ parser.add_argument(
+ self.cli_name,
+ dest=self.py_name,
+ type=self.type_map[self._parse_type],
+ )
def add_to_params(self, parameters, value):
if value is not None:
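
`check_should_enable_pagination` above turns automatic pagination off whenever the user supplies one of the service's raw paging parameters, so the CLI and the user do not fight over token handling. A toy model of that check (the argument names are invented):

```python
from argparse import Namespace

def should_paginate(input_tokens, parsed_args):
    # start_token/max_items are the CLI's own normalized paging args;
    # anything else set by the user is a manual paging parameter.
    normalized_paging_args = ('start_token', 'max_items')
    for token in input_tokens:
        py_name = token.replace('-', '_')
        if (
            getattr(parsed_args, py_name, None) is not None
            and py_name not in normalized_paging_args
        ):
            return False
    return True

args = Namespace(marker=None, max_keys=10)
print(should_paginate(['marker', 'max-keys'], args))  # False
```
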
diff --git a/awscli/customizations/putmetricdata.py b/awscli/customizations/putmetricdata.py
index 10ef322b2323..da63967bdd8b 100644
--- a/awscli/customizations/putmetricdata.py
+++ b/awscli/customizations/putmetricdata.py
@@ -23,21 +23,32 @@
* --storage-resolution
"""
+
import decimal
from awscli.arguments import CustomArgument
-from awscli.utils import split_on_commas
from awscli.customizations.utils import validate_mutually_exclusive_handler
+from awscli.utils import split_on_commas
def register_put_metric_data(event_handler):
event_handler.register(
- 'building-argument-table.cloudwatch.put-metric-data', _promote_args)
+ 'building-argument-table.cloudwatch.put-metric-data', _promote_args
+ )
event_handler.register(
'operation-args-parsed.cloudwatch.put-metric-data',
validate_mutually_exclusive_handler(
- ['metric_data'], ['metric_name', 'timestamp', 'unit', 'value',
- 'dimensions', 'statistic_values']))
+ ['metric_data'],
+ [
+ 'metric_name',
+ 'timestamp',
+ 'unit',
+ 'value',
+ 'dimensions',
+ 'statistic_values',
+ ],
+ ),
+ )
def _promote_args(argument_table, operation_model, **kwargs):
@@ -48,25 +59,32 @@ def _promote_args(argument_table, operation_model, **kwargs):
argument_table['metric-data'].required = False
argument_table['metric-name'] = PutMetricArgument(
- 'metric-name', help_text='The name of the metric.')
+ 'metric-name', help_text='The name of the metric.'
+ )
argument_table['timestamp'] = PutMetricArgument(
- 'timestamp', help_text='The time stamp used for the metric. '
- 'If not specified, the default value is '
- 'set to the time the metric data was '
- 'received.')
+ 'timestamp',
+ help_text='The time stamp used for the metric. '
+ 'If not specified, the default value is '
+ 'set to the time the metric data was '
+ 'received.',
+ )
argument_table['unit'] = PutMetricArgument(
- 'unit', help_text='The unit of metric.')
+ 'unit', help_text='The unit of metric.'
+ )
argument_table['value'] = PutMetricArgument(
- 'value', help_text='The value for the metric. Although the --value '
- 'parameter accepts numbers of type Double, '
- 'Amazon CloudWatch truncates values with very '
- 'large exponents. Values with base-10 exponents '
- 'greater than 126 (1 x 10^126) are truncated. '
- 'Likewise, values with base-10 exponents less '
- 'than -130 (1 x 10^-130) are also truncated.')
+ 'value',
+ help_text='The value for the metric. Although the --value '
+ 'parameter accepts numbers of type Double, '
+ 'Amazon CloudWatch truncates values with very '
+ 'large exponents. Values with base-10 exponents '
+ 'greater than 126 (1 x 10^126) are truncated. '
+ 'Likewise, values with base-10 exponents less '
+ 'than -130 (1 x 10^-130) are also truncated.',
+ )
argument_table['dimensions'] = PutMetricArgument(
- 'dimensions', help_text=(
+ 'dimensions',
+ help_text=(
'The --dimensions argument further expands '
'on the identity of a metric using a Name=Value '
'pair, separated by commas, for example: '
@@ -76,11 +94,12 @@ def _promote_args(argument_table, operation_model, **kwargs):
'where for the same example you would use the format '
'<code>--dimensions Name=InstanceID,Value=i-aaba32d4 '
'Name=InstanceType,value=m1.small</code>.'
- )
+ ),
)
argument_table['statistic-values'] = PutMetricArgument(
- 'statistic-values', help_text='A set of statistical values describing '
- 'the metric.')
+ 'statistic-values',
+ help_text='A set of statistical values describing ' 'the metric.',
+ )
metric_data = operation_model.input_shape.members['MetricData'].member
storage_resolution = metric_data.members['StorageResolution']
@@ -103,7 +122,9 @@ def _add_to_params(self, parameters, value):
parameters[name] = [{}]
first_element = parameters[name][0]
return func(self, first_element, value)
+
return _add_to_params
+
return _wrap_add_to_params
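
# For reference, a minimal standalone sketch (not the shipped awscli code) of
# the wrapper pattern shown in the hunk above: a decorator routes each
# promoted scalar argument into the first element of a shared list parameter,
# so --metric-name/--value/etc. together build a single MetricData entry.
def insert_into_first_element(name):
    def _wrap_add_to_params(func):
        def _add_to_params(self, parameters, value):
            if name not in parameters:
                parameters[name] = [{}]
            first_element = parameters[name][0]
            return func(self, first_element, value)

        return _add_to_params

    return _wrap_add_to_params


class _Arg:
    @insert_into_first_element('MetricData')
    def add_value(self, element, value):
        element['Value'] = value


params = {}
_Arg().add_value(params, 12.5)
assert params == {'MetricData': [{'Value': 12.5}]}
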
diff --git a/awscli/customizations/quicksight.py b/awscli/customizations/quicksight.py
index 3cc048452573..6750e0b5b0f2 100644
--- a/awscli/customizations/quicksight.py
+++ b/awscli/customizations/quicksight.py
@@ -16,11 +16,13 @@
 _ASSET_BUNDLE_FILE_DOCSTRING = (
     '<p>The content of the asset bundle to be uploaded. '
     'To specify the content of a local file use the '
-    'fileb:// prefix. Example: fileb://asset-bundle.zip</p>')
+    'fileb:// prefix. Example: fileb://asset-bundle.zip</p>'
+)
 _ASSET_BUNDLE_DOCSTRING_ADDENDUM = (
     '<p>To specify a local file use '
-    '<code>--asset-bundle-import-source-bytes</code> instead.</p>')
+    '<code>--asset-bundle-import-source-bytes</code> instead.</p>'
+)
def register_quicksight_asset_bundle_customizations(cli):
@@ -31,4 +33,6 @@ def register_quicksight_asset_bundle_customizations(cli):
source_arg_blob_member='Body',
new_arg='asset-bundle-import-source-bytes',
new_arg_doc_string=_ASSET_BUNDLE_FILE_DOCSTRING,
- doc_string_addendum=_ASSET_BUNDLE_DOCSTRING_ADDENDUM))
+ doc_string_addendum=_ASSET_BUNDLE_DOCSTRING_ADDENDUM,
+ ),
+ )
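
# A hypothetical sketch of what the blob-argument "hoisting" above amounts to
# at parameter-building time: the flat --asset-bundle-import-source-bytes
# value is written back into the nested Body member of the structured
# AssetBundleImportSource parameter. Names follow the diff; the helper itself
# is illustrative, not the real NestedBlobArgumentHoister.
def hoist_blob(params, source_arg, blob_member, value):
    params.setdefault(source_arg, {})[blob_member] = value
    return params


params = hoist_blob({}, 'AssetBundleImportSource', 'Body', b'zip-bytes')
assert params == {'AssetBundleImportSource': {'Body': b'zip-bytes'}}
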
diff --git a/awscli/customizations/rds.py b/awscli/customizations/rds.py
index cac3173f3f76..48fd7c3b042f 100644
--- a/awscli/customizations/rds.py
+++ b/awscli/customizations/rds.py
@@ -24,8 +24,7 @@
"""
-from awscli.clidriver import ServiceOperation
-from awscli.clidriver import CLIOperationCaller
+from awscli.clidriver import CLIOperationCaller, ServiceOperation
from awscli.customizations import utils
from awscli.customizations.commands import BasicCommand
from awscli.customizations.utils import uni_print
@@ -33,10 +32,14 @@
def register_rds_modify_split(cli):
cli.register('building-command-table.rds', _building_command_table)
- cli.register('building-argument-table.rds.add-option-to-option-group',
- _rename_add_option)
- cli.register('building-argument-table.rds.remove-option-from-option-group',
- _rename_remove_option)
+ cli.register(
+ 'building-argument-table.rds.add-option-to-option-group',
+ _rename_add_option,
+ )
+ cli.register(
+ 'building-argument-table.rds.remove-option-from-option-group',
+ _rename_remove_option,
+ )
def register_add_generate_db_auth_token(cli):
@@ -49,14 +52,16 @@ def _add_generate_db_auth_token(command_table, session, **kwargs):
def _rename_add_option(argument_table, **kwargs):
- utils.rename_argument(argument_table, 'options-to-include',
- new_name='options')
+ utils.rename_argument(
+ argument_table, 'options-to-include', new_name='options'
+ )
del argument_table['options-to-remove']
def _rename_remove_option(argument_table, **kwargs):
- utils.rename_argument(argument_table, 'options-to-remove',
- new_name='options')
+ utils.rename_argument(
+ argument_table, 'options-to-remove', new_name='options'
+ )
del argument_table['options-to-include']
@@ -69,15 +74,19 @@ def _building_command_table(command_table, session, **kwargs):
rds_model = session.get_service_model('rds')
modify_operation_model = rds_model.operation_model('ModifyOptionGroup')
command_table['add-option-to-option-group'] = ServiceOperation(
- parent_name='rds', name='add-option-to-option-group',
+ parent_name='rds',
+ name='add-option-to-option-group',
operation_caller=CLIOperationCaller(session),
session=session,
- operation_model=modify_operation_model)
+ operation_model=modify_operation_model,
+ )
command_table['remove-option-from-option-group'] = ServiceOperation(
- parent_name='rds', name='remove-option-from-option-group',
+ parent_name='rds',
+ name='remove-option-from-option-group',
session=session,
operation_model=modify_operation_model,
- operation_caller=CLIOperationCaller(session))
+ operation_caller=CLIOperationCaller(session),
+ )
class GenerateDBAuthTokenCommand(BasicCommand):
@@ -86,23 +95,35 @@ class GenerateDBAuthTokenCommand(BasicCommand):
'Generates an auth token used to connect to a db with IAM credentials.'
)
ARG_TABLE = [
- {'name': 'hostname', 'required': True,
- 'help_text': 'The hostname of the database to connect to.'},
- {'name': 'port', 'cli_type_name': 'integer', 'required': True,
- 'help_text': 'The port number the database is listening on.'},
- {'name': 'username', 'required': True,
- 'help_text': 'The username to log in as.'}
+ {
+ 'name': 'hostname',
+ 'required': True,
+ 'help_text': 'The hostname of the database to connect to.',
+ },
+ {
+ 'name': 'port',
+ 'cli_type_name': 'integer',
+ 'required': True,
+ 'help_text': 'The port number the database is listening on.',
+ },
+ {
+ 'name': 'username',
+ 'required': True,
+ 'help_text': 'The username to log in as.',
+ },
]
def _run_main(self, parsed_args, parsed_globals):
rds = self._session.create_client(
- 'rds', parsed_globals.region, parsed_globals.endpoint_url,
- parsed_globals.verify_ssl
+ 'rds',
+ parsed_globals.region,
+ parsed_globals.endpoint_url,
+ parsed_globals.verify_ssl,
)
token = rds.generate_db_auth_token(
DBHostname=parsed_args.hostname,
Port=parsed_args.port,
- DBUsername=parsed_args.username
+ DBUsername=parsed_args.username,
)
uni_print(token)
uni_print('\n')
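
# The command above ultimately wraps the same call exposed by boto3; a
# minimal sketch with hypothetical endpoint and username values (the CLI
# version additionally threads through region, endpoint URL, and TLS
# settings from the parsed global arguments):
import boto3

rds = boto3.client('rds')
token = rds.generate_db_auth_token(
    DBHostname='mydb.cluster-123456789012.us-east-1.rds.amazonaws.com',
    Port=5432,
    DBUsername='iam_db_user',
)
print(token)  # short-lived token used as the database password
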
diff --git a/awscli/customizations/rekognition.py b/awscli/customizations/rekognition.py
index ba03ef1d7e9f..267376015b36 100644
--- a/awscli/customizations/rekognition.py
+++ b/awscli/customizations/rekognition.py
@@ -13,12 +13,15 @@
from awscli.customizations.arguments import NestedBlobArgumentHoister
-IMAGE_FILE_DOCSTRING = ('<p>The content of the image to be uploaded. '
-                        'To specify the content of a local file use the '
-                        'fileb:// prefix. '
-                        'Example: fileb://image.png</p>')
-IMAGE_DOCSTRING_ADDENDUM = ('<p>To specify a local file use <code>--%s</code> '
-                            'instead.</p>')
+IMAGE_FILE_DOCSTRING = (
+    '<p>The content of the image to be uploaded. '
+    'To specify the content of a local file use the '
+    'fileb:// prefix. '
+    'Example: fileb://image.png</p>'
+)
+IMAGE_DOCSTRING_ADDENDUM = (
+    '<p>To specify a local file use <code>--%s</code> ' 'instead.</p>'
+)
FILE_PARAMETER_UPDATES = {
@@ -32,10 +35,13 @@ def register_rekognition_detect_labels(cli):
for target, new_param in FILE_PARAMETER_UPDATES.items():
operation, old_param = target.rsplit('.', 1)
doc_string_addendum = IMAGE_DOCSTRING_ADDENDUM % new_param
- cli.register('building-argument-table.rekognition.%s' % operation,
- NestedBlobArgumentHoister(
- source_arg=old_param,
- source_arg_blob_member='Bytes',
- new_arg=new_param,
- new_arg_doc_string=IMAGE_FILE_DOCSTRING,
- doc_string_addendum=doc_string_addendum))
+ cli.register(
+ 'building-argument-table.rekognition.%s' % operation,
+ NestedBlobArgumentHoister(
+ source_arg=old_param,
+ source_arg_blob_member='Bytes',
+ new_arg=new_param,
+ new_arg_doc_string=IMAGE_FILE_DOCSTRING,
+ doc_string_addendum=doc_string_addendum,
+ ),
+ )
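
# Illustration of how the registration loop above consumes the mapping: each
# key is 'operation.old-param', split once from the right to recover the
# operation name and the parameter to hoist. The entry used here is
# hypothetical.
updates = {'detect-labels.image': 'image-bytes'}
for target, new_param in updates.items():
    operation, old_param = target.rsplit('.', 1)
    assert (operation, old_param, new_param) == (
        'detect-labels', 'image', 'image-bytes'
    )
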
diff --git a/awscli/customizations/removals.py b/awscli/customizations/removals.py
index 5add46dc4f81..a7d99862f42e 100644
--- a/awscli/customizations/removals.py
+++ b/awscli/customizations/removals.py
@@ -18,6 +18,7 @@
yet fully supported.
"""
+
import logging
from functools import partial
@@ -26,43 +27,73 @@
def register_removals(event_handler):
cmd_remover = CommandRemover(event_handler)
- cmd_remover.remove(on_event='building-command-table.ses',
- remove_commands=['delete-verified-email-address',
- 'list-verified-email-addresses',
- 'verify-email-address'])
- cmd_remover.remove(on_event='building-command-table.ec2',
- remove_commands=['import-instance', 'import-volume'])
- cmd_remover.remove(on_event='building-command-table.emr',
- remove_commands=['run-job-flow', 'describe-job-flows',
- 'add-job-flow-steps',
- 'terminate-job-flows',
- 'list-bootstrap-actions',
- 'list-instance-groups',
- 'set-termination-protection',
- 'set-keep-job-flow-alive-when-no-steps',
- 'set-visible-to-all-users',
- 'set-unhealthy-node-replacement'])
- cmd_remover.remove(on_event='building-command-table.kinesis',
- remove_commands=['subscribe-to-shard'])
- cmd_remover.remove(on_event='building-command-table.lexv2-runtime',
- remove_commands=['start-conversation'])
- cmd_remover.remove(on_event='building-command-table.lambda',
- remove_commands=['invoke-with-response-stream'])
- cmd_remover.remove(on_event='building-command-table.sagemaker-runtime',
- remove_commands=['invoke-endpoint-with-response-stream'])
- cmd_remover.remove(on_event='building-command-table.bedrock-runtime',
- remove_commands=['invoke-model-with-response-stream',
- 'converse-stream'])
- cmd_remover.remove(on_event='building-command-table.bedrock-agent-runtime',
- remove_commands=['invoke-agent',
- 'invoke-flow',
- 'invoke-inline-agent',
- 'optimize-prompt',
- 'retrieve-and-generate-stream'])
- cmd_remover.remove(on_event='building-command-table.qbusiness',
- remove_commands=['chat'])
- cmd_remover.remove(on_event='building-command-table.iotsitewise',
- remove_commands=['invoke-assistant'])
+ cmd_remover.remove(
+ on_event='building-command-table.ses',
+ remove_commands=[
+ 'delete-verified-email-address',
+ 'list-verified-email-addresses',
+ 'verify-email-address',
+ ],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.ec2',
+ remove_commands=['import-instance', 'import-volume'],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.emr',
+ remove_commands=[
+ 'run-job-flow',
+ 'describe-job-flows',
+ 'add-job-flow-steps',
+ 'terminate-job-flows',
+ 'list-bootstrap-actions',
+ 'list-instance-groups',
+ 'set-termination-protection',
+ 'set-keep-job-flow-alive-when-no-steps',
+ 'set-visible-to-all-users',
+ 'set-unhealthy-node-replacement',
+ ],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.kinesis',
+ remove_commands=['subscribe-to-shard'],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.lexv2-runtime',
+ remove_commands=['start-conversation'],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.lambda',
+ remove_commands=['invoke-with-response-stream'],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.sagemaker-runtime',
+ remove_commands=['invoke-endpoint-with-response-stream'],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.bedrock-runtime',
+ remove_commands=[
+ 'invoke-model-with-response-stream',
+ 'converse-stream',
+ ],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.bedrock-agent-runtime',
+ remove_commands=[
+ 'invoke-agent',
+ 'invoke-flow',
+ 'invoke-inline-agent',
+ 'optimize-prompt',
+ 'retrieve-and-generate-stream',
+ ],
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.qbusiness', remove_commands=['chat']
+ )
+ cmd_remover.remove(
+ on_event='building-command-table.iotsitewise',
+ remove_commands=['invoke-assistant'],
+ )
class CommandRemover(object):
@@ -70,8 +101,7 @@ def __init__(self, events):
self._events = events
def remove(self, on_event, remove_commands):
- self._events.register(on_event,
- self._create_remover(remove_commands))
+ self._events.register(on_event, self._create_remover(remove_commands))
def _create_remover(self, commands_to_remove):
return partial(_remove_commands, commands_to_remove=commands_to_remove)
@@ -84,5 +114,6 @@ def _remove_commands(command_table, commands_to_remove, **kwargs):
LOG.debug("Removing operation: %s", command)
del command_table[command]
except KeyError:
- LOG.warning("Attempting to delete command that does not exist: %s",
- command)
+ LOG.warning(
+ "Attempting to delete command that does not exist: %s", command
+ )
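
# Standalone sketch of the remover pattern (not the shipped code): the
# callback registered for each building-command-table event is a
# functools.partial that drops the named commands from the command table.
from functools import partial


def remove_commands(command_table, commands_to_remove, **kwargs):
    for command in commands_to_remove:
        command_table.pop(command, None)


remover = partial(remove_commands, commands_to_remove=['import-instance'])
table = {'import-instance': object(), 'describe-instances': object()}
remover(table)
assert list(table) == ['describe-instances']
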
diff --git a/awscli/customizations/route53.py b/awscli/customizations/route53.py
index 686abc40c914..f482ff605827 100644
--- a/awscli/customizations/route53.py
+++ b/awscli/customizations/route53.py
@@ -18,7 +18,8 @@ def register_create_hosted_zone_doc_fix(cli):
# has the necessary documentation.
cli.register(
'doc-option.route53.create-hosted-zone.hosted-zone-config',
- add_private_zone_note)
+ add_private_zone_note,
+ )
def add_private_zone_note(help_command, **kwargs):
diff --git a/awscli/customizations/s3/comparator.py b/awscli/customizations/s3/comparator.py
index efe49c63a120..06ab58c76688 100644
--- a/awscli/customizations/s3/comparator.py
+++ b/awscli/customizations/s3/comparator.py
@@ -11,8 +11,8 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
-from awscli.compat import advance_iterator
+from awscli.compat import advance_iterator
LOG = logging.getLogger(__name__)
@@ -21,10 +21,13 @@ class Comparator(object):
"""
This class performs all of the comparisons behind the sync operation
"""
- def __init__(self, file_at_src_and_dest_sync_strategy,
- file_not_at_dest_sync_strategy,
- file_not_at_src_sync_strategy):
+ def __init__(
+ self,
+ file_at_src_and_dest_sync_strategy,
+ file_not_at_dest_sync_strategy,
+ file_not_at_src_sync_strategy,
+ ):
self._sync_strategy = file_at_src_and_dest_sync_strategy
self._not_at_dest_sync_strategy = file_not_at_dest_sync_strategy
self._not_at_src_sync_strategy = file_not_at_src_sync_strategy
@@ -102,26 +105,42 @@ def call(self, src_files, dest_files):
elif compare_keys == 'less_than':
src_take = True
dest_take = False
- should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
+ should_sync = (
+ self._not_at_dest_sync_strategy.determine_should_sync(
+ src_file, None
+ )
+ )
if should_sync:
yield src_file
elif compare_keys == 'greater_than':
src_take = False
dest_take = True
- should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
+ should_sync = (
+ self._not_at_src_sync_strategy.determine_should_sync(
+ None, dest_file
+ )
+ )
if should_sync:
yield dest_file
elif (not src_done) and dest_done:
src_take = True
- should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
+ should_sync = (
+ self._not_at_dest_sync_strategy.determine_should_sync(
+ src_file, None
+ )
+ )
if should_sync:
yield src_file
elif src_done and (not dest_done):
dest_take = True
- should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
+ should_sync = (
+ self._not_at_src_sync_strategy.determine_should_sync(
+ None, dest_file
+ )
+ )
if should_sync:
yield dest_file
else:
@@ -135,10 +154,10 @@ def compare_comp_key(self, src_file, dest_file):
src_comp_key = src_file.compare_key
dest_comp_key = dest_file.compare_key
- if (src_comp_key == dest_comp_key):
+ if src_comp_key == dest_comp_key:
return 'equal'
- elif (src_comp_key < dest_comp_key):
+ elif src_comp_key < dest_comp_key:
return 'less_than'
else:
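
# A condensed sketch of the comparator's merge strategy: walk two key-sorted
# streams in lockstep and classify each item as present at both sides, only
# at the source, or only at the destination. The real class defers the
# should-sync decision for each case to a pluggable strategy object.
def classify(src_keys, dest_keys):
    src, dest = iter(sorted(src_keys)), iter(sorted(dest_keys))
    s, d = next(src, None), next(dest, None)
    while s is not None or d is not None:
        if d is None or (s is not None and s < d):
            yield ('only_at_src', s)
            s = next(src, None)
        elif s is None or d < s:
            yield ('only_at_dest', d)
            d = next(dest, None)
        else:
            yield ('at_both', s)
            s, d = next(src, None), next(dest, None)


assert list(classify(['a', 'b'], ['b', 'c'])) == [
    ('only_at_src', 'a'),
    ('at_both', 'b'),
    ('only_at_dest', 'c'),
]
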
diff --git a/awscli/customizations/s3/factory.py b/awscli/customizations/s3/factory.py
index 1692fd1cef86..afc98abe0f48 100644
--- a/awscli/customizations/s3/factory.py
+++ b/awscli/customizations/s3/factory.py
@@ -13,20 +13,22 @@
import logging
import awscrt.s3
+
+from awscli.compat import urlparse
+from awscli.customizations.s3 import constants
+from awscli.customizations.s3.transferconfig import (
+ create_transfer_config_from_runtime_config,
+)
from botocore.client import Config
from botocore.httpsession import DEFAULT_CA_BUNDLE
-from s3transfer.manager import TransferManager
from s3transfer.crt import (
- acquire_crt_s3_process_lock, create_s3_crt_client,
- BotocoreCRTRequestSerializer, CRTTransferManager,
- BotocoreCRTCredentialsWrapper
+ BotocoreCRTCredentialsWrapper,
+ BotocoreCRTRequestSerializer,
+ CRTTransferManager,
+ acquire_crt_s3_process_lock,
+ create_s3_crt_client,
)
-
-from awscli.compat import urlparse
-from awscli.customizations.s3 import constants
-from awscli.customizations.s3.transferconfig import \
- create_transfer_config_from_runtime_config
-
+from s3transfer.manager import TransferManager
LOGGER = logging.getLogger(__name__)
@@ -36,9 +38,7 @@ def __init__(self, session):
self._session = session
def create_client(self, params, is_source_client=False):
- create_client_kwargs = {
- 'verify': params['verify_ssl']
- }
+ create_client_kwargs = {'verify': params['verify_ssl']}
if params.get('sse') == 'aws:kms':
create_client_kwargs['config'] = Config(signature_version='s3v4')
region = params['region']
@@ -61,22 +61,24 @@ def __init__(self, session):
self._session = session
self._botocore_client_factory = ClientFactory(self._session)
- def create_transfer_manager(self, params, runtime_config,
- botocore_client=None):
+ def create_transfer_manager(
+ self, params, runtime_config, botocore_client=None
+ ):
client_type = self._compute_transfer_client_type(
- params, runtime_config)
+ params, runtime_config
+ )
if client_type == constants.CRT_TRANSFER_CLIENT:
return self._create_crt_transfer_manager(params, runtime_config)
else:
return self._create_classic_transfer_manager(
- params, runtime_config, botocore_client)
+ params, runtime_config, botocore_client
+ )
def _compute_transfer_client_type(self, params, runtime_config):
if params.get('paths_type') == 's3s3':
return constants.CLASSIC_TRANSFER_CLIENT
preferred_transfer_client = runtime_config.get(
- 'preferred_transfer_client',
- constants.AUTO_RESOLVE_TRANSFER_CLIENT
+ 'preferred_transfer_client', constants.AUTO_RESOLVE_TRANSFER_CLIENT
)
if preferred_transfer_client == constants.AUTO_RESOLVE_TRANSFER_CLIENT:
return self._resolve_transfer_client_type_for_system()
@@ -92,7 +94,7 @@ def _resolve_transfer_client_type_for_system(self):
is_running = self._is_crt_client_running_in_other_aws_cli_process()
LOGGER.debug(
'S3 CRT client running in different AWS CLI process: %s',
- is_running
+ is_running,
)
if not is_running:
transfer_client_type = constants.CRT_TRANSFER_CLIENT
@@ -114,7 +116,7 @@ def _create_crt_transfer_manager(self, params, runtime_config):
self._acquire_crt_s3_process_lock()
return CRTTransferManager(
self._create_crt_client(params, runtime_config),
- self._create_crt_request_serializer(params)
+ self._create_crt_request_serializer(params),
)
def _create_crt_client(self, params, runtime_config):
@@ -133,8 +135,9 @@ def _create_crt_client(self, params, runtime_config):
create_crt_client_kwargs['part_size'] = multipart_chunksize
if params.get('sign_request', True):
crt_credentials_provider = self._get_crt_credentials_provider()
- create_crt_client_kwargs[
- 'crt_credentials_provider'] = crt_credentials_provider
+ create_crt_client_kwargs['crt_credentials_provider'] = (
+ crt_credentials_provider
+ )
return create_s3_crt_client(**create_crt_client_kwargs)
@@ -144,23 +147,27 @@ def _create_crt_request_serializer(self, params):
{
'region_name': self._resolve_region(params),
'endpoint_url': params.get('endpoint_url'),
- }
+ },
)
- def _create_classic_transfer_manager(self, params, runtime_config,
- client=None):
+ def _create_classic_transfer_manager(
+ self, params, runtime_config, client=None
+ ):
if client is None:
client = self._botocore_client_factory.create_client(params)
transfer_config = create_transfer_config_from_runtime_config(
- runtime_config)
- transfer_config.max_in_memory_upload_chunks = \
+ runtime_config
+ )
+ transfer_config.max_in_memory_upload_chunks = (
self._MAX_IN_MEMORY_CHUNKS
- transfer_config.max_in_memory_download_chunks = \
+ )
+ transfer_config.max_in_memory_download_chunks = (
self._MAX_IN_MEMORY_CHUNKS
+ )
LOGGER.debug(
"Using a multipart threshold of %s and a part size of %s",
transfer_config.multipart_threshold,
- transfer_config.multipart_chunksize
+ transfer_config.multipart_chunksize,
)
return TransferManager(client, transfer_config)
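
# Simplified restatement of the resolution logic above: s3-to-s3 copies
# always use the classic transfer client; otherwise an explicit preference
# wins, and 'auto' picks CRT unless another AWS CLI process already holds
# the CRT lock (probed via a boolean stub here).
AUTO, CRT, CLASSIC = 'auto', 'crt', 'classic'


def compute_client_type(paths_type, preferred=AUTO, crt_busy_elsewhere=False):
    if paths_type == 's3s3':
        return CLASSIC
    if preferred == AUTO:
        return CLASSIC if crt_busy_elsewhere else CRT
    return preferred


assert compute_client_type('s3s3', preferred=CRT) == CLASSIC
assert compute_client_type('locals3') == CRT
assert compute_client_type('locals3', crt_busy_elsewhere=True) == CLASSIC
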
diff --git a/awscli/customizations/s3/fileformat.py b/awscli/customizations/s3/fileformat.py
index ef15fd6785af..4f9d40448510 100644
--- a/awscli/customizations/s3/fileformat.py
+++ b/awscli/customizations/s3/fileformat.py
@@ -53,9 +53,12 @@ def format(self, src, dest, parameters):
# will take on the name the user specified in the
# command line.
dest_path, use_src_name = format_table[dest_type](dest_path, dir_op)
- files = {'src': {'path': src_path, 'type': src_type},
- 'dest': {'path': dest_path, 'type': dest_type},
- 'dir_op': dir_op, 'use_src_name': use_src_name}
+ files = {
+ 'src': {'path': src_path, 'type': src_type},
+ 'dest': {'path': dest_path, 'type': dest_type},
+ 'dir_op': dir_op,
+ 'use_src_name': use_src_name,
+ }
return files
def local_format(self, path, dir_op):
diff --git a/awscli/customizations/s3/filegenerator.py b/awscli/customizations/s3/filegenerator.py
index e98d78c78edb..d68f26b7b527 100644
--- a/awscli/customizations/s3/filegenerator.py
+++ b/awscli/customizations/s3/filegenerator.py
@@ -11,17 +11,22 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
-import sys
import stat
+import sys
from dateutil.parser import parse
from dateutil.tz import tzlocal
-from botocore.exceptions import ClientError
-from awscli.customizations.s3.utils import find_bucket_key, get_file_stat
-from awscli.customizations.s3.utils import BucketLister, create_warning, \
- find_dest_path_comp_key, EPOCH_TIME
from awscli.compat import queue
+from awscli.customizations.s3.utils import (
+ EPOCH_TIME,
+ BucketLister,
+ create_warning,
+ find_bucket_key,
+ find_dest_path_comp_key,
+ get_file_stat,
+)
+from botocore.exceptions import ClientError
_open = open
@@ -70,6 +75,7 @@ def is_readable(path):
# This class is provided primarily to provide a detailed error message.
+
class FileDecodingError(Exception):
"""Raised when there was an issue decoding the file."""
@@ -84,17 +90,25 @@ def __init__(self, directory, filename):
self.file_name = filename
self.error_message = (
             'There was an error trying to decode the file %s in '
- 'directory "%s". \n%s' % (repr(self.file_name),
- self.directory,
- self.ADVICE)
+ 'directory "%s". \n%s'
+ % (repr(self.file_name), self.directory, self.ADVICE)
)
super(FileDecodingError, self).__init__(self.error_message)
class FileStat(object):
- def __init__(self, src, dest=None, compare_key=None, size=None,
- last_update=None, src_type=None, dest_type=None,
- operation_name=None, response_data=None):
+ def __init__(
+ self,
+ src,
+ dest=None,
+ compare_key=None,
+ size=None,
+ last_update=None,
+ src_type=None,
+ dest_type=None,
+ operation_name=None,
+ response_data=None,
+ ):
self.src = src
self.dest = dest
self.compare_key = compare_key
@@ -114,8 +128,16 @@ class FileGenerator(object):
under the same common prefix. The generator yields corresponding
``FileInfo`` objects to send to a ``Comparator`` or ``S3Handler``.
"""
- def __init__(self, client, operation_name, follow_symlinks=True,
- page_size=None, result_queue=None, request_parameters=None):
+
+ def __init__(
+ self,
+ client,
+ operation_name,
+ follow_symlinks=True,
+ page_size=None,
+ result_queue=None,
+ request_parameters=None,
+ ):
self._client = client
self.operation_name = operation_name
self.follow_symlinks = follow_symlinks
@@ -141,9 +163,12 @@ def call(self, files):
for src_path, extra_information in file_iterator:
dest_path, compare_key = find_dest_path_comp_key(files, src_path)
file_stat_kwargs = {
- 'src': src_path, 'dest': dest_path, 'compare_key': compare_key,
- 'src_type': src_type, 'dest_type': dest_type,
- 'operation_name': self.operation_name
+ 'src': src_path,
+ 'dest': dest_path,
+ 'compare_key': compare_key,
+ 'src_type': src_type,
+ 'dest_type': dest_type,
+ 'operation_name': self.operation_name,
}
self._inject_extra_information(file_stat_kwargs, extra_information)
yield FileStat(**file_stat_kwargs)
@@ -188,7 +213,8 @@ def list_files(self, path, dir_op):
names = []
for name in listdir_names:
if not self.should_ignore_file_with_decoding_warnings(
- path, name):
+ path, name
+ ):
file_path = join(path, name)
if isdir(file_path):
name = name + os.path.sep
@@ -225,8 +251,9 @@ def _validate_update_time(self, update_time, path):
warning = create_warning(
path=path,
error_message="File has an invalid timestamp. Passing epoch "
- "time as timestamp.",
- skip_file=False)
+ "time as timestamp.",
+ skip_file=False,
+ )
self.result_queue.put(warning)
return EPOCH_TIME
return update_time
@@ -251,8 +278,9 @@ def should_ignore_file_with_decoding_warnings(self, dirname, filename):
"""
if not isinstance(filename, str):
decoding_error = FileDecodingError(dirname, filename)
- warning = create_warning(repr(filename),
- decoding_error.error_message)
+ warning = create_warning(
+ repr(filename), decoding_error.error_message
+ )
self.result_queue.put(warning)
return True
path = os.path.join(dirname, filename)
@@ -290,10 +318,14 @@ def triggers_warning(self, path):
self.result_queue.put(warning)
return True
if is_special_file(path):
- warning = create_warning(path,
- ("File is character special device, "
- "block special device, FIFO, or "
- "socket."))
+ warning = create_warning(
+ path,
+ (
+ "File is character special device, "
+ "block special device, FIFO, or "
+ "socket."
+ ),
+ )
self.result_queue.put(warning)
return True
if not is_readable(path):
@@ -318,9 +350,12 @@ def list_objects(self, s3_path, dir_op):
else:
lister = BucketLister(self._client)
extra_args = self.request_parameters.get('ListObjectsV2', {})
- for key in lister.list_objects(bucket=bucket, prefix=prefix,
- page_size=self.page_size,
- extra_args=extra_args):
+ for key in lister.list_objects(
+ bucket=bucket,
+ prefix=prefix,
+ page_size=self.page_size,
+ extra_args=extra_args,
+ ):
source_path, response_data = key
if response_data['Size'] == 0 and source_path.endswith('/'):
if self.operation_name == 'delete':
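
# Standalone restatement of the listing filter above: zero-byte keys ending
# in '/' act as directory placeholders and are only surfaced when deleting.
def keep_key(key, size, operation_name):
    if size == 0 and key.endswith('/'):
        return operation_name == 'delete'
    return True


assert keep_key('photos/', 0, 'cp') is False
assert keep_key('photos/', 0, 'delete') is True
assert keep_key('photos/cat.jpg', 4096, 'cp') is True
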
diff --git a/awscli/customizations/s3/fileinfo.py b/awscli/customizations/s3/fileinfo.py
index 615be15e81f6..a1b05e97f547 100644
--- a/awscli/customizations/s3/fileinfo.py
+++ b/awscli/customizations/s3/fileinfo.py
@@ -38,11 +38,23 @@ class FileInfo(object):
from the list of a ListObjects or the response from a HeadObject. It
will only be filled if the task was generated from an S3 bucket.
"""
- def __init__(self, src, dest=None, compare_key=None, size=None,
- last_update=None, src_type=None, dest_type=None,
- operation_name=None, client=None, parameters=None,
- source_client=None, is_stream=False,
- associated_response_data=None):
+
+ def __init__(
+ self,
+ src,
+ dest=None,
+ compare_key=None,
+ size=None,
+ last_update=None,
+ src_type=None,
+ dest_type=None,
+ operation_name=None,
+ client=None,
+ parameters=None,
+ source_client=None,
+ is_stream=False,
+ associated_response_data=None,
+ ):
self.src = src
self.src_type = src_type
self.operation_name = operation_name
@@ -82,8 +94,11 @@ def is_glacier_compatible(self):
def _is_glacier_object(self, response_data):
glacier_storage_classes = ['GLACIER', 'DEEP_ARCHIVE']
if response_data:
- if response_data.get('StorageClass') in glacier_storage_classes \
- and not self._is_restored(response_data):
+ if response_data.get(
+ 'StorageClass'
+ ) in glacier_storage_classes and not self._is_restored(
+ response_data
+ ):
return True
return False
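
# Standalone restatement of the condition above: objects in the GLACIER or
# DEEP_ARCHIVE storage classes are incompatible with transfers unless a
# restore has completed. The Restore-header check mirrors the style of the
# surrounding code but is simplified here.
GLACIER_CLASSES = ('GLACIER', 'DEEP_ARCHIVE')


def is_glacier_object(response_data):
    if not response_data:
        return False
    restored = 'ongoing-request="false"' in response_data.get('Restore', '')
    return (
        response_data.get('StorageClass') in GLACIER_CLASSES and not restored
    )


assert is_glacier_object({'StorageClass': 'GLACIER'}) is True
assert is_glacier_object(
    {'StorageClass': 'GLACIER', 'Restore': 'ongoing-request="false"'}
) is False
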
diff --git a/awscli/customizations/s3/fileinfobuilder.py b/awscli/customizations/s3/fileinfobuilder.py
index d539bbb051ef..d9f8bce9ae14 100644
--- a/awscli/customizations/s3/fileinfobuilder.py
+++ b/awscli/customizations/s3/fileinfobuilder.py
@@ -18,8 +18,10 @@ class FileInfoBuilder(object):
This class takes a ``FileBase`` object's attributes and generates
a ``FileInfo`` object so that the operation can be performed.
"""
- def __init__(self, client, source_client=None,
- parameters = None, is_stream=False):
+
+ def __init__(
+ self, client, source_client=None, parameters=None, is_stream=False
+ ):
self._client = client
self._source_client = client
if source_client is not None:
@@ -57,8 +59,9 @@ def _inject_info(self, file_base):
# issue by swapping clients only in the case of a sync delete since
# swapping which client is used in the delete function would then break
# moving under s3v4.
- if (file_base.operation_name == 'delete' and
- self._parameters.get('delete')):
+ if file_base.operation_name == 'delete' and self._parameters.get(
+ 'delete'
+ ):
file_info_attr['client'] = self._source_client
file_info_attr['source_client'] = self._client
else:
diff --git a/awscli/customizations/s3/filters.py b/awscli/customizations/s3/filters.py
index f41820ac09de..04f926d73332 100644
--- a/awscli/customizations/s3/filters.py
+++ b/awscli/customizations/s3/filters.py
@@ -10,13 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import logging
import fnmatch
+import logging
import os
from awscli.customizations.s3.utils import split_s3_bucket_key
-
LOG = logging.getLogger(__name__)
@@ -28,24 +27,26 @@ def create_filter(parameters):
cli_filters = parameters['filters']
real_filters = []
for filter_type, filter_pattern in cli_filters:
- real_filters.append((filter_type.lstrip('-'),
- filter_pattern))
+ real_filters.append((filter_type.lstrip('-'), filter_pattern))
source_location = parameters['src']
if source_location.startswith('s3://'):
# This gives us (bucket, keyname) and we want
# the bucket to be the root dir.
- src_rootdir = _get_s3_root(source_location,
- parameters['dir_op'])
+ src_rootdir = _get_s3_root(source_location, parameters['dir_op'])
else:
- src_rootdir = _get_local_root(parameters['src'], parameters['dir_op'])
+ src_rootdir = _get_local_root(
+ parameters['src'], parameters['dir_op']
+ )
destination_location = parameters['dest']
if destination_location.startswith('s3://'):
- dst_rootdir = _get_s3_root(parameters['dest'],
- parameters['dir_op'])
+ dst_rootdir = _get_s3_root(
+ parameters['dest'], parameters['dir_op']
+ )
else:
- dst_rootdir = _get_local_root(parameters['dest'],
- parameters['dir_op'])
+ dst_rootdir = _get_local_root(
+ parameters['dest'], parameters['dir_op']
+ )
return Filter(real_filters, src_rootdir, dst_rootdir)
else:
@@ -77,6 +78,7 @@ class Filter(object):
"""
This is a universal exclude/include filter.
"""
+
def __init__(self, patterns, rootdir, dst_rootdir):
"""
:var patterns: A list of patterns. A pattern consists of a list
@@ -100,7 +102,8 @@ def _full_path_patterns(self, original_patterns, rootdir):
full_patterns = []
for pattern in original_patterns:
full_patterns.append(
- (pattern[0], os.path.join(rootdir, pattern[1])))
+ (pattern[0], os.path.join(rootdir, pattern[1]))
+ )
return full_patterns
def call(self, file_infos):
@@ -122,11 +125,16 @@ def call(self, file_infos):
current_file_status = self._match_pattern(pattern, file_info)
if current_file_status is not None:
file_status = current_file_status
- dst_current_file_status = self._match_pattern(dst_pattern, file_info)
+ dst_current_file_status = self._match_pattern(
+ dst_pattern, file_info
+ )
if dst_current_file_status is not None:
file_status = dst_current_file_status
- LOG.debug("=%s final filtered status, should_include: %s",
- file_path, file_status[1])
+ LOG.debug(
+ "=%s final filtered status, should_include: %s",
+ file_path,
+ file_status[1],
+ )
if file_status[1]:
yield file_info
@@ -141,13 +149,15 @@ def _match_pattern(self, pattern, file_info):
is_match = fnmatch.fnmatch(file_path, path_pattern)
if is_match and pattern_type == 'include':
file_status = (file_info, True)
- LOG.debug("%s matched include filter: %s",
- file_path, path_pattern)
+ LOG.debug("%s matched include filter: %s", file_path, path_pattern)
elif is_match and pattern_type == 'exclude':
file_status = (file_info, False)
- LOG.debug("%s matched exclude filter: %s",
- file_path, path_pattern)
+ LOG.debug("%s matched exclude filter: %s", file_path, path_pattern)
else:
- LOG.debug("%s did not match %s filter: %s",
- file_path, pattern_type, path_pattern)
+ LOG.debug(
+ "%s did not match %s filter: %s",
+ file_path,
+ pattern_type,
+ path_pattern,
+ )
return file_status
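
# Minimal sketch of the include/exclude semantics implemented above: filters
# are applied in order, the last matching pattern wins, and files start out
# included. Patterns here are assumed to be already joined to the root
# directory.
import fnmatch


def should_include(path, patterns):
    included = True
    for pattern_type, pattern in patterns:
        if fnmatch.fnmatch(path, pattern):
            included = pattern_type == 'include'
    return included


patterns = [('exclude', '*'), ('include', '*.txt')]
assert should_include('notes.txt', patterns) is True
assert should_include('photo.jpg', patterns) is False
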
diff --git a/awscli/customizations/s3/results.py b/awscli/customizations/s3/results.py
index 3a4ea5df44c3..11c118b0d72c 100644
--- a/awscli/customizations/s3/results.py
+++ b/awscli/customizations/s3/results.py
@@ -11,23 +11,19 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import division
+
import logging
import sys
import threading
import time
-from collections import namedtuple
-from collections import defaultdict
-
-from s3transfer.exceptions import CancelledError
-from s3transfer.exceptions import FatalError
-from s3transfer.subscribers import BaseSubscriber
+from collections import defaultdict, namedtuple
-from awscli.compat import queue, ensure_text_type
-from awscli.customizations.s3.utils import human_readable_size
-from awscli.customizations.utils import uni_print
-from awscli.customizations.s3.utils import WarningResult
+from awscli.compat import ensure_text_type, queue
from awscli.customizations.s3.subscribers import OnDoneFilteredSubscriber
-
+from awscli.customizations.s3.utils import WarningResult, human_readable_size
+from awscli.customizations.utils import uni_print
+from s3transfer.exceptions import CancelledError, FatalError
+from s3transfer.subscribers import BaseSubscriber
LOGGER = logging.getLogger(__name__)
@@ -51,8 +47,8 @@ def _create_new_result_cls(name, extra_fields=None, base_cls=BaseResult):
QueuedResult = _create_new_result_cls('QueuedResult', ['total_transfer_size'])
ProgressResult = _create_new_result_cls(
- 'ProgressResult', ['bytes_transferred', 'total_transfer_size',
- 'timestamp'])
+ 'ProgressResult', ['bytes_transferred', 'total_transfer_size', 'timestamp']
+)
SuccessResult = _create_new_result_cls('SuccessResult')
@@ -65,10 +61,12 @@ def _create_new_result_cls(name, extra_fields=None, base_cls=BaseResult):
CtrlCResult = _create_new_result_cls('CtrlCResult', base_cls=ErrorResult)
CommandResult = namedtuple(
- 'CommandResult', ['num_tasks_failed', 'num_tasks_warned'])
+ 'CommandResult', ['num_tasks_failed', 'num_tasks_warned']
+)
FinalTotalSubmissionsResult = namedtuple(
- 'FinalTotalSubmissionsResult', ['total_submissions'])
+ 'FinalTotalSubmissionsResult', ['total_submissions']
+)
class ShutdownThreadRequest(object):
@@ -91,7 +89,7 @@ def on_queued(self, future, **kwargs):
transfer_type=self._transfer_type,
src=self._src,
dest=self._dest,
- total_transfer_size=self._size
+ total_transfer_size=self._size,
)
)
@@ -105,7 +103,7 @@ def on_progress(self, future, bytes_transferred, **kwargs):
dest=self._dest,
bytes_transferred=bytes_transferred,
timestamp=time.time(),
- total_transfer_size=self._size
+ total_transfer_size=self._size,
)
)
@@ -139,12 +137,14 @@ def _on_failure(self, future, e):
class BaseResultHandler(object):
"""Base handler class to be called in the ResultProcessor"""
+
def __call__(self, result):
raise NotImplementedError('__call__()')
class ResultRecorder(BaseResultHandler):
"""Records and track transfer statistics based on results received"""
+
def __init__(self):
self.bytes_transferred = 0
self.bytes_failed_to_transfer = 0
@@ -175,14 +175,15 @@ def __init__(self):
def expected_totals_are_final(self):
return (
- self.final_expected_files_transferred ==
- self.expected_files_transferred
+ self.final_expected_files_transferred
+ == self.expected_files_transferred
)
def __call__(self, result):
"""Record the result of an individual Result object"""
self._result_handler_map.get(type(result), self._record_noop)(
- result=result)
+ result=result
+ )
def _get_ongoing_dict_key(self, result):
if not isinstance(result, BaseResult):
@@ -194,7 +195,7 @@ def _get_ongoing_dict_key(self, result):
for result_property in [result.transfer_type, result.src, result.dest]:
if result_property is not None:
key_parts.append(ensure_text_type(result_property))
- return u':'.join(key_parts)
+ return ':'.join(key_parts)
def _pop_result_from_ongoing_dicts(self, result):
ongoing_key = self._get_ongoing_dict_key(result)
@@ -210,8 +211,9 @@ def _record_queued_result(self, result, **kwargs):
if self.start_time is None:
self.start_time = time.time()
total_transfer_size = result.total_transfer_size
- self._ongoing_total_sizes[
- self._get_ongoing_dict_key(result)] = total_transfer_size
+ self._ongoing_total_sizes[self._get_ongoing_dict_key(result)] = (
+ total_transfer_size
+ )
# The total transfer size can be None if we do not know the size
# immediately so do not add to the total right away.
if total_transfer_size:
@@ -221,8 +223,9 @@ def _record_queued_result(self, result, **kwargs):
def _record_progress_result(self, result, **kwargs):
bytes_transferred = result.bytes_transferred
self._update_ongoing_transfer_size_if_unknown(result)
- self._ongoing_progress[
- self._get_ongoing_dict_key(result)] += bytes_transferred
+ self._ongoing_progress[self._get_ongoing_dict_key(result)] += (
+ bytes_transferred
+ )
self.bytes_transferred += bytes_transferred
# Since the start time is captured in the result recorder and
# capture timestamps in the subscriber, there is a chance that if
@@ -233,7 +236,8 @@ def _record_progress_result(self, result, **kwargs):
# negative progress being displayed or zero division occurring.
if result.timestamp > self.start_time:
self.bytes_transfer_speed = self.bytes_transferred / (
- result.timestamp - self.start_time)
+ result.timestamp - self.start_time
+ )
def _update_ongoing_transfer_size_if_unknown(self, result):
         # This is a special case when the transfer size was previously not
@@ -270,7 +274,8 @@ def _record_failure_result(self, result, **kwargs):
# the count for bytes transferred by just adding on the remaining bytes
# that did not get transferred.
total_progress, total_file_size = self._pop_result_from_ongoing_dicts(
- result)
+ result
+ )
if total_file_size is not None:
progress_left = total_file_size - total_progress
self.bytes_failed_to_transfer += progress_left
@@ -299,25 +304,17 @@ class ResultPrinter(BaseResultHandler):
FILE_PROGRESS_FORMAT = (
'Completed {files_completed} file(s) with ' + _FILES_REMAINING
)
- SUCCESS_FORMAT = (
- u'{transfer_type}: {transfer_location}'
- )
- DRY_RUN_FORMAT = u'(dryrun) ' + SUCCESS_FORMAT
- FAILURE_FORMAT = (
- u'{transfer_type} failed: {transfer_location} {exception}'
- )
+ SUCCESS_FORMAT = '{transfer_type}: {transfer_location}'
+ DRY_RUN_FORMAT = '(dryrun) ' + SUCCESS_FORMAT
+ FAILURE_FORMAT = '{transfer_type} failed: {transfer_location} {exception}'
# TODO: Add "warning: " prefix once all commands are converted to using
# result printer and remove "warning: " prefix from ``create_warning``.
- WARNING_FORMAT = (
- u'{message}'
- )
- ERROR_FORMAT = (
- u'fatal error: {exception}'
- )
+ WARNING_FORMAT = '{message}'
+ ERROR_FORMAT = 'fatal error: {exception}'
CTRL_C_MSG = 'cancelled: ctrl-c received'
- SRC_DEST_TRANSFER_LOCATION_FORMAT = u'{src} to {dest}'
- SRC_TRANSFER_LOCATION_FORMAT = u'{src}'
+ SRC_DEST_TRANSFER_LOCATION_FORMAT = '{src} to {dest}'
+ SRC_TRANSFER_LOCATION_FORMAT = '{src}'
def __init__(self, result_recorder, out_file=None, error_file=None):
"""Prints status of ongoing transfer
@@ -349,14 +346,14 @@ def __init__(self, result_recorder, out_file=None, error_file=None):
ErrorResult: self._print_error,
CtrlCResult: self._print_ctrl_c,
DryRunResult: self._print_dry_run,
- FinalTotalSubmissionsResult:
- self._clear_progress_if_no_more_expected_transfers,
+ FinalTotalSubmissionsResult: self._clear_progress_if_no_more_expected_transfers,
}
def __call__(self, result):
"""Print the progress of the ongoing transfer based on a result"""
self._result_handler_map.get(type(result), self._print_noop)(
- result=result)
+ result=result
+ )
def _print_noop(self, **kwargs):
# If the result does not have a handler, then do nothing with it.
@@ -365,7 +362,7 @@ def _print_noop(self, **kwargs):
def _print_dry_run(self, result, **kwargs):
statement = self.DRY_RUN_FORMAT.format(
transfer_type=result.transfer_type,
- transfer_location=self._get_transfer_location(result)
+ transfer_location=self._get_transfer_location(result),
)
statement = self._adjust_statement_padding(statement)
self._print_to_out_file(statement)
@@ -373,7 +370,7 @@ def _print_dry_run(self, result, **kwargs):
def _print_success(self, result, **kwargs):
success_statement = self.SUCCESS_FORMAT.format(
transfer_type=result.transfer_type,
- transfer_location=self._get_transfer_location(result)
+ transfer_location=self._get_transfer_location(result),
)
success_statement = self._adjust_statement_padding(success_statement)
self._print_to_out_file(success_statement)
@@ -383,7 +380,7 @@ def _print_failure(self, result, **kwargs):
failure_statement = self.FAILURE_FORMAT.format(
transfer_type=result.transfer_type,
transfer_location=self._get_transfer_location(result),
- exception=result.exception
+ exception=result.exception,
)
failure_statement = self._adjust_statement_padding(failure_statement)
self._print_to_error_file(failure_statement)
@@ -397,7 +394,8 @@ def _print_warning(self, result, **kwargs):
def _print_error(self, result, **kwargs):
self._flush_error_statement(
- self.ERROR_FORMAT.format(exception=result.exception))
+ self.ERROR_FORMAT.format(exception=result.exception)
+ )
def _print_ctrl_c(self, result, **kwargs):
self._flush_error_statement(self.CTRL_C_MSG)
@@ -410,7 +408,8 @@ def _get_transfer_location(self, result):
if result.dest is None:
return self.SRC_TRANSFER_LOCATION_FORMAT.format(src=result.src)
return self.SRC_DEST_TRANSFER_LOCATION_FORMAT.format(
- src=result.src, dest=result.dest)
+ src=result.src, dest=result.dest
+ )
def _redisplay_progress(self):
# Reset to zero because done statements are printed with new lines
@@ -426,34 +425,40 @@ def _add_progress_if_needed(self):
def _print_progress(self, **kwargs):
# Get all of the statistics in the correct form.
remaining_files = self._get_expected_total(
- str(self._result_recorder.expected_files_transferred -
- self._result_recorder.files_transferred)
+ str(
+ self._result_recorder.expected_files_transferred
+ - self._result_recorder.files_transferred
+ )
)
# Create the display statement.
if self._result_recorder.expected_bytes_transferred > 0:
bytes_completed = human_readable_size(
- self._result_recorder.bytes_transferred +
- self._result_recorder.bytes_failed_to_transfer
+ self._result_recorder.bytes_transferred
+ + self._result_recorder.bytes_failed_to_transfer
)
expected_bytes_completed = self._get_expected_total(
human_readable_size(
- self._result_recorder.expected_bytes_transferred))
+ self._result_recorder.expected_bytes_transferred
+ )
+ )
- transfer_speed = human_readable_size(
- self._result_recorder.bytes_transfer_speed) + '/s'
+ transfer_speed = (
+ human_readable_size(self._result_recorder.bytes_transfer_speed)
+ + '/s'
+ )
progress_statement = self.BYTE_PROGRESS_FORMAT.format(
bytes_completed=bytes_completed,
expected_bytes_completed=expected_bytes_completed,
transfer_speed=transfer_speed,
- remaining_files=remaining_files
+ remaining_files=remaining_files,
)
else:
# We're not expecting any bytes to be transferred, so we should
             # only print information about the number of files transferred.
progress_statement = self.FILE_PROGRESS_FORMAT.format(
files_completed=self._result_recorder.files_transferred,
- remaining_files=remaining_files
+ remaining_files=remaining_files,
)
if not self._result_recorder.expected_totals_are_final():
@@ -461,7 +466,8 @@ def _print_progress(self, **kwargs):
# Make sure that it overrides any previous progress bar.
progress_statement = self._adjust_statement_padding(
- progress_statement, ending_char='\r')
+ progress_statement, ending_char='\r'
+ )
# We do not want to include the carriage return in this calculation
# as progress length is used for determining whitespace padding.
# So we subtract one off of the length.
@@ -473,7 +479,8 @@ def _print_progress(self, **kwargs):
def _get_expected_total(self, expected_total):
if not self._result_recorder.expected_totals_are_final():
return self._ESTIMATED_EXPECTED_TOTAL.format(
- expected_total=expected_total)
+ expected_total=expected_total
+ )
return expected_total
def _adjust_statement_padding(self, print_statement, ending_char='\n'):
@@ -500,12 +507,14 @@ def _clear_progress_if_no_more_expected_transfers(self, **kwargs):
class NoProgressResultPrinter(ResultPrinter):
"""A result printer that doesn't print progress"""
+
def _print_progress(self, **kwargs):
pass
class OnlyShowErrorsResultPrinter(ResultPrinter):
"""A result printer that only prints out errors"""
+
def _print_progress(self, **kwargs):
pass
@@ -537,7 +546,8 @@ def run(self):
if isinstance(result, ShutdownThreadRequest):
LOGGER.debug(
'Shutdown request received in result processing '
- 'thread, shutting down result thread.')
+ 'thread, shutting down result thread.'
+ )
break
if self._result_handlers_enabled:
self._process_result(result)
@@ -558,7 +568,11 @@ def _process_result(self, result):
except Exception as e:
LOGGER.debug(
'Error processing result %s with handler %s: %s',
- result, result_handler, e, exc_info=True)
+ result,
+ result_handler,
+ e,
+ exc_info=True,
+ )
class CommandResultRecorder(object):
@@ -600,7 +614,7 @@ def get_command_result(self):
"""
return CommandResult(
self._result_recorder.files_failed + self._result_recorder.errors,
- self._result_recorder.files_warned
+ self._result_recorder.files_warned,
)
def notify_total_submissions(self, total):
@@ -612,8 +626,11 @@ def __enter__(self):
def __exit__(self, exc_type, exc_value, *args):
if exc_type:
- LOGGER.debug('Exception caught during command execution: %s',
- exc_value, exc_info=True)
+ LOGGER.debug(
+ 'Exception caught during command execution: %s',
+ exc_value,
+ exc_info=True,
+ )
self.result_queue.put(ErrorResult(exception=exc_value))
self.shutdown()
return True
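
# Sketch of the result-class factory pattern the reordered imports above feed
# into: new result types are namedtuples that extend a base type's fields, so
# handlers can dispatch on type while sharing common attributes. Names are
# illustrative, not the exact shipped helper.
from collections import namedtuple

BaseResult = namedtuple('BaseResult', ['transfer_type', 'src', 'dest'])


def create_result_cls(name, extra_fields=(), base_cls=BaseResult):
    return namedtuple(name, list(base_cls._fields) + list(extra_fields))


QueuedResult = create_result_cls('QueuedResult', ['total_transfer_size'])
result = QueuedResult('upload', 'a.txt', 's3://bucket/a.txt', 1024)
assert result.total_transfer_size == 1024
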
diff --git a/awscli/customizations/s3/s3.py b/awscli/customizations/s3/s3.py
index 7d9c3d6a70c9..725bd0ca9bf7 100644
--- a/awscli/customizations/s3/s3.py
+++ b/awscli/customizations/s3/s3.py
@@ -12,11 +12,20 @@
# language governing permissions and limitations under the License.
from awscli.customizations import utils
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.s3.subcommands import ListCommand, WebsiteCommand, \
- CpCommand, MvCommand, RmCommand, SyncCommand, MbCommand, RbCommand, \
- PresignCommand
-from awscli.customizations.s3.syncstrategy.register import \
- register_sync_strategies
+from awscli.customizations.s3.subcommands import (
+ CpCommand,
+ ListCommand,
+ MbCommand,
+ MvCommand,
+ PresignCommand,
+ RbCommand,
+ RmCommand,
+ SyncCommand,
+ WebsiteCommand,
+)
+from awscli.customizations.s3.syncstrategy.register import (
+ register_sync_strategies,
+)
def awscli_initialize(cli):
diff --git a/awscli/customizations/s3/s3handler.py b/awscli/customizations/s3/s3handler.py
index 1f875312d79b..43d86ca48f75 100644
--- a/awscli/customizations/s3/s3handler.py
+++ b/awscli/customizations/s3/s3handler.py
@@ -13,37 +13,46 @@
import logging
import os
-from s3transfer.manager import TransferManager
-
-from awscli.customizations.s3.utils import (
- human_readable_size, MAX_UPLOAD_SIZE, find_bucket_key, relative_path,
- create_warning, NonSeekableStream)
-from awscli.customizations.s3.transferconfig import \
- create_transfer_config_from_runtime_config
-from awscli.customizations.s3.results import QueuedResultSubscriber
-from awscli.customizations.s3.results import ProgressResultSubscriber
-from awscli.customizations.s3.results import DoneResultSubscriber
-from awscli.customizations.s3.results import QueuedResult
-from awscli.customizations.s3.results import SuccessResult
-from awscli.customizations.s3.results import FailureResult
-from awscli.customizations.s3.results import DryRunResult
-from awscli.customizations.s3.results import ResultRecorder
-from awscli.customizations.s3.results import ResultPrinter
-from awscli.customizations.s3.results import OnlyShowErrorsResultPrinter
-from awscli.customizations.s3.results import NoProgressResultPrinter
-from awscli.customizations.s3.results import ResultProcessor
-from awscli.customizations.s3.results import CommandResultRecorder
-from awscli.customizations.s3.utils import RequestParamsMapper
-from awscli.customizations.s3.utils import StdoutBytesWriter
+from awscli.compat import get_binary_stdin
+from awscli.customizations.s3.results import (
+ CommandResultRecorder,
+ DoneResultSubscriber,
+ DryRunResult,
+ FailureResult,
+ NoProgressResultPrinter,
+ OnlyShowErrorsResultPrinter,
+ ProgressResultSubscriber,
+ QueuedResult,
+ QueuedResultSubscriber,
+ ResultPrinter,
+ ResultProcessor,
+ ResultRecorder,
+ SuccessResult,
+)
from awscli.customizations.s3.subscribers import (
- ProvideSizeSubscriber, ProvideUploadContentTypeSubscriber,
+ CopyPropsSubscriberFactory,
+ DeleteCopySourceObjectSubscriber,
+ DeleteSourceFileSubscriber,
+ DeleteSourceObjectSubscriber,
+ DirectoryCreatorSubscriber,
ProvideLastModifiedTimeSubscriber,
- CopyPropsSubscriberFactory, DirectoryCreatorSubscriber,
- DeleteSourceFileSubscriber, DeleteSourceObjectSubscriber,
- DeleteCopySourceObjectSubscriber
+ ProvideSizeSubscriber,
+ ProvideUploadContentTypeSubscriber,
)
-from awscli.compat import get_binary_stdin
-
+from awscli.customizations.s3.transferconfig import (
+ create_transfer_config_from_runtime_config,
+)
+from awscli.customizations.s3.utils import (
+ MAX_UPLOAD_SIZE,
+ NonSeekableStream,
+ RequestParamsMapper,
+ StdoutBytesWriter,
+ create_warning,
+ find_bucket_key,
+ human_readable_size,
+ relative_path,
+)
+from s3transfer.manager import TransferManager
LOGGER = logging.getLogger(__name__)
@@ -73,12 +82,15 @@ def __call__(self, transfer_manager, result_queue):
result_processor_handlers = [result_recorder]
self._add_result_printer(result_recorder, result_processor_handlers)
result_processor = ResultProcessor(
- result_queue, result_processor_handlers)
+ result_queue, result_processor_handlers
+ )
command_result_recorder = CommandResultRecorder(
- result_queue, result_recorder, result_processor)
+ result_queue, result_recorder, result_processor
+ )
return S3TransferHandler(
- transfer_manager, self._cli_params, command_result_recorder)
+ transfer_manager, self._cli_params, command_result_recorder
+ )
def _add_result_printer(self, result_recorder, result_processor_handlers):
if self._cli_params.get('quiet'):
@@ -119,8 +131,9 @@ def __init__(self, transfer_manager, cli_params, result_command_recorder):
self._result_command_recorder = result_command_recorder
submitter_args = (
- self._transfer_manager, self._result_command_recorder.result_queue,
- cli_params
+ self._transfer_manager,
+ self._result_command_recorder.result_queue,
+ cli_params,
)
self._submitters = [
UploadStreamRequestSubmitter(*submitter_args),
@@ -129,7 +142,7 @@ def __init__(self, transfer_manager, cli_params, result_command_recorder):
DownloadRequestSubmitter(*submitter_args),
CopyRequestSubmitter(*submitter_args),
DeleteRequestSubmitter(*submitter_args),
- LocalDeleteRequestSubmitter(*submitter_args)
+ LocalDeleteRequestSubmitter(*submitter_args),
]
def call(self, fileinfos):
@@ -153,7 +166,8 @@ def call(self, fileinfos):
total_submissions += 1
break
self._result_command_recorder.notify_total_submissions(
- total_submissions)
+ total_submissions
+ )
return self._result_command_recorder.get_command_result()
@@ -219,7 +233,8 @@ def _do_submit(self, fileinfo):
self.REQUEST_MAPPER_METHOD(extra_args, self._cli_params)
if not self._cli_params.get('dryrun'):
return self._submit_transfer_request(
- fileinfo, extra_args, self._get_subscribers(fileinfo))
+ fileinfo, extra_args, self._get_subscribers(fileinfo)
+ )
else:
self._submit_dryrun(fileinfo)
@@ -232,9 +247,8 @@ def _get_subscribers(self, fileinfo):
subscribers.extend(
[
ProgressResultSubscriber(**result_subscriber_kwargs),
- DoneResultSubscriber(**result_subscriber_kwargs)
+ DoneResultSubscriber(**result_subscriber_kwargs),
]
-
)
return subscribers
@@ -251,8 +265,9 @@ def _get_result_subscriber_kwargs(self, fileinfo):
def _submit_dryrun(self, fileinfo):
transfer_type = self._get_transfer_type(fileinfo)
src, dest = self._format_src_dest(fileinfo)
- self._result_queue.put(DryRunResult(
- transfer_type=transfer_type, src=src, dest=dest))
+ self._result_queue.put(
+ DryRunResult(transfer_type=transfer_type, src=src, dest=dest)
+ )
def _add_provide_size_subscriber(self, subscribers, fileinfo):
subscribers.append(ProvideSizeSubscriber(fileinfo.size))
@@ -280,27 +295,27 @@ def _get_warning_handlers(self):
return []
def _should_inject_content_type(self):
- return (
- self._cli_params.get('guess_mime_type') and
- not self._cli_params.get('content_type')
- )
+ return self._cli_params.get(
+ 'guess_mime_type'
+ ) and not self._cli_params.get('content_type')
def _warn_glacier(self, fileinfo):
if not self._cli_params.get('force_glacier_transfer'):
if not fileinfo.is_glacier_compatible():
LOGGER.debug(
'Encountered glacier object s3://%s. Not performing '
- '%s on object.' % (fileinfo.src, fileinfo.operation_name))
+ '%s on object.' % (fileinfo.src, fileinfo.operation_name)
+ )
if not self._cli_params.get('ignore_glacier_warnings'):
warning = create_warning(
- 's3://'+fileinfo.src,
+ 's3://' + fileinfo.src,
'Object is of storage class GLACIER. Unable to '
'perform %s operations on GLACIER objects. You must '
'restore the object to be able to perform the '
'operation. See aws s3 %s help for additional '
'parameter options to ignore or force these '
- 'transfers.' %
- (fileinfo.operation_name, fileinfo.operation_name)
+ 'transfers.'
+ % (fileinfo.operation_name, fileinfo.operation_name),
)
self._result_queue.put(warning)
return True
@@ -311,10 +326,12 @@ def _warn_parent_reference(self, fileinfo):
# need to take that into account when checking for a parent prefix.
parent_prefix = '..' + os.path.sep
escapes_cwd = os.path.normpath(fileinfo.compare_key).startswith(
- parent_prefix)
+ parent_prefix
+ )
if escapes_cwd:
warning = create_warning(
- fileinfo.compare_key, "File references a parent directory.")
+ fileinfo.compare_key, "File references a parent directory."
+ )
self._result_queue.put(warning)
return True
return False
@@ -353,8 +370,11 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.dest)
filein = self._get_filein(fileinfo)
return self._transfer_manager.upload(
- fileobj=filein, bucket=bucket, key=key,
- extra_args=extra_args, subscribers=subscribers
+ fileobj=filein,
+ bucket=bucket,
+ key=key,
+ extra_args=extra_args,
+ subscribers=subscribers,
)
def _get_filein(self, fileinfo):
@@ -366,11 +386,13 @@ def _get_warning_handlers(self):
def _warn_if_too_large(self, fileinfo):
if getattr(fileinfo, 'size') and fileinfo.size > MAX_UPLOAD_SIZE:
file_path = relative_path(fileinfo.src)
- warning_message = (
- "File %s exceeds s3 upload limit of %s." % (
- file_path, human_readable_size(MAX_UPLOAD_SIZE)))
+ warning_message = "File %s exceeds s3 upload limit of %s." % (
+ file_path,
+ human_readable_size(MAX_UPLOAD_SIZE),
+ )
warning = create_warning(
- file_path, warning_message, skip_file=False)
+ file_path, warning_message, skip_file=False
+ )
self._result_queue.put(warning)
def _format_src_dest(self, fileinfo):
@@ -387,18 +409,25 @@ def can_submit(self, fileinfo):
def _add_additional_subscribers(self, subscribers, fileinfo):
subscribers.append(DirectoryCreatorSubscriber())
- subscribers.append(ProvideLastModifiedTimeSubscriber(
- fileinfo.last_update, self._result_queue))
+ subscribers.append(
+ ProvideLastModifiedTimeSubscriber(
+ fileinfo.last_update, self._result_queue
+ )
+ )
if self._cli_params.get('is_move', False):
- subscribers.append(DeleteSourceObjectSubscriber(
- fileinfo.source_client))
+ subscribers.append(
+ DeleteSourceObjectSubscriber(fileinfo.source_client)
+ )
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.src)
fileout = self._get_fileout(fileinfo)
return self._transfer_manager.download(
- fileobj=fileout, bucket=bucket, key=key,
- extra_args=extra_args, subscribers=subscribers
+ fileobj=fileout,
+ bucket=bucket,
+ key=key,
+ extra_args=extra_args,
+ subscribers=subscribers,
)
def _get_fileout(self, fileinfo):
@@ -423,8 +452,9 @@ def _add_additional_subscribers(self, subscribers, fileinfo):
if not self._cli_params.get('metadata_directive'):
self._add_copy_props_subscribers(subscribers, fileinfo)
if self._cli_params.get('is_move', False):
- subscribers.append(DeleteCopySourceObjectSubscriber(
- fileinfo.source_client))
+ subscribers.append(
+ DeleteCopySourceObjectSubscriber(fileinfo.source_client)
+ )
def _add_copy_props_subscribers(self, subscribers, fileinfo):
copy_props_factory = CopyPropsSubscriberFactory(
@@ -439,9 +469,12 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
source_bucket, source_key = find_bucket_key(fileinfo.src)
copy_source = {'Bucket': source_bucket, 'Key': source_key}
return self._transfer_manager.copy(
- bucket=bucket, key=key, copy_source=copy_source,
- extra_args=extra_args, subscribers=subscribers,
- source_client=fileinfo.source_client
+ bucket=bucket,
+ key=key,
+ copy_source=copy_source,
+ extra_args=extra_args,
+ subscribers=subscribers,
+ source_client=fileinfo.source_client,
)
def _get_warning_handlers(self):
@@ -455,9 +488,8 @@ def _format_src_dest(self, fileinfo):
class UploadStreamRequestSubmitter(UploadRequestSubmitter):
def can_submit(self, fileinfo):
- return (
- fileinfo.operation_name == 'upload' and
- self._cli_params.get('is_stream')
+ return fileinfo.operation_name == 'upload' and self._cli_params.get(
+ 'is_stream'
)
def _add_provide_size_subscriber(self, subscribers, fileinfo):
@@ -478,9 +510,8 @@ def _format_local_path(self, path):
class DownloadStreamRequestSubmitter(DownloadRequestSubmitter):
def can_submit(self, fileinfo):
- return (
- fileinfo.operation_name == 'download' and
- self._cli_params.get('is_stream')
+ return fileinfo.operation_name == 'download' and self._cli_params.get(
+ 'is_stream'
)
def _add_provide_size_subscriber(self, subscribers, fileinfo):
@@ -500,8 +531,9 @@ class DeleteRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = RequestParamsMapper.map_delete_object_params
def can_submit(self, fileinfo):
- return fileinfo.operation_name == 'delete' and \
- fileinfo.src_type == 's3'
+ return (
+ fileinfo.operation_name == 'delete' and fileinfo.src_type == 's3'
+ )
def _add_provide_size_subscriber(self, subscribers, fileinfo):
pass
@@ -509,8 +541,11 @@ def _add_provide_size_subscriber(self, subscribers, fileinfo):
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.src)
return self._transfer_manager.delete(
- bucket=bucket, key=key, extra_args=extra_args,
- subscribers=subscribers)
+ bucket=bucket,
+ key=key,
+ extra_args=extra_args,
+ subscribers=subscribers,
+ )
def _format_src_dest(self, fileinfo):
return self._format_s3_path(fileinfo.src), None
@@ -520,8 +555,10 @@ class LocalDeleteRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = None
def can_submit(self, fileinfo):
- return fileinfo.operation_name == 'delete' and \
- fileinfo.src_type == 'local'
+ return (
+ fileinfo.operation_name == 'delete'
+ and fileinfo.src_type == 'local'
+ )
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
# This is quirky but essentially instead of relying on a built-in
@@ -537,19 +574,15 @@ def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
# deleting a local file only happens for sync --delete downloads and
# is very fast compared to all of the other types of transfers.
src, dest = self._format_src_dest(fileinfo)
- result_kwargs = {
- 'transfer_type': 'delete',
- 'src': src,
- 'dest': dest
- }
+ result_kwargs = {'transfer_type': 'delete', 'src': src, 'dest': dest}
try:
- self._result_queue.put(QueuedResult(
- total_transfer_size=0, **result_kwargs))
+ self._result_queue.put(
+ QueuedResult(total_transfer_size=0, **result_kwargs)
+ )
os.remove(fileinfo.src)
self._result_queue.put(SuccessResult(**result_kwargs))
except Exception as e:
- self._result_queue.put(
- FailureResult(exception=e, **result_kwargs))
+ self._result_queue.put(FailureResult(exception=e, **result_kwargs))
finally:
# Return True to indicate that the transfer was submitted
return True
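
As the comment above notes, the local delete bypasses the transfer manager entirely, so the submitter narrates the whole transfer lifecycle on the result queue itself: one queued record, then success or failure. A rough standalone sketch of that protocol, with plain dicts standing in for awscli's QueuedResult/SuccessResult/FailureResult records:

import os
import queue

def delete_local_file(path, result_queue):
    # Plain dicts stand in for awscli's result namedtuples.
    base = {'transfer_type': 'delete', 'src': path, 'dest': None}
    result_queue.put({'state': 'queued', 'total_transfer_size': 0, **base})
    try:
        os.remove(path)
        result_queue.put({'state': 'success', **base})
    except OSError as e:
        result_queue.put({'state': 'failure', 'exception': e, **base})
    return True  # "submitted", whether or not the delete succeeded

q = queue.Queue()
open('scratch.txt', 'w').close()
delete_local_file('scratch.txt', q)
while not q.empty():
    print(q.get())
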
diff --git a/awscli/customizations/s3/subcommands.py b/awscli/customizations/s3/subcommands.py
index 1c51238c26f6..f6d614057dd6 100644
--- a/awscli/customizations/s3/subcommands.py
+++ b/awscli/customizations/s3/subcommands.py
@@ -10,146 +10,216 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import os
import logging
+import os
import sys
-from botocore.client import Config
-from botocore.utils import is_s3express_bucket, ensure_boolean
from dateutil.parser import parse
from dateutil.tz import tzlocal
from awscli.compat import queue
from awscli.customizations.commands import BasicCommand
+from awscli.customizations.exceptions import ParamValidationError
+from awscli.customizations.s3 import transferconfig
from awscli.customizations.s3.comparator import Comparator
from awscli.customizations.s3.factory import (
- ClientFactory, TransferManagerFactory
+ ClientFactory,
+ TransferManagerFactory,
)
-from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder
from awscli.customizations.s3.fileformat import FileFormat
from awscli.customizations.s3.filegenerator import FileGenerator
from awscli.customizations.s3.fileinfo import FileInfo
+from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder
from awscli.customizations.s3.filters import create_filter
from awscli.customizations.s3.s3handler import S3TransferHandlerFactory
-from awscli.customizations.s3.utils import find_bucket_key, AppendFilter, \
- find_dest_path_comp_key, human_readable_size, \
- RequestParamsMapper, split_s3_bucket_key, block_unsupported_resources, \
- S3PathResolver
+from awscli.customizations.s3.syncstrategy.base import (
+ MissingFileSync,
+ NeverSync,
+ SizeAndLastModifiedSync,
+)
+from awscli.customizations.s3.utils import (
+ AppendFilter,
+ RequestParamsMapper,
+ S3PathResolver,
+ block_unsupported_resources,
+ find_bucket_key,
+ find_dest_path_comp_key,
+ human_readable_size,
+ split_s3_bucket_key,
+)
from awscli.customizations.utils import uni_print
-from awscli.customizations.s3.syncstrategy.base import MissingFileSync, \
- SizeAndLastModifiedSync, NeverSync
-from awscli.customizations.s3 import transferconfig
-from awscli.customizations.exceptions import ParamValidationError
-
+from botocore.client import Config
+from botocore.utils import ensure_boolean, is_s3express_bucket
LOGGER = logging.getLogger(__name__)
-RECURSIVE = {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op',
- 'help_text': (
- "Command is performed on all files or objects "
- "under the specified directory or prefix.")}
+RECURSIVE = {
+ 'name': 'recursive',
+ 'action': 'store_true',
+ 'dest': 'dir_op',
+ 'help_text': (
+ "Command is performed on all files or objects "
+ "under the specified directory or prefix."
+ ),
+}
-HUMAN_READABLE = {'name': 'human-readable', 'action': 'store_true',
- 'help_text': "Displays file sizes in human readable format."}
+HUMAN_READABLE = {
+ 'name': 'human-readable',
+ 'action': 'store_true',
+ 'help_text': "Displays file sizes in human readable format.",
+}
-SUMMARIZE = {'name': 'summarize', 'action': 'store_true',
- 'help_text': (
- "Displays summary information "
- "(number of objects, total size).")}
+SUMMARIZE = {
+ 'name': 'summarize',
+ 'action': 'store_true',
+ 'help_text': (
+ "Displays summary information " "(number of objects, total size)."
+ ),
+}
-DRYRUN = {'name': 'dryrun', 'action': 'store_true',
- 'help_text': (
- "Displays the operations that would be performed using the "
- "specified command without actually running them.")}
+DRYRUN = {
+ 'name': 'dryrun',
+ 'action': 'store_true',
+ 'help_text': (
+ "Displays the operations that would be performed using the "
+ "specified command without actually running them."
+ ),
+}
-QUIET = {'name': 'quiet', 'action': 'store_true',
- 'help_text': (
- "Does not display the operations performed from the specified "
- "command.")}
+QUIET = {
+ 'name': 'quiet',
+ 'action': 'store_true',
+ 'help_text': (
+ "Does not display the operations performed from the specified "
+ "command."
+ ),
+}
-FORCE = {'name': 'force', 'action': 'store_true',
- 'help_text': (
- "Deletes all objects in the bucket including the bucket itself. "
- "Note that versioned objects will not be deleted in this "
- "process which would cause the bucket deletion to fail because "
- "the bucket would not be empty. To delete versioned "
- "objects use the ``s3api delete-object`` command with "
- "the ``--version-id`` parameter.")}
+FORCE = {
+ 'name': 'force',
+ 'action': 'store_true',
+ 'help_text': (
+ "Deletes all objects in the bucket including the bucket itself. "
+ "Note that versioned objects will not be deleted in this "
+ "process which would cause the bucket deletion to fail because "
+ "the bucket would not be empty. To delete versioned "
+ "objects use the ``s3api delete-object`` command with "
+ "the ``--version-id`` parameter."
+ ),
+}
-FOLLOW_SYMLINKS = {'name': 'follow-symlinks', 'action': 'store_true',
- 'default': True, 'group_name': 'follow_symlinks',
- 'help_text': (
- "Symbolic links are followed "
- "only when uploading to S3 from the local filesystem. "
- "Note that S3 does not support symbolic links, so the "
- "contents of the link target are uploaded under the "
- "name of the link. When neither ``--follow-symlinks`` "
- "nor ``--no-follow-symlinks`` is specified, the default "
- "is to follow symlinks.")}
+FOLLOW_SYMLINKS = {
+ 'name': 'follow-symlinks',
+ 'action': 'store_true',
+ 'default': True,
+ 'group_name': 'follow_symlinks',
+ 'help_text': (
+ "Symbolic links are followed "
+ "only when uploading to S3 from the local filesystem. "
+ "Note that S3 does not support symbolic links, so the "
+ "contents of the link target are uploaded under the "
+ "name of the link. When neither ``--follow-symlinks`` "
+ "nor ``--no-follow-symlinks`` is specified, the default "
+ "is to follow symlinks."
+ ),
+}
-NO_FOLLOW_SYMLINKS = {'name': 'no-follow-symlinks', 'action': 'store_false',
- 'dest': 'follow_symlinks', 'default': True,
- 'group_name': 'follow_symlinks'}
+NO_FOLLOW_SYMLINKS = {
+ 'name': 'no-follow-symlinks',
+ 'action': 'store_false',
+ 'dest': 'follow_symlinks',
+ 'default': True,
+ 'group_name': 'follow_symlinks',
+}
-NO_GUESS_MIME_TYPE = {'name': 'no-guess-mime-type', 'action': 'store_false',
- 'dest': 'guess_mime_type', 'default': True,
- 'help_text': (
- "Do not try to guess the mime type for "
- "uploaded files. By default the mime type of a "
- "file is guessed when it is uploaded.")}
+NO_GUESS_MIME_TYPE = {
+ 'name': 'no-guess-mime-type',
+ 'action': 'store_false',
+ 'dest': 'guess_mime_type',
+ 'default': True,
+ 'help_text': (
+ "Do not try to guess the mime type for "
+ "uploaded files. By default the mime type of a "
+ "file is guessed when it is uploaded."
+ ),
+}
-CONTENT_TYPE = {'name': 'content-type',
- 'help_text': (
- "Specify an explicit content type for this operation. "
- "This value overrides any guessed mime types.")}
+CONTENT_TYPE = {
+ 'name': 'content-type',
+ 'help_text': (
+ "Specify an explicit content type for this operation. "
+ "This value overrides any guessed mime types."
+ ),
+}
-EXCLUDE = {'name': 'exclude', 'action': AppendFilter, 'nargs': 1,
- 'dest': 'filters',
- 'help_text': (
- "Exclude all files or objects from the command that matches "
- "the specified pattern.")}
+EXCLUDE = {
+ 'name': 'exclude',
+ 'action': AppendFilter,
+ 'nargs': 1,
+ 'dest': 'filters',
+ 'help_text': (
+ "Exclude all files or objects from the command that matches "
+ "the specified pattern."
+ ),
+}
-INCLUDE = {'name': 'include', 'action': AppendFilter, 'nargs': 1,
- 'dest': 'filters',
- 'help_text': (
- "Don't exclude files or objects "
- "in the command that match the specified pattern. "
- 'See Use of '
- 'Exclude and Include Filters for details.')}
+INCLUDE = {
+ 'name': 'include',
+ 'action': AppendFilter,
+ 'nargs': 1,
+ 'dest': 'filters',
+ 'help_text': (
+ "Don't exclude files or objects "
+ "in the command that match the specified pattern. "
+ 'See Use of '
+ 'Exclude and Include Filters for details.'
+ ),
+}
-ACL = {'name': 'acl',
- 'choices': ['private', 'public-read', 'public-read-write',
- 'authenticated-read', 'aws-exec-read', 'bucket-owner-read',
- 'bucket-owner-full-control', 'log-delivery-write'],
- 'help_text': (
- "Sets the ACL for the object when the command is "
- "performed. If you use this parameter you must have the "
- '"s3:PutObjectAcl" permission included in the list of actions '
- "for your IAM policy. "
- "Only accepts values of ``private``, ``public-read``, "
- "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, "
- "``bucket-owner-read``, ``bucket-owner-full-control`` and "
- "``log-delivery-write``. "
- 'See Canned ACL for details')}
+ACL = {
+ 'name': 'acl',
+ 'choices': [
+ 'private',
+ 'public-read',
+ 'public-read-write',
+ 'authenticated-read',
+ 'aws-exec-read',
+ 'bucket-owner-read',
+ 'bucket-owner-full-control',
+ 'log-delivery-write',
+ ],
+ 'help_text': (
+ "Sets the ACL for the object when the command is "
+ "performed. If you use this parameter you must have the "
+ '"s3:PutObjectAcl" permission included in the list of actions '
+ "for your IAM policy. "
+ "Only accepts values of ``private``, ``public-read``, "
+ "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, "
+ "``bucket-owner-read``, ``bucket-owner-full-control`` and "
+ "``log-delivery-write``. "
+ 'See Canned ACL for details'
+ ),
+}
GRANTS = {
- 'name': 'grants', 'nargs': '+',
+ 'name': 'grants',
+ 'nargs': '+',
'help_text': (
'Grant specific permissions to individual users or groups. You '
'can supply a list of grants of the form --grants '
@@ -174,40 +244,48 @@
''
'For more information on Amazon S3 access control, see '
- '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/'
- 'UsingAuthAccess.html">Access Control</a>')}
+ '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/'
+ 'UsingAuthAccess.html">Access Control</a>'
+ ),
+}
SSE = {
- 'name': 'sse', 'nargs': '?', 'const': 'AES256',
+ 'name': 'sse',
+ 'nargs': '?',
+ 'const': 'AES256',
'choices': ['AES256', 'aws:kms'],
'help_text': (
'Specifies server-side encryption of the object in S3. '
'Valid values are ``AES256`` and ``aws:kms``. If the parameter is '
'specified but no value is provided, ``AES256`` is used.'
- )
+ ),
}
SSE_C = {
- 'name': 'sse-c', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'],
+ 'name': 'sse-c',
+ 'nargs': '?',
+ 'const': 'AES256',
+ 'choices': ['AES256'],
'help_text': (
'Specifies server-side encryption using customer provided keys '
'of the object in S3. ``AES256`` is the only valid value. '
'If the parameter is specified but no value is provided, '
'``AES256`` is used. If you provide this value, ``--sse-c-key`` '
'must be specified as well.'
- )
+ ),
}
SSE_C_KEY = {
- 'name': 'sse-c-key', 'cli_type_name': 'blob',
+ 'name': 'sse-c-key',
+ 'cli_type_name': 'blob',
'help_text': (
'The customer-provided encryption key to use to server-side '
'encrypt the object in S3. If you provide this value, '
'``--sse-c`` must be specified as well. The key provided should '
'**not** be base64 encoded.'
- )
+ ),
}
@@ -218,13 +296,15 @@
'should be used to server-side encrypt the object in S3. You should '
'only provide this parameter if you are using a customer managed '
'customer master key (CMK) and not the AWS managed KMS CMK.'
- )
+ ),
}
SSE_C_COPY_SOURCE = {
- 'name': 'sse-c-copy-source', 'nargs': '?',
- 'const': 'AES256', 'choices': ['AES256'],
+ 'name': 'sse-c-copy-source',
+ 'nargs': '?',
+ 'const': 'AES256',
+ 'choices': ['AES256'],
'help_text': (
'This parameter should only be specified when copying an S3 object '
'that was encrypted server-side with a customer-provided '
@@ -233,12 +313,13 @@
'value. If the parameter is specified but no value is provided, '
'``AES256`` is used. If you provide this value, '
'``--sse-c-copy-source-key`` must be specified as well. '
- )
+ ),
}
SSE_C_COPY_SOURCE_KEY = {
- 'name': 'sse-c-copy-source-key', 'cli_type_name': 'blob',
+ 'name': 'sse-c-copy-source-key',
+ 'cli_type_name': 'blob',
'help_text': (
'This parameter should only be specified when copying an S3 object '
'that was encrypted server-side with a customer-provided '
@@ -247,105 +328,132 @@
'must be one that was used when the source object was created. '
'If you provide this value, ``--sse-c-copy-source`` must be specified as '
'well. The key provided should **not** be base64 encoded.'
- )
+ ),
}
-STORAGE_CLASS = {'name': 'storage-class',
- 'choices': ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA',
- 'ONEZONE_IA', 'INTELLIGENT_TIERING', 'GLACIER',
- 'DEEP_ARCHIVE', 'GLACIER_IR'],
- 'help_text': (
- "The type of storage to use for the object. "
- "Valid choices are: STANDARD | REDUCED_REDUNDANCY "
- "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING "
- "| GLACIER | DEEP_ARCHIVE | GLACIER_IR. "
- "Defaults to 'STANDARD'")}
+STORAGE_CLASS = {
+ 'name': 'storage-class',
+ 'choices': [
+ 'STANDARD',
+ 'REDUCED_REDUNDANCY',
+ 'STANDARD_IA',
+ 'ONEZONE_IA',
+ 'INTELLIGENT_TIERING',
+ 'GLACIER',
+ 'DEEP_ARCHIVE',
+ 'GLACIER_IR',
+ ],
+ 'help_text': (
+ "The type of storage to use for the object. "
+ "Valid choices are: STANDARD | REDUCED_REDUNDANCY "
+ "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING "
+ "| GLACIER | DEEP_ARCHIVE | GLACIER_IR. "
+ "Defaults to 'STANDARD'"
+ ),
+}
-WEBSITE_REDIRECT = {'name': 'website-redirect',
- 'help_text': (
- "If the bucket is configured as a website, "
- "redirects requests for this object to another object "
- "in the same bucket or to an external URL. Amazon S3 "
- "stores the value of this header in the object "
- "metadata.")}
+WEBSITE_REDIRECT = {
+ 'name': 'website-redirect',
+ 'help_text': (
+ "If the bucket is configured as a website, "
+ "redirects requests for this object to another object "
+ "in the same bucket or to an external URL. Amazon S3 "
+ "stores the value of this header in the object "
+ "metadata."
+ ),
+}
-CACHE_CONTROL = {'name': 'cache-control',
- 'help_text': (
- "Specifies caching behavior along the "
- "request/reply chain.")}
+CACHE_CONTROL = {
+ 'name': 'cache-control',
+ 'help_text': (
+ "Specifies caching behavior along the " "request/reply chain."
+ ),
+}
-CONTENT_DISPOSITION = {'name': 'content-disposition',
- 'help_text': (
- "Specifies presentational information "
- "for the object.")}
+CONTENT_DISPOSITION = {
+ 'name': 'content-disposition',
+ 'help_text': ("Specifies presentational information " "for the object."),
+}
-CONTENT_ENCODING = {'name': 'content-encoding',
- 'help_text': (
- "Specifies what content encodings have been "
- "applied to the object and thus what decoding "
- "mechanisms must be applied to obtain the media-type "
- "referenced by the Content-Type header field.")}
+CONTENT_ENCODING = {
+ 'name': 'content-encoding',
+ 'help_text': (
+ "Specifies what content encodings have been "
+ "applied to the object and thus what decoding "
+ "mechanisms must be applied to obtain the media-type "
+ "referenced by the Content-Type header field."
+ ),
+}
-CONTENT_LANGUAGE = {'name': 'content-language',
- 'help_text': ("The language the content is in.")}
+CONTENT_LANGUAGE = {
+ 'name': 'content-language',
+ 'help_text': ("The language the content is in."),
+}
-SOURCE_REGION = {'name': 'source-region',
- 'help_text': (
- "When transferring objects from an s3 bucket to an s3 "
- "bucket, this specifies the region of the source bucket."
- " Note the region specified by ``--region`` or through "
- "configuration of the CLI refers to the region of the "
- "destination bucket. If ``--source-region`` is not "
- "specified the region of the source will be the same "
- "as the region of the destination bucket.")}
+SOURCE_REGION = {
+ 'name': 'source-region',
+ 'help_text': (
+ "When transferring objects from an s3 bucket to an s3 "
+ "bucket, this specifies the region of the source bucket."
+ " Note the region specified by ``--region`` or through "
+ "configuration of the CLI refers to the region of the "
+ "destination bucket. If ``--source-region`` is not "
+ "specified the region of the source will be the same "
+ "as the region of the destination bucket."
+ ),
+}
EXPIRES = {
'name': 'expires',
'help_text': (
- "The date and time at which the object is no longer cacheable.")
+ "The date and time at which the object is no longer cacheable."
+ ),
}
METADATA = {
- 'name': 'metadata', 'cli_type_name': 'map',
+ 'name': 'metadata',
+ 'cli_type_name': 'map',
'schema': {
'type': 'map',
'key': {'type': 'string'},
- 'value': {'type': 'string'}
+ 'value': {'type': 'string'},
},
'help_text': (
"A map of metadata to store with the objects in S3. This will be "
"applied to every object which is part of this request. In a sync, "
"this means that files which haven't changed won't receive the new "
"metadata. "
- )
+ ),
}
METADATA_DIRECTIVE = {
- 'name': 'metadata-directive', 'choices': ['COPY', 'REPLACE'],
+ 'name': 'metadata-directive',
+ 'choices': ['COPY', 'REPLACE'],
'help_text': (
'Sets the ``x-amz-metadata-directive`` header for CopyObject '
'operations. It is recommended to use the ``--copy-props`` parameter '
'instead to control copying of metadata properties. '
'If ``--metadata-directive`` is set, the ``--copy-props`` parameter '
'will be disabled and will have no effect on the transfer.'
- )
+ ),
}
COPY_PROPS = {
'name': 'copy-props',
'choices': ['none', 'metadata-directive', 'default'],
- 'default': 'default', 'help_text': (
+ 'default': 'default',
+ 'help_text': (
'Determines which properties are copied from the source S3 object. '
'This parameter only applies for S3 to S3 copies. Valid values are: '
''
@@ -375,81 +483,104 @@
'If you want to guarantee no additional API calls are made other than '
'the ones needed to perform the actual copy, set this option to '
'``none``.'
- )
+ ),
}
-INDEX_DOCUMENT = {'name': 'index-document',
- 'help_text': (
- 'A suffix that is appended to a request that is for '
- 'a directory on the website endpoint (e.g. if the '
- 'suffix is index.html and you make a request to '
- 'samplebucket/images/ the data that is returned '
- 'will be for the object with the key name '
- 'images/index.html). The suffix must not be empty and '
- 'must not include a slash character.')}
+INDEX_DOCUMENT = {
+ 'name': 'index-document',
+ 'help_text': (
+ 'A suffix that is appended to a request that is for '
+ 'a directory on the website endpoint (e.g. if the '
+ 'suffix is index.html and you make a request to '
+ 'samplebucket/images/ the data that is returned '
+ 'will be for the object with the key name '
+ 'images/index.html). The suffix must not be empty and '
+ 'must not include a slash character.'
+ ),
+}
-ERROR_DOCUMENT = {'name': 'error-document',
- 'help_text': (
- 'The object key name to use when '
- 'a 4XX class error occurs.')}
+ERROR_DOCUMENT = {
+ 'name': 'error-document',
+ 'help_text': (
+ 'The object key name to use when ' 'a 4XX class error occurs.'
+ ),
+}
-ONLY_SHOW_ERRORS = {'name': 'only-show-errors', 'action': 'store_true',
- 'help_text': (
- 'Only errors and warnings are displayed. All other '
- 'output is suppressed.')}
+ONLY_SHOW_ERRORS = {
+ 'name': 'only-show-errors',
+ 'action': 'store_true',
+ 'help_text': (
+ 'Only errors and warnings are displayed. All other '
+ 'output is suppressed.'
+ ),
+}
-NO_PROGRESS = {'name': 'no-progress',
- 'action': 'store_false',
- 'dest': 'progress',
- 'help_text': (
- 'File transfer progress is not displayed. This flag '
- 'is only applied when the quiet and only-show-errors '
- 'flags are not provided.')}
+NO_PROGRESS = {
+ 'name': 'no-progress',
+ 'action': 'store_false',
+ 'dest': 'progress',
+ 'help_text': (
+ 'File transfer progress is not displayed. This flag '
+ 'is only applied when the quiet and only-show-errors '
+ 'flags are not provided.'
+ ),
+}
-EXPECTED_SIZE = {'name': 'expected-size',
- 'help_text': (
- 'This argument specifies the expected size of a stream '
- 'in terms of bytes. Note that this argument is needed '
- 'only when a stream is being uploaded to s3 and the size '
- 'is larger than 50GB. Failure to include this argument '
- 'under these conditions may result in a failed upload '
- 'due to too many parts in upload.')}
+EXPECTED_SIZE = {
+ 'name': 'expected-size',
+ 'help_text': (
+ 'This argument specifies the expected size of a stream '
+ 'in terms of bytes. Note that this argument is needed '
+ 'only when a stream is being uploaded to s3 and the size '
+ 'is larger than 50GB. Failure to include this argument '
+ 'under these conditions may result in a failed upload '
+ 'due to too many parts in upload.'
+ ),
+}
-PAGE_SIZE = {'name': 'page-size', 'cli_type_name': 'integer',
- 'help_text': (
- 'The number of results to return in each response to a list '
- 'operation. The default value is 1000 (the maximum allowed). '
- 'Using a lower value may help if an operation times out.')}
+PAGE_SIZE = {
+ 'name': 'page-size',
+ 'cli_type_name': 'integer',
+ 'help_text': (
+ 'The number of results to return in each response to a list '
+ 'operation. The default value is 1000 (the maximum allowed). '
+ 'Using a lower value may help if an operation times out.'
+ ),
+}
IGNORE_GLACIER_WARNINGS = {
- 'name': 'ignore-glacier-warnings', 'action': 'store_true',
+ 'name': 'ignore-glacier-warnings',
+ 'action': 'store_true',
'help_text': (
'Turns off glacier warnings. Warnings about an operation that cannot '
'be performed because it involves copying, downloading, or moving '
'a glacier object will no longer be printed to standard error and '
'will no longer cause the return code of the command to be ``2``.'
- )
+ ),
}
FORCE_GLACIER_TRANSFER = {
- 'name': 'force-glacier-transfer', 'action': 'store_true',
+ 'name': 'force-glacier-transfer',
+ 'action': 'store_true',
'help_text': (
'Forces a transfer request on all Glacier objects in a sync or '
'recursive copy.'
- )
+ ),
}
REQUEST_PAYER = {
- 'name': 'request-payer', 'choices': ['requester'],
- 'nargs': '?', 'const': 'requester',
+ 'name': 'request-payer',
+ 'choices': ['requester'],
+ 'nargs': '?',
+ 'const': 'requester',
'help_text': (
'Confirms that the requester knows that they will be charged '
'for the request. Bucket owners need not specify this parameter in '
@@ -457,11 +588,12 @@
'pays buckets can be found at '
'http://docs.aws.amazon.com/AmazonS3/latest/dev/'
'ObjectsinRequesterPaysBuckets.html'
- )
+ ),
}
VALIDATE_SAME_S3_PATHS = {
- 'name': 'validate-same-s3-paths', 'action': 'store_true',
+ 'name': 'validate-same-s3-paths',
+ 'action': 'store_true',
'help_text': (
'Resolves the source and destination S3 URIs to their '
'underlying buckets and verifies that the file or object '
@@ -478,29 +610,56 @@
'NOTE: Path validation requires making additional API calls. '
'Future updates to this path-validation mechanism might change '
'which API calls are made.'
- )
+ ),
}
CHECKSUM_MODE = {
- 'name': 'checksum-mode', 'choices': ['ENABLED'],
- 'help_text': 'To retrieve the checksum, this mode must be enabled. If the object has a '
- 'checksum, it will be verified.'
+ 'name': 'checksum-mode',
+ 'choices': ['ENABLED'],
+ 'help_text': 'To retrieve the checksum, this mode must be enabled. If the object has a '
+ 'checksum, it will be verified.',
}
CHECKSUM_ALGORITHM = {
- 'name': 'checksum-algorithm', 'choices': ['CRC32', 'SHA256', 'SHA1', 'CRC32C'],
- 'help_text': 'Indicates the algorithm used to create the checksum for the object.'
+ 'name': 'checksum-algorithm',
+ 'choices': ['CRC32', 'SHA256', 'SHA1', 'CRC32C'],
+ 'help_text': 'Indicates the algorithm used to create the checksum for the object.',
}
-TRANSFER_ARGS = [DRYRUN, QUIET, INCLUDE, EXCLUDE, ACL,
- FOLLOW_SYMLINKS, NO_FOLLOW_SYMLINKS, NO_GUESS_MIME_TYPE,
- SSE, SSE_C, SSE_C_KEY, SSE_KMS_KEY_ID, SSE_C_COPY_SOURCE,
- SSE_C_COPY_SOURCE_KEY, STORAGE_CLASS, GRANTS,
- WEBSITE_REDIRECT, CONTENT_TYPE, CACHE_CONTROL,
- CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
- EXPIRES, SOURCE_REGION, ONLY_SHOW_ERRORS, NO_PROGRESS,
- PAGE_SIZE, IGNORE_GLACIER_WARNINGS, FORCE_GLACIER_TRANSFER,
- REQUEST_PAYER, CHECKSUM_MODE, CHECKSUM_ALGORITHM]
+TRANSFER_ARGS = [
+ DRYRUN,
+ QUIET,
+ INCLUDE,
+ EXCLUDE,
+ ACL,
+ FOLLOW_SYMLINKS,
+ NO_FOLLOW_SYMLINKS,
+ NO_GUESS_MIME_TYPE,
+ SSE,
+ SSE_C,
+ SSE_C_KEY,
+ SSE_KMS_KEY_ID,
+ SSE_C_COPY_SOURCE,
+ SSE_C_COPY_SOURCE_KEY,
+ STORAGE_CLASS,
+ GRANTS,
+ WEBSITE_REDIRECT,
+ CONTENT_TYPE,
+ CACHE_CONTROL,
+ CONTENT_DISPOSITION,
+ CONTENT_ENCODING,
+ CONTENT_LANGUAGE,
+ EXPIRES,
+ SOURCE_REGION,
+ ONLY_SHOW_ERRORS,
+ NO_PROGRESS,
+ PAGE_SIZE,
+ IGNORE_GLACIER_WARNINGS,
+ FORCE_GLACIER_TRANSFER,
+ REQUEST_PAYER,
+ CHECKSUM_MODE,
+ CHECKSUM_ALGORITHM,
+]
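
These dictionaries are rows for BasicCommand's ARG_TABLE rather than direct argparse calls, but the keys map onto familiar argparse ideas: 'name' becomes a ``--flag``, and 'action', 'dest', and 'help_text' behave much as you would expect. A loose analogy only, assuming nothing about awscli's internal parser wiring:

import argparse

# Roughly what a row like PAGE_SIZE implies: a '--page-size' option
# taking an integer, with its help_text attached.
parser = argparse.ArgumentParser(prog='aws s3 ls')
parser.add_argument(
    '--page-size',
    type=int,
    help='The number of results to return in each response to a list '
    'operation.',
)
# And a store_true row such as RECURSIVE, with its separate dest.
parser.add_argument(
    '--recursive',
    action='store_true',
    dest='dir_op',
    help='Command is performed on all files or objects under the '
    'specified directory or prefix.',
)
args = parser.parse_args(['--recursive', '--page-size', '100'])
assert args.dir_op and args.page_size == 100
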
class S3Command(BasicCommand):
@@ -515,13 +674,26 @@ def _run_main(self, parsed_args, parsed_globals):
class ListCommand(S3Command):
NAME = 'ls'
- DESCRIPTION = ("List S3 objects and common prefixes under a prefix or "
- "all S3 buckets. Note that the --output and --no-paginate "
- "arguments are ignored for this command.")
+ DESCRIPTION = (
+ "List S3 objects and common prefixes under a prefix or "
+ "all S3 buckets. Note that the --output and --no-paginate "
+ "arguments are ignored for this command."
+ )
USAGE = " or NONE"
- ARG_TABLE = [{'name': 'paths', 'nargs': '?', 'default': 's3://',
- 'positional_arg': True, 'synopsis': USAGE}, RECURSIVE,
- PAGE_SIZE, HUMAN_READABLE, SUMMARIZE, REQUEST_PAYER]
+ ARG_TABLE = [
+ {
+ 'name': 'paths',
+ 'nargs': '?',
+ 'default': 's3://',
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ },
+ RECURSIVE,
+ PAGE_SIZE,
+ HUMAN_READABLE,
+ SUMMARIZE,
+ REQUEST_PAYER,
+ ]
def _run_main(self, parsed_args, parsed_globals):
super(ListCommand, self)._run_main(parsed_args, parsed_globals)
@@ -539,10 +711,12 @@ def _run_main(self, parsed_args, parsed_globals):
elif parsed_args.dir_op:
# Then --recursive was specified.
self._list_all_objects_recursive(
- bucket, key, parsed_args.page_size, parsed_args.request_payer)
+ bucket, key, parsed_args.page_size, parsed_args.request_payer
+ )
else:
self._list_all_objects(
- bucket, key, parsed_args.page_size, parsed_args.request_payer)
+ bucket, key, parsed_args.page_size, parsed_args.request_payer
+ )
if parsed_args.summarize:
self._print_summary()
if key:
@@ -559,12 +733,15 @@ def _run_main(self, parsed_args, parsed_globals):
# thrown before reaching the automatic return of rc of zero.
return 0
- def _list_all_objects(self, bucket, key, page_size=None,
- request_payer=None):
+ def _list_all_objects(
+ self, bucket, key, page_size=None, request_payer=None
+ ):
paginator = self.client.get_paginator('list_objects_v2')
paging_args = {
- 'Bucket': bucket, 'Prefix': key, 'Delimiter': '/',
- 'PaginationConfig': {'PageSize': page_size}
+ 'Bucket': bucket,
+ 'Prefix': key,
+ 'Delimiter': '/',
+ 'PaginationConfig': {'PageSize': page_size},
}
if request_payer is not None:
paging_args['RequestPayer'] = request_payer
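
The Prefix plus Delimiter='/' combination is what makes the non-recursive listing fold sub-"directories" into CommonPrefixes while real keys arrive in Contents. The equivalent call sketched against a bare boto3 client (bucket and prefix are placeholders):

import boto3

s3 = boto3.client('s3')
paginator = s3.get_paginator('list_objects_v2')
pages = paginator.paginate(
    Bucket='example-bucket',  # placeholder
    Prefix='photos/',
    Delimiter='/',  # group sub-"directories" into CommonPrefixes
    PaginationConfig={'PageSize': 1000},
)
for page in pages:
    for prefix in page.get('CommonPrefixes', []):
        print('PRE', prefix['Prefix'])
    for obj in page.get('Contents', []):
        print(obj['LastModified'], obj['Size'], obj['Key'])
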
@@ -594,16 +771,13 @@ def _display_page(self, response_data, use_basename=True):
filename = filename_components[-1]
else:
filename = content['Key']
- print_str = last_mod_str + ' ' + size_str + ' ' + \
- filename + '\n'
+ print_str = last_mod_str + ' ' + size_str + ' ' + filename + '\n'
uni_print(print_str)
self._at_first_page = False
def _list_all_buckets(self, page_size=None):
paginator = self.client.get_paginator('list_buckets')
- paging_args = {
- 'PaginationConfig': {'PageSize': page_size}
- }
+ paging_args = {'PaginationConfig': {'PageSize': page_size}}
iterator = paginator.paginate(**paging_args)
@@ -615,12 +789,14 @@ def _list_all_buckets(self, page_size=None):
print_str = last_mod_str + ' ' + bucket['Name'] + '\n'
uni_print(print_str)
- def _list_all_objects_recursive(self, bucket, key, page_size=None,
- request_payer=None):
+ def _list_all_objects_recursive(
+ self, bucket, key, page_size=None, request_payer=None
+ ):
paginator = self.client.get_paginator('list_objects_v2')
paging_args = {
- 'Bucket': bucket, 'Prefix': key,
- 'PaginationConfig': {'PageSize': page_size}
+ 'Bucket': bucket,
+ 'Prefix': key,
+ 'PaginationConfig': {'PageSize': page_size},
}
if request_payer is not None:
paging_args['RequestPayer'] = request_payer
@@ -642,11 +818,14 @@ def _make_last_mod_str(self, last_mod):
"""
last_mod = parse(last_mod)
last_mod = last_mod.astimezone(tzlocal())
- last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2),
- str(last_mod.day).zfill(2),
- str(last_mod.hour).zfill(2),
- str(last_mod.minute).zfill(2),
- str(last_mod.second).zfill(2))
+ last_mod_tup = (
+ str(last_mod.year),
+ str(last_mod.month).zfill(2),
+ str(last_mod.day).zfill(2),
+ str(last_mod.hour).zfill(2),
+ str(last_mod.minute).zfill(2),
+ str(last_mod.second).zfill(2),
+ )
last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup
return last_mod_str.ljust(19, ' ')
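
For example, an S3 LastModified of 2023-07-05T09:08:07+00:00 renders as 2023-07-05 09:08:07 (shifted into the local timezone), left-justified to 19 characters so the size column lines up. The same transformation in isolation:

from dateutil.parser import parse
from dateutil.tz import tzlocal

last_mod = parse('2023-07-05T09:08:07+00:00').astimezone(tzlocal())
fields = (
    str(last_mod.year),
    str(last_mod.month).zfill(2),
    str(last_mod.day).zfill(2),
    str(last_mod.hour).zfill(2),
    str(last_mod.minute).zfill(2),
    str(last_mod.second).zfill(2),
)
print(("%s-%s-%s %s:%s:%s" % fields).ljust(19, ' '))
# prints '2023-07-05 09:08:07' when the local zone is UTC
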
@@ -677,25 +856,36 @@ class WebsiteCommand(S3Command):
NAME = 'website'
DESCRIPTION = 'Set the website configuration for a bucket.'
USAGE = '<S3Uri>'
- ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True,
- 'synopsis': USAGE}, INDEX_DOCUMENT, ERROR_DOCUMENT]
+ ARG_TABLE = [
+ {
+ 'name': 'paths',
+ 'nargs': 1,
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ },
+ INDEX_DOCUMENT,
+ ERROR_DOCUMENT,
+ ]
def _run_main(self, parsed_args, parsed_globals):
super(WebsiteCommand, self)._run_main(parsed_args, parsed_globals)
bucket = self._get_bucket_name(parsed_args.paths[0])
website_configuration = self._build_website_configuration(parsed_args)
self.client.put_bucket_website(
- Bucket=bucket, WebsiteConfiguration=website_configuration)
+ Bucket=bucket, WebsiteConfiguration=website_configuration
+ )
return 0
def _build_website_configuration(self, parsed_args):
website_config = {}
if parsed_args.index_document is not None:
- website_config['IndexDocument'] = \
- {'Suffix': parsed_args.index_document}
+ website_config['IndexDocument'] = {
+ 'Suffix': parsed_args.index_document
+ }
if parsed_args.error_document is not None:
- website_config['ErrorDocument'] = \
- {'Key': parsed_args.error_document}
+ website_config['ErrorDocument'] = {
+ 'Key': parsed_args.error_document
+ }
return website_config
def _get_bucket_name(self, path):
@@ -722,13 +912,18 @@ class PresignCommand(S3Command):
"so the region needs to be configured explicitly."
)
USAGE = ""
- ARG_TABLE = [{'name': 'path',
- 'positional_arg': True, 'synopsis': USAGE},
- {'name': 'expires-in', 'default': 3600,
- 'cli_type_name': 'integer',
- 'help_text': (
- 'Number of seconds until the pre-signed '
- 'URL expires. Default is 3600 seconds. Maximum is 604800 seconds.')}]
+ ARG_TABLE = [
+ {'name': 'path', 'positional_arg': True, 'synopsis': USAGE},
+ {
+ 'name': 'expires-in',
+ 'default': 3600,
+ 'cli_type_name': 'integer',
+ 'help_text': (
+ 'Number of seconds until the pre-signed '
+ 'URL expires. Default is 3600 seconds. Maximum is 604800 seconds.'
+ ),
+ },
+ ]
def _run_main(self, parsed_args, parsed_globals):
super(PresignCommand, self)._run_main(parsed_args, parsed_globals)
@@ -739,7 +934,7 @@ def _run_main(self, parsed_args, parsed_globals):
url = self.client.generate_presigned_url(
'get_object',
{'Bucket': bucket, 'Key': key},
- ExpiresIn=parsed_args.expires_in
+ ExpiresIn=parsed_args.expires_in,
)
uni_print(url)
uni_print('\n')
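
The command is essentially a thin wrapper over the client's generate_presigned_url, so the same URL can be produced straight from a boto3 client. A sketch with placeholder bucket and key:

import boto3

s3 = boto3.client('s3')
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'example-bucket', 'Key': 'report.csv'},  # placeholders
    ExpiresIn=300,  # seconds; the CLI default above is 3600
)
print(url)

From the CLI, ``aws s3 presign s3://example-bucket/report.csv --expires-in 300`` yields the same kind of URL.
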
@@ -755,12 +950,15 @@ def _run_main(self, parsed_args, parsed_globals):
params=params
)
transfer_manager = self._get_transfer_manager(
- params=params,
- botocore_transfer_client=transfer_client
+ params=params, botocore_transfer_client=transfer_client
)
cmd = CommandArchitecture(
- self._session, self.NAME, params,
- transfer_manager, source_client, transfer_client
+ self._session,
+ self.NAME,
+ params,
+ transfer_manager,
+ source_client,
+ transfer_client,
)
cmd.create_instructions()
return cmd.run()
@@ -778,8 +976,8 @@ def _convert_path_args(self, parsed_args):
def _get_params(self, parsed_args, parsed_globals, session):
cmd_params = CommandParameters(
- self.NAME, vars(parsed_args), self.USAGE,
- session, parsed_globals)
+ self.NAME, vars(parsed_args), self.USAGE, session, parsed_globals
+ )
cmd_params.add_region(parsed_globals)
cmd_params.add_endpoint_url(parsed_globals)
cmd_params.add_verify_ssl(parsed_globals)
@@ -791,7 +989,8 @@ def _get_params(self, parsed_args, parsed_globals, session):
def _get_source_and_transfer_clients(self, params):
client_factory = ClientFactory(self._session)
source_client = client_factory.create_client(
- params, is_source_client=True)
+ params, is_source_client=True
+ )
transfer_client = client_factory.create_client(params)
return source_client, transfer_client
@@ -805,52 +1004,98 @@ def _get_transfer_manager(self, params, botocore_transfer_client):
def _get_runtime_config(self):
return transferconfig.RuntimeConfig().build_config(
- **self._session.get_scoped_config().get('s3', {}))
+ **self._session.get_scoped_config().get('s3', {})
+ )
class CpCommand(S3TransferCommand):
NAME = 'cp'
- DESCRIPTION = "Copies a local file or S3 object to another location " \
- "locally or in S3."
- USAGE = " or " \
- "or "
- ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
- 'synopsis': USAGE}] + TRANSFER_ARGS + \
- [METADATA, COPY_PROPS, METADATA_DIRECTIVE, EXPECTED_SIZE,
- RECURSIVE]
+ DESCRIPTION = (
+ "Copies a local file or S3 object to another location "
+ "locally or in S3."
+ )
+ USAGE = " or " "or "
+ ARG_TABLE = (
+ [
+ {
+ 'name': 'paths',
+ 'nargs': 2,
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ }
+ ]
+ + TRANSFER_ARGS
+ + [METADATA, COPY_PROPS, METADATA_DIRECTIVE, EXPECTED_SIZE, RECURSIVE]
+ )
class MvCommand(S3TransferCommand):
NAME = 'mv'
DESCRIPTION = BasicCommand.FROM_FILE('s3', 'mv', '_description.rst')
- USAGE = " or " \
- "or "
- ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
- 'synopsis': USAGE}] + TRANSFER_ARGS +\
- [METADATA, COPY_PROPS, METADATA_DIRECTIVE, RECURSIVE,
- VALIDATE_SAME_S3_PATHS]
+ USAGE = " or " "or "
+ ARG_TABLE = (
+ [
+ {
+ 'name': 'paths',
+ 'nargs': 2,
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ }
+ ]
+ + TRANSFER_ARGS
+ + [
+ METADATA,
+ COPY_PROPS,
+ METADATA_DIRECTIVE,
+ RECURSIVE,
+ VALIDATE_SAME_S3_PATHS,
+ ]
+ )
class RmCommand(S3TransferCommand):
NAME = 'rm'
DESCRIPTION = "Deletes an S3 object."
USAGE = ""
- ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True,
- 'synopsis': USAGE}, DRYRUN, QUIET, RECURSIVE, REQUEST_PAYER,
- INCLUDE, EXCLUDE, ONLY_SHOW_ERRORS, PAGE_SIZE]
+ ARG_TABLE = [
+ {
+ 'name': 'paths',
+ 'nargs': 1,
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ },
+ DRYRUN,
+ QUIET,
+ RECURSIVE,
+ REQUEST_PAYER,
+ INCLUDE,
+ EXCLUDE,
+ ONLY_SHOW_ERRORS,
+ PAGE_SIZE,
+ ]
class SyncCommand(S3TransferCommand):
NAME = 'sync'
- DESCRIPTION = "Syncs directories and S3 prefixes. Recursively copies " \
- "new and updated files from the source directory to " \
- "the destination. Only creates folders in the destination " \
- "if they contain one or more files."
- USAGE = " or " \
- " or "
- ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
- 'synopsis': USAGE}] + TRANSFER_ARGS + \
- [METADATA, COPY_PROPS, METADATA_DIRECTIVE]
+ DESCRIPTION = (
+ "Syncs directories and S3 prefixes. Recursively copies "
+ "new and updated files from the source directory to "
+ "the destination. Only creates folders in the destination "
+ "if they contain one or more files."
+ )
+ USAGE = " or " " or "
+ ARG_TABLE = (
+ [
+ {
+ 'name': 'paths',
+ 'nargs': 2,
+ 'positional_arg': True,
+ 'synopsis': USAGE,
+ }
+ ]
+ + TRANSFER_ARGS
+ + [METADATA, COPY_PROPS, METADATA_DIRECTIVE]
+ )
class MbCommand(S3Command):
@@ -886,7 +1131,7 @@ def _run_main(self, parsed_args, parsed_globals):
except Exception as e:
uni_print(
"make_bucket failed: %s %s\n" % (parsed_args.path, e),
- sys.stderr
+ sys.stderr,
)
return 1
@@ -901,8 +1146,10 @@ class RbCommand(S3Command):
"deleted."
)
USAGE = ""
- ARG_TABLE = [{'name': 'path', 'positional_arg': True,
- 'synopsis': USAGE}, FORCE]
+ ARG_TABLE = [
+ {'name': 'path', 'positional_arg': True, 'synopsis': USAGE},
+ FORCE,
+ ]
def _run_main(self, parsed_args, parsed_globals):
super(RbCommand, self)._run_main(parsed_args, parsed_globals)
@@ -929,7 +1176,7 @@ def _run_main(self, parsed_args, parsed_globals):
except Exception as e:
uni_print(
"remove_bucket failed: %s %s\n" % (parsed_args.path, e),
- sys.stderr
+ sys.stderr,
)
return 1
@@ -940,7 +1187,8 @@ def _force(self, path, parsed_globals):
if rc != 0:
raise RuntimeError(
"remove_bucket failed: Unable to delete all objects in the "
- "bucket, bucket will not be deleted.")
+ "bucket, bucket will not be deleted."
+ )
class CommandArchitecture(object):
@@ -953,8 +1201,16 @@ class CommandArchitecture(object):
list of instructions to wire together an assortment of generators to
perform the command.
"""
- def __init__(self, session, cmd, parameters, transfer_manager,
- source_client, transfer_client):
+
+ def __init__(
+ self,
+ session,
+ cmd,
+ parameters,
+ transfer_manager,
+ source_client,
+ transfer_client,
+ ):
self.session = session
self.cmd = cmd
self.parameters = parameters
@@ -992,14 +1248,16 @@ def choose_sync_strategies(self):
"""
sync_strategies = {}
# Set the default strategies.
- sync_strategies['file_at_src_and_dest_sync_strategy'] = \
+ sync_strategies['file_at_src_and_dest_sync_strategy'] = (
SizeAndLastModifiedSync()
+ )
sync_strategies['file_not_at_dest_sync_strategy'] = MissingFileSync()
sync_strategies['file_not_at_src_sync_strategy'] = NeverSync()
# Determine what strategies to override if any.
responses = self.session.emit(
- 'choosing-s3-sync-strategy', params=self.parameters)
+ 'choosing-s3-sync-strategy', params=self.parameters
+ )
if responses is not None:
for response in responses:
override_sync_strategy = response[1]
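
Outside this hunk, the loop goes on to install any non-None override under its strategy's sync_type slot. A simplified sketch of that merge, with a stand-in strategy object rather than awscli's real sync-strategy classes:

class SizeOnlySync:
    # Stand-in for a plugin-supplied strategy; real ones subclass BaseSync.
    sync_type = 'file_at_src_and_dest'

def merge_overrides(defaults, responses):
    # responses mimics session.emit(): a list of (handler, return_value).
    strategies = dict(defaults)
    for _handler, override in responses:
        if override is not None:
            strategies['%s_sync_strategy' % override.sync_type] = override
    return strategies

defaults = {'file_at_src_and_dest_sync_strategy': 'size+mtime default'}
merged = merge_overrides(defaults, [(None, SizeOnlySync())])
print(merged['file_at_src_and_dest_sync_strategy'])  # the SizeOnlySync instance
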
@@ -1042,85 +1300,106 @@ def run(self):
'locals3': 'upload',
's3s3': 'copy',
's3local': 'download',
- 's3': 'delete'
+ 's3': 'delete',
}
result_queue = queue.Queue()
operation_name = cmd_translation[paths_type]
fgen_kwargs = {
- 'client': self._source_client, 'operation_name': operation_name,
+ 'client': self._source_client,
+ 'operation_name': operation_name,
'follow_symlinks': self.parameters['follow_symlinks'],
'page_size': self.parameters['page_size'],
'result_queue': result_queue,
}
rgen_kwargs = {
- 'client': self._client, 'operation_name': '',
+ 'client': self._client,
+ 'operation_name': '',
'follow_symlinks': self.parameters['follow_symlinks'],
'page_size': self.parameters['page_size'],
'result_queue': result_queue,
}
- fgen_request_parameters = \
+ fgen_request_parameters = (
self._get_file_generator_request_parameters_skeleton()
+ )
self._map_request_payer_params(fgen_request_parameters)
self._map_sse_c_params(fgen_request_parameters, paths_type)
fgen_kwargs['request_parameters'] = fgen_request_parameters
- rgen_request_parameters = \
+ rgen_request_parameters = (
self._get_file_generator_request_parameters_skeleton()
+ )
self._map_request_payer_params(rgen_request_parameters)
rgen_kwargs['request_parameters'] = rgen_request_parameters
file_generator = FileGenerator(**fgen_kwargs)
rev_generator = FileGenerator(**rgen_kwargs)
stream_dest_path, stream_compare_key = find_dest_path_comp_key(files)
- stream_file_info = [FileInfo(src=files['src']['path'],
- dest=stream_dest_path,
- compare_key=stream_compare_key,
- src_type=files['src']['type'],
- dest_type=files['dest']['type'],
- operation_name=operation_name,
- client=self._client,
- is_stream=True)]
+ stream_file_info = [
+ FileInfo(
+ src=files['src']['path'],
+ dest=stream_dest_path,
+ compare_key=stream_compare_key,
+ src_type=files['src']['type'],
+ dest_type=files['dest']['type'],
+ operation_name=operation_name,
+ client=self._client,
+ is_stream=True,
+ )
+ ]
file_info_builder = FileInfoBuilder(
- self._client, self._source_client, self.parameters)
+ self._client, self._source_client, self.parameters
+ )
s3_transfer_handler = S3TransferHandlerFactory(self.parameters)(
- self._transfer_manager, result_queue)
+ self._transfer_manager, result_queue
+ )
sync_strategies = self.choose_sync_strategies()
command_dict = {}
if self.cmd == 'sync':
- command_dict = {'setup': [files, rev_files],
- 'file_generator': [file_generator,
- rev_generator],
- 'filters': [create_filter(self.parameters),
- create_filter(self.parameters)],
- 'comparator': [Comparator(**sync_strategies)],
- 'file_info_builder': [file_info_builder],
- 's3_handler': [s3_transfer_handler]}
+ command_dict = {
+ 'setup': [files, rev_files],
+ 'file_generator': [file_generator, rev_generator],
+ 'filters': [
+ create_filter(self.parameters),
+ create_filter(self.parameters),
+ ],
+ 'comparator': [Comparator(**sync_strategies)],
+ 'file_info_builder': [file_info_builder],
+ 's3_handler': [s3_transfer_handler],
+ }
elif self.cmd == 'cp' and self.parameters['is_stream']:
- command_dict = {'setup': [stream_file_info],
- 's3_handler': [s3_transfer_handler]}
+ command_dict = {
+ 'setup': [stream_file_info],
+ 's3_handler': [s3_transfer_handler],
+ }
elif self.cmd == 'cp':
- command_dict = {'setup': [files],
- 'file_generator': [file_generator],
- 'filters': [create_filter(self.parameters)],
- 'file_info_builder': [file_info_builder],
- 's3_handler': [s3_transfer_handler]}
+ command_dict = {
+ 'setup': [files],
+ 'file_generator': [file_generator],
+ 'filters': [create_filter(self.parameters)],
+ 'file_info_builder': [file_info_builder],
+ 's3_handler': [s3_transfer_handler],
+ }
elif self.cmd == 'rm':
- command_dict = {'setup': [files],
- 'file_generator': [file_generator],
- 'filters': [create_filter(self.parameters)],
- 'file_info_builder': [file_info_builder],
- 's3_handler': [s3_transfer_handler]}
+ command_dict = {
+ 'setup': [files],
+ 'file_generator': [file_generator],
+ 'filters': [create_filter(self.parameters)],
+ 'file_info_builder': [file_info_builder],
+ 's3_handler': [s3_transfer_handler],
+ }
elif self.cmd == 'mv':
- command_dict = {'setup': [files],
- 'file_generator': [file_generator],
- 'filters': [create_filter(self.parameters)],
- 'file_info_builder': [file_info_builder],
- 's3_handler': [s3_transfer_handler]}
+ command_dict = {
+ 'setup': [files],
+ 'file_generator': [file_generator],
+ 'filters': [create_filter(self.parameters)],
+ 'file_info_builder': [file_info_builder],
+ 's3_handler': [s3_transfer_handler],
+ }
files = command_dict['setup']
while self.instructions:
@@ -1151,22 +1430,16 @@ def run(self):
return rc
def _get_file_generator_request_parameters_skeleton(self):
- return {
- 'HeadObject': {},
- 'ListObjects': {},
- 'ListObjectsV2': {}
- }
+ return {'HeadObject': {}, 'ListObjects': {}, 'ListObjectsV2': {}}
def _map_request_payer_params(self, request_parameters):
RequestParamsMapper.map_head_object_params(
- request_parameters['HeadObject'], {
- 'request_payer': self.parameters.get('request_payer')
- }
+ request_parameters['HeadObject'],
+ {'request_payer': self.parameters.get('request_payer')},
)
RequestParamsMapper.map_list_objects_v2_params(
- request_parameters['ListObjectsV2'], {
- 'request_payer': self.parameters.get('request_payer')
- }
+ request_parameters['ListObjectsV2'],
+ {'request_payer': self.parameters.get('request_payer')},
)
def _map_sse_c_params(self, request_parameters, paths_type):
@@ -1177,13 +1450,15 @@ def _map_sse_c_params(self, request_parameters, paths_type):
# not need any of these because it is used only for sync operations
# which only use ListObjects which does not require HeadObject.
RequestParamsMapper.map_head_object_params(
- request_parameters['HeadObject'], self.parameters)
+ request_parameters['HeadObject'], self.parameters
+ )
if paths_type == 's3s3':
RequestParamsMapper.map_head_object_params(
- request_parameters['HeadObject'], {
+ request_parameters['HeadObject'],
+ {
'sse_c': self.parameters.get('sse_c_copy_source'),
- 'sse_c_key': self.parameters.get('sse_c_copy_source_key')
- }
+ 'sse_c_key': self.parameters.get('sse_c_copy_source_key'),
+ },
)
@@ -1202,8 +1477,10 @@ class CommandParameters(object):
This class is used to do some initial error checking based on the
parameters and arguments passed to the command line.
"""
- def __init__(self, cmd, parameters, usage,
- session=None, parsed_globals=None):
+
+ def __init__(
+ self, cmd, parameters, usage, session=None, parsed_globals=None
+ ):
"""
Stores command name and parameters. Ensures that the ``dir_op`` flag
is true if a certain command is being used.
@@ -1252,9 +1529,10 @@ def add_paths(self, paths):
self._validate_not_s3_express_bucket_for_sync()
def _validate_not_s3_express_bucket_for_sync(self):
- if self.cmd == 'sync' and \
- (self._is_s3express_path(self.parameters['src']) or
- self._is_s3express_path(self.parameters['dest'])):
+ if self.cmd == 'sync' and (
+ self._is_s3express_path(self.parameters['src'])
+ or self._is_s3express_path(self.parameters['dest'])
+ ):
raise ParamValidationError(
"Cannot use sync command with a directory bucket."
)
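
Directory (S3 Express One Zone) buckets are recognizable from the bucket name alone, which is why is_s3express_bucket can gate sync without any extra API calls. A small sketch; the exact name shape shown is an assumption for illustration:

from botocore.utils import is_s3express_bucket

# Directory bucket names carry a zone infix and an '--x-s3' style suffix
# (assumed shape, for illustration); general purpose buckets do not.
print(is_s3express_bucket('my-bucket--use1-az4--x-s3'))  # True
print(is_s3express_bucket('my-regular-bucket'))          # False
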
@@ -1280,7 +1558,7 @@ def _validate_streaming_paths(self):
def _validate_path_args(self):
# If we're using a mv command, you can't copy the object onto itself.
params = self.parameters
- if self.cmd == 'mv' and params['paths_type']=='s3s3':
+ if self.cmd == 'mv' and params['paths_type'] == 's3s3':
self._raise_if_mv_same_paths(params['src'], params['dest'])
if self._should_validate_same_underlying_s3_paths():
self._validate_same_underlying_s3_paths()
@@ -1291,20 +1569,20 @@ def _validate_path_args(self):
self._raise_if_paths_type_incorrect_for_param(
CHECKSUM_ALGORITHM['name'],
params['paths_type'],
- ['locals3', 's3s3'])
+ ['locals3', 's3s3'],
+ )
if params.get('checksum_mode'):
self._raise_if_paths_type_incorrect_for_param(
- CHECKSUM_MODE['name'],
- params['paths_type'],
- ['s3local'])
+ CHECKSUM_MODE['name'], params['paths_type'], ['s3local']
+ )
# If the user provided local path does not exist, hard fail because
# we know that we will not be able to upload the file.
if 'locals3' == params['paths_type'] and not params['is_stream']:
if not os.path.exists(params['src']):
raise RuntimeError(
- 'The user-provided path %s does not exist.' %
- params['src'])
+ 'The user-provided path %s does not exist.' % params['src']
+ )
# If the operation is downloading to a directory that does not exist,
# create the directories so no warnings are thrown during the syncing
# process.
@@ -1328,19 +1606,27 @@ def _same_key(self, src, dest):
def _validate_same_s3_paths_enabled(self):
validate_env_var = ensure_boolean(
- os.environ.get('AWS_CLI_S3_MV_VALIDATE_SAME_S3_PATHS'))
- return (self.parameters.get('validate_same_s3_paths') or
- validate_env_var)
+ os.environ.get('AWS_CLI_S3_MV_VALIDATE_SAME_S3_PATHS')
+ )
+ return (
+ self.parameters.get('validate_same_s3_paths') or validate_env_var
+ )
def _should_emit_validate_s3_paths_warning(self):
is_same_key = self._same_key(
- self.parameters['src'], self.parameters['dest'])
+ self.parameters['src'], self.parameters['dest']
+ )
src_has_underlying_path = S3PathResolver.has_underlying_s3_path(
- self.parameters['src'])
+ self.parameters['src']
+ )
dest_has_underlying_path = S3PathResolver.has_underlying_s3_path(
- self.parameters['dest'])
- return (is_same_key and not self._validate_same_s3_paths_enabled() and
- (src_has_underlying_path or dest_has_underlying_path))
+ self.parameters['dest']
+ )
+ return (
+ is_same_key
+ and not self._validate_same_s3_paths_enabled()
+ and (src_has_underlying_path or dest_has_underlying_path)
+ )
def _emit_validate_s3_paths_warning(self):
msg = (
@@ -1356,19 +1642,20 @@ def _emit_validate_s3_paths_warning(self):
def _should_validate_same_underlying_s3_paths(self):
is_same_key = self._same_key(
- self.parameters['src'], self.parameters['dest'])
+ self.parameters['src'], self.parameters['dest']
+ )
return is_same_key and self._validate_same_s3_paths_enabled()
def _validate_same_underlying_s3_paths(self):
src_paths = S3PathResolver.from_session(
self._session,
self.parameters.get('source_region', self._parsed_globals.region),
- self._parsed_globals.verify_ssl
+ self._parsed_globals.verify_ssl,
).resolve_underlying_s3_paths(self.parameters['src'])
dest_paths = S3PathResolver.from_session(
self._session,
self._parsed_globals.region,
- self._parsed_globals.verify_ssl
+ self._parsed_globals.verify_ssl,
).resolve_underlying_s3_paths(self.parameters['dest'])
for src_path in src_paths:
for dest_path in dest_paths:
@@ -1381,13 +1668,15 @@ def _raise_if_mv_same_paths(self, src, dest):
f"{self.parameters['src']} - {self.parameters['dest']}"
)
- def _raise_if_paths_type_incorrect_for_param(self, param, paths_type, allowed_paths):
+ def _raise_if_paths_type_incorrect_for_param(
+ self, param, paths_type, allowed_paths
+ ):
if paths_type not in allowed_paths:
expected_usage_map = {
'locals3': '<LocalPath> <S3Uri>',
's3s3': '<S3Uri> <S3Uri>',
's3local': '<S3Uri> <LocalPath>',
- 's3': '<S3Uri>'
+ 's3': '<S3Uri>',
}
raise ParamValidationError(
f"Expected {param} parameter to be used with one of following path formats: "
@@ -1410,14 +1699,16 @@ def check_path_type(self, paths):
This initial check ensures that the path types for the specified
command is correct.
"""
- template_type = {'s3s3': ['cp', 'sync', 'mv'],
- 's3local': ['cp', 'sync', 'mv'],
- 'locals3': ['cp', 'sync', 'mv'],
- 's3': ['mb', 'rb', 'rm'],
- 'local': [], 'locallocal': []}
+ template_type = {
+ 's3s3': ['cp', 'sync', 'mv'],
+ 's3local': ['cp', 'sync', 'mv'],
+ 'locals3': ['cp', 'sync', 'mv'],
+ 's3': ['mb', 'rb', 'rm'],
+ 'local': [],
+ 'locallocal': [],
+ }
paths_type = ''
- usage = "usage: aws s3 %s %s" % (self.cmd,
- self.usage)
+ usage = "usage: aws s3 %s %s" % (self.cmd, self.usage)
for i in range(len(paths)):
if paths[i].startswith('s3://'):
paths_type = paths_type + 's3'
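
The classification just concatenates one token per path, 's3' for s3:// URIs and 'local' for everything else, and the result is looked up in the table above. The same logic as a standalone sketch:

def classify_paths(paths):
    # Standalone re-implementation of the loop above, for illustration.
    return ''.join(
        's3' if p.startswith('s3://') else 'local' for p in paths
    )

assert classify_paths(['s3://bucket/key', '.']) == 's3local'  # download
assert classify_paths(['.', 's3://bucket/key']) == 'locals3'  # upload
assert classify_paths(['s3://a', 's3://b']) == 's3s3'         # copy
assert classify_paths(['s3://bucket']) == 's3'                # mb/rb/rm
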
@@ -1438,8 +1729,9 @@ def add_endpoint_url(self, parsed_globals):
Adds endpoint_url to the parameters.
"""
if 'endpoint_url' in parsed_globals:
- self.parameters['endpoint_url'] = getattr(parsed_globals,
- 'endpoint_url')
+ self.parameters['endpoint_url'] = getattr(
+ parsed_globals, 'endpoint_url'
+ )
else:
self.parameters['endpoint_url'] = None
@@ -1448,7 +1740,8 @@ def add_verify_ssl(self, parsed_globals):
def add_sign_request(self, parsed_globals):
self.parameters['sign_request'] = getattr(
- parsed_globals, 'sign_request', True)
+ parsed_globals, 'sign_request', True
+ )
def add_page_size(self, parsed_args):
self.parameters['page_size'] = getattr(parsed_args, 'page_size', None)
diff --git a/awscli/customizations/s3/subscribers.py b/awscli/customizations/s3/subscribers.py
index 34bb2815d97d..73bd4c61e47b 100644
--- a/awscli/customizations/s3/subscribers.py
+++ b/awscli/customizations/s3/subscribers.py
@@ -15,12 +15,10 @@
import os
import time
+from awscli.customizations.s3 import utils
from botocore.utils import percent_encode_sequence
from s3transfer.subscribers import BaseSubscriber
-from awscli.customizations.s3 import utils
-
-
LOGGER = logging.getLogger(__name__)
@@ -37,10 +35,10 @@ class OnDoneFilteredSubscriber(BaseSubscriber):
It is really a convenience class so developers do not have to
constantly remember to have a general try/except around future.result()
"""
+
def on_done(self, future, **kwargs):
future_exception = None
try:
-
future.result()
except Exception as e:
future_exception = e
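
The remainder of on_done (outside this hunk) dispatches to _on_failure when an exception was captured and to _on_success otherwise, so a subclass only fills in those two hooks and never calls future.result() itself. A minimal illustrative subclass, assuming the class above is in scope:

class PrintResultSubscriber(OnDoneFilteredSubscriber):
    """Illustrative only: report how a transfer ended."""

    def _on_success(self, future):
        print('done:', future.meta.call_args.fileobj)

    def _on_failure(self, future, e):
        print('failed:', e)
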
@@ -62,6 +60,7 @@ class ProvideSizeSubscriber(BaseSubscriber):
"""
A subscriber which provides the transfer size before it's queued.
"""
+
def __init__(self, size):
self.size = size
@@ -73,12 +72,14 @@ def on_queued(self, future, **kwargs):
else:
LOGGER.debug(
'Not providing transfer size. Future: %s does not offer '
- 'the capability to notify the size of a transfer', future
+ 'the capability to notify the size of a transfer',
+ future,
)
class DeleteSourceSubscriber(OnDoneFilteredSubscriber):
"""A subscriber which deletes the source of the transfer."""
+
def _on_success(self, future):
try:
self._delete_source(future)
@@ -91,6 +92,7 @@ def _delete_source(self, future):
class DeleteSourceObjectSubscriber(DeleteSourceSubscriber):
"""A subscriber which deletes an object."""
+
def __init__(self, client):
self._client = client
@@ -104,16 +106,18 @@ def _delete_source(self, future):
call_args = future.meta.call_args
delete_object_kwargs = {
'Bucket': self._get_bucket(call_args),
- 'Key': self._get_key(call_args)
+ 'Key': self._get_key(call_args),
}
if call_args.extra_args.get('RequestPayer'):
delete_object_kwargs['RequestPayer'] = call_args.extra_args[
- 'RequestPayer']
+ 'RequestPayer'
+ ]
self._client.delete_object(**delete_object_kwargs)
class DeleteCopySourceObjectSubscriber(DeleteSourceObjectSubscriber):
"""A subscriber which deletes the copy source."""
+
def _get_bucket(self, call_args):
return call_args.copy_source['Bucket']
@@ -123,6 +127,7 @@ def _get_key(self, call_args):
class DeleteSourceFileSubscriber(DeleteSourceSubscriber):
"""A subscriber which deletes a file."""
+
def _delete_source(self, future):
os.remove(future.meta.call_args.fileobj)
@@ -146,6 +151,7 @@ def _get_filename(self, future):
class ProvideLastModifiedTimeSubscriber(OnDoneFilteredSubscriber):
"""Sets utime for a downloaded file"""
+
def __init__(self, last_modified_time, result_queue):
self._last_modified_time = last_modified_time
self._result_queue = result_queue
@@ -159,13 +165,16 @@ def _on_success(self, future, **kwargs):
except Exception as e:
warning_message = (
'Successfully Downloaded %s but was unable to update the '
- 'last modified time. %s' % (filename, e))
+ 'last modified time. %s' % (filename, e)
+ )
self._result_queue.put(
- utils.create_warning(filename, warning_message))
+ utils.create_warning(filename, warning_message)
+ )
class DirectoryCreatorSubscriber(BaseSubscriber):
"""Creates a directory to download if it does not exist"""
+
def on_queued(self, future, **kwargs):
d = os.path.dirname(future.meta.call_args.fileobj)
try:
@@ -174,7 +183,8 @@ def on_queued(self, future, **kwargs):
except OSError as e:
if not e.errno == errno.EEXIST:
raise CreateDirectoryError(
- "Could not create directory %s: %s" % (d, e))
+ "Could not create directory %s: %s" % (d, e)
+ )
class CopyPropsSubscriberFactory(object):
@@ -184,8 +194,9 @@ def __init__(self, client, transfer_config, cli_params):
self._cli_params = cli_params
def get_subscribers(self, fileinfo):
- copy_props = self._cli_params.get(
- 'copy_props', 'default').replace('-', '_')
+ copy_props = self._cli_params.get('copy_props', 'default').replace(
+ '-', '_'
+ )
return getattr(self, '_get_%s_subscribers' % copy_props)(fileinfo)
def _get_none_subscribers(self, fileinfo):
@@ -197,16 +208,18 @@ def _get_none_subscribers(self, fileinfo):
def _get_metadata_directive_subscribers(self, fileinfo):
return [
self._create_metadata_directive_props_subscriber(fileinfo),
- ReplaceTaggingDirectiveSubscriber()
+ ReplaceTaggingDirectiveSubscriber(),
]
def _get_default_subscribers(self, fileinfo):
return [
self._create_metadata_directive_props_subscriber(fileinfo),
SetTagsSubscriber(
- self._client, self._transfer_config, self._cli_params,
+ self._client,
+ self._transfer_config,
+ self._cli_params,
source_client=fileinfo.source_client,
- )
+ ),
]
def _create_metadata_directive_props_subscriber(self, fileinfo):
@@ -216,8 +229,9 @@ def _create_metadata_directive_props_subscriber(self, fileinfo):
'cli_params': self._cli_params,
}
if not self._cli_params.get('dir_op'):
- subscriber_kwargs[
- 'head_object_response'] = fileinfo.associated_response_data
+ subscriber_kwargs['head_object_response'] = (
+ fileinfo.associated_response_data
+ )
return SetMetadataDirectivePropsSubscriber(**subscriber_kwargs)
@@ -247,8 +261,9 @@ class SetMetadataDirectivePropsSubscriber(BaseSubscriber):
'Metadata',
]
- def __init__(self, client, transfer_config, cli_params,
- head_object_response=None):
+ def __init__(
+ self, client, transfer_config, cli_params, head_object_response=None
+ ):
self._client = client
self._transfer_config = transfer_config
self._cli_params = cli_params
@@ -280,7 +295,8 @@ def _get_head_object_response(self, future):
'Key': copy_source['Key'],
}
utils.RequestParamsMapper.map_head_object_params(
- head_object_params, self._cli_params)
+ head_object_params, self._cli_params
+ )
return self._client.head_object(**head_object_params)
def _inject_metadata_props(self, future, head_object_response):
@@ -333,19 +349,14 @@ def _put_object_tagging(self, bucket, key, tag_set):
extra_args, self._cli_params
)
self._client.put_object_tagging(
- Bucket=bucket,
- Key=key,
- Tagging={'TagSet': tag_set},
- **extra_args
+ Bucket=bucket, Key=key, Tagging={'TagSet': tag_set}, **extra_args
)
def _delete_object(self, bucket, key):
- params = {
- 'Bucket': bucket,
- 'Key': key
- }
+ params = {'Bucket': bucket, 'Key': key}
utils.RequestParamsMapper.map_delete_object_params(
- params, self._cli_params)
+ params, self._cli_params
+ )
self._client.delete_object(**params)
def _get_bucket_key_from_copy_source(self, future):
@@ -358,16 +369,20 @@ def _get_tags(self, bucket, key):
extra_args, self._cli_params
)
get_tags_response = self._source_client.get_object_tagging(
- Bucket=bucket, Key=key, **extra_args)
+ Bucket=bucket, Key=key, **extra_args
+ )
return get_tags_response['TagSet']
def _fits_in_tagging_header(self, tagging_header):
- return len(
- tagging_header.encode('utf-8')) <= self._MAX_TAGGING_HEADER_SIZE
+ return (
+ len(tagging_header.encode('utf-8'))
+ <= self._MAX_TAGGING_HEADER_SIZE
+ )
def _serialize_to_header_value(self, tags):
return percent_encode_sequence(
- [(tag['Key'], tag['Value']) for tag in tags])
+ [(tag['Key'], tag['Value']) for tag in tags]
+ )
def _is_multipart_copy(self, future):
return future.meta.size >= self._transfer_config.multipart_threshold
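
The subscriber hunks above are pure reformatting, but the pattern they keep
touching is worth spelling out once. A minimal sketch of the filtered
subscriber idea (illustrative names, not the shipped classes):

    from s3transfer.subscribers import BaseSubscriber

    class OnDoneFiltered(BaseSubscriber):
        """Route a finished transfer to success/failure hooks so that
        subclasses never need their own try/except around result()."""

        def on_done(self, future, **kwargs):
            try:
                future.result()
            except Exception as e:
                self._on_failure(future, e)
            else:
                self._on_success(future)

        def _on_success(self, future):
            pass

        def _on_failure(self, future, e):
            pass

Subclasses such as DeleteSourceSubscriber then only implement the hook they
care about, which is exactly the shape the hunks above reformat.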
diff --git a/awscli/customizations/s3/syncstrategy/base.py b/awscli/customizations/s3/syncstrategy/base.py
index 6f134082358c..e903516cf24e 100644
--- a/awscli/customizations/s3/syncstrategy/base.py
+++ b/awscli/customizations/s3/syncstrategy/base.py
@@ -14,11 +14,13 @@
from awscli.customizations.exceptions import ParamValidationError
-
LOG = logging.getLogger(__name__)
-VALID_SYNC_TYPES = ['file_at_src_and_dest', 'file_not_at_dest',
- 'file_not_at_src']
+VALID_SYNC_TYPES = [
+ 'file_at_src_and_dest',
+ 'file_not_at_dest',
+ 'file_not_at_src',
+]
class BaseSync(object):
@@ -69,8 +71,7 @@ def _check_sync_type(self, sync_type):
if sync_type not in VALID_SYNC_TYPES:
raise ParamValidationError(
"Unknown sync_type: %s.\n"
- "Valid options are %s." %
- (sync_type, VALID_SYNC_TYPES)
+ "Valid options are %s." % (sync_type, VALID_SYNC_TYPES)
)
@property
@@ -80,8 +81,7 @@ def sync_type(self):
def register_strategy(self, session):
"""Registers the sync strategy class to the given session."""
- session.register('building-arg-table.s3_sync',
- self.add_sync_argument)
+ session.register('building-arg-table.s3_sync', self.add_sync_argument)
session.register('choosing-s3-sync-strategy', self.use_sync_strategy)
def determine_should_sync(self, src_file, dest_file):
@@ -118,7 +118,7 @@ def determine_should_sync(self, src_file, dest_file):
'file_not_at_dest': refers to ``src_file``
'file_not_at_src': refers to ``dest_file``
- """
+ """
raise NotImplementedError("determine_should_sync")
@@ -187,8 +187,9 @@ def total_seconds(self, td):
:param td: The difference between two datetime objects.
"""
- return (td.microseconds + (td.seconds + td.days * 24 *
- 3600) * 10**6) / 10**6
+ return (
+ td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6
+ ) / 10**6
def compare_size(self, src_file, dest_file):
"""
@@ -218,7 +219,6 @@ def compare_time(self, src_file, dest_file):
# at the source location.
return False
elif cmd == "download":
-
if self.total_seconds(delta) <= 0:
return True
else:
@@ -228,7 +228,6 @@ def compare_time(self, src_file, dest_file):
class SizeAndLastModifiedSync(BaseSync):
-
def determine_should_sync(self, src_file, dest_file):
same_size = self.compare_size(src_file, dest_file)
same_last_modified_time = self.compare_time(src_file, dest_file)
@@ -236,9 +235,13 @@ def determine_should_sync(self, src_file, dest_file):
if should_sync:
LOG.debug(
"syncing: %s -> %s, size: %s -> %s, modified time: %s -> %s",
- src_file.src, src_file.dest,
- src_file.size, dest_file.size,
- src_file.last_update, dest_file.last_update)
+ src_file.src,
+ src_file.dest,
+ src_file.size,
+ dest_file.size,
+ src_file.last_update,
+ dest_file.last_update,
+ )
return should_sync
@@ -255,6 +258,9 @@ def __init__(self, sync_type='file_not_at_dest'):
super(MissingFileSync, self).__init__(sync_type)
def determine_should_sync(self, src_file, dest_file):
- LOG.debug("syncing: %s -> %s, file does not exist at destination",
- src_file.src, src_file.dest)
+ LOG.debug(
+ "syncing: %s -> %s, file does not exist at destination",
+ src_file.src,
+ src_file.dest,
+ )
return True
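
The total_seconds() expression reindented above is the classic manual
expansion of datetime.timedelta.total_seconds(); a quick check that the
reformatting preserves the arithmetic:

    from datetime import timedelta

    td = timedelta(days=1, seconds=30, microseconds=500000)
    manual = (
        td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6
    ) / 10**6
    assert manual == td.total_seconds() == 86430.5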
diff --git a/awscli/customizations/s3/syncstrategy/delete.py b/awscli/customizations/s3/syncstrategy/delete.py
index 9858b264e44c..e5512b17c116 100644
--- a/awscli/customizations/s3/syncstrategy/delete.py
+++ b/awscli/customizations/s3/syncstrategy/delete.py
@@ -14,24 +14,29 @@
from awscli.customizations.s3.syncstrategy.base import BaseSync
-
LOG = logging.getLogger(__name__)
-DELETE = {'name': 'delete', 'action': 'store_true',
- 'help_text': (
- "Files that exist in the destination but not in the source are "
- "deleted during sync. Note that files excluded by filters are "
- "excluded from deletion.")}
+DELETE = {
+ 'name': 'delete',
+ 'action': 'store_true',
+ 'help_text': (
+ "Files that exist in the destination but not in the source are "
+ "deleted during sync. Note that files excluded by filters are "
+ "excluded from deletion."
+ ),
+}
class DeleteSync(BaseSync):
-
ARGUMENT = DELETE
def determine_should_sync(self, src_file, dest_file):
dest_file.operation_name = 'delete'
- LOG.debug("syncing: (None) -> %s (remove), file does not "
- "exist at source (%s) and delete mode enabled",
- dest_file.src, dest_file.dest)
+ LOG.debug(
+ "syncing: (None) -> %s (remove), file does not "
+ "exist at source (%s) and delete mode enabled",
+ dest_file.src,
+ dest_file.dest,
+ )
return True
diff --git a/awscli/customizations/s3/syncstrategy/exacttimestamps.py b/awscli/customizations/s3/syncstrategy/exacttimestamps.py
index 564e6eeb4bf4..7921e9ad94a7 100644
--- a/awscli/customizations/s3/syncstrategy/exacttimestamps.py
+++ b/awscli/customizations/s3/syncstrategy/exacttimestamps.py
@@ -14,21 +14,23 @@
from awscli.customizations.s3.syncstrategy.base import SizeAndLastModifiedSync
-
LOG = logging.getLogger(__name__)
-EXACT_TIMESTAMPS = {'name': 'exact-timestamps', 'action': 'store_true',
- 'help_text': (
- 'When syncing from S3 to local, same-sized '
- 'items will be ignored only when the timestamps '
- 'match exactly. The default behavior is to ignore '
- 'same-sized items unless the local version is newer '
- 'than the S3 version.')}
+EXACT_TIMESTAMPS = {
+ 'name': 'exact-timestamps',
+ 'action': 'store_true',
+ 'help_text': (
+ 'When syncing from S3 to local, same-sized '
+ 'items will be ignored only when the timestamps '
+ 'match exactly. The default behavior is to ignore '
+ 'same-sized items unless the local version is newer '
+ 'than the S3 version.'
+ ),
+}
class ExactTimestampsSync(SizeAndLastModifiedSync):
-
ARGUMENT = EXACT_TIMESTAMPS
def compare_time(self, src_file, dest_file):
@@ -39,5 +41,6 @@ def compare_time(self, src_file, dest_file):
if cmd == 'download':
return self.total_seconds(delta) == 0
else:
- return super(ExactTimestampsSync, self).compare_time(src_file,
- dest_file)
+ return super(ExactTimestampsSync, self).compare_time(
+ src_file, dest_file
+ )
diff --git a/awscli/customizations/s3/syncstrategy/register.py b/awscli/customizations/s3/syncstrategy/register.py
index b75674dcb99b..13f2c35c0620 100644
--- a/awscli/customizations/s3/syncstrategy/register.py
+++ b/awscli/customizations/s3/syncstrategy/register.py
@@ -10,14 +10,16 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.s3.syncstrategy.sizeonly import SizeOnlySync
-from awscli.customizations.s3.syncstrategy.exacttimestamps import \
- ExactTimestampsSync
from awscli.customizations.s3.syncstrategy.delete import DeleteSync
+from awscli.customizations.s3.syncstrategy.exacttimestamps import (
+ ExactTimestampsSync,
+)
+from awscli.customizations.s3.syncstrategy.sizeonly import SizeOnlySync
-def register_sync_strategy(session, strategy_cls,
- sync_type='file_at_src_and_dest'):
+def register_sync_strategy(
+ session, strategy_cls, sync_type='file_at_src_and_dest'
+):
"""Registers a single sync strategy
:param session: The session that the sync strategy is being registered to.
diff --git a/awscli/customizations/s3/syncstrategy/sizeonly.py b/awscli/customizations/s3/syncstrategy/sizeonly.py
index e83d0fd7be5d..109e21fa4e7c 100644
--- a/awscli/customizations/s3/syncstrategy/sizeonly.py
+++ b/awscli/customizations/s3/syncstrategy/sizeonly.py
@@ -14,24 +14,30 @@
from awscli.customizations.s3.syncstrategy.base import BaseSync
-
LOG = logging.getLogger(__name__)
-SIZE_ONLY = {'name': 'size-only', 'action': 'store_true',
- 'help_text': (
- 'Makes the size of each key the only criteria used to '
- 'decide whether to sync from source to destination.')}
+SIZE_ONLY = {
+ 'name': 'size-only',
+ 'action': 'store_true',
+ 'help_text': (
+        'Makes the size of each key the only criterion used to '
+ 'decide whether to sync from source to destination.'
+ ),
+}
class SizeOnlySync(BaseSync):
-
ARGUMENT = SIZE_ONLY
def determine_should_sync(self, src_file, dest_file):
same_size = self.compare_size(src_file, dest_file)
should_sync = not same_size
if should_sync:
- LOG.debug("syncing: %s -> %s, size_changed: %s",
- src_file.src, src_file.dest, not same_size)
+ LOG.debug(
+ "syncing: %s -> %s, size_changed: %s",
+ src_file.src,
+ src_file.dest,
+ not same_size,
+ )
return should_sync
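
Every strategy in this directory implements the same contract:
determine_should_sync(src_file, dest_file) returns True to enqueue a
transfer. A sketch of a custom strategy under that contract (it assumes
FileStat-like objects with size and last_update attributes, as the shipped
strategies do):

    from awscli.customizations.s3.syncstrategy.base import BaseSync

    class NewerOrResizedSync(BaseSync):
        def determine_should_sync(self, src_file, dest_file):
            # Copy when the size differs or the source is strictly newer.
            size_changed = src_file.size != dest_file.size
            src_newer = src_file.last_update > dest_file.last_update
            return size_changed or src_newer

Such a class would be wired in through register_sync_strategy() from
register.py, reformatted just above.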
diff --git a/awscli/customizations/s3/transferconfig.py b/awscli/customizations/s3/transferconfig.py
index 5764c6f9772d..95057ea6f1e8 100644
--- a/awscli/customizations/s3/transferconfig.py
+++ b/awscli/customizations/s3/transferconfig.py
@@ -10,21 +10,20 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from s3transfer.manager import TransferConfig
-
-from awscli.customizations.s3 import constants
-from awscli.customizations.s3.utils import human_readable_to_int
# If the user does not specify any overrides,
# these are the default values we use for the s3 transfer
# commands.
import logging
+from awscli.customizations.s3 import constants
+from awscli.customizations.s3.utils import human_readable_to_int
+from s3transfer.manager import TransferConfig
LOGGER = logging.getLogger(__name__)
DEFAULTS = {
- 'multipart_threshold': 8 * (1024 ** 2),
- 'multipart_chunksize': 8 * (1024 ** 2),
+ 'multipart_threshold': 8 * (1024**2),
+ 'multipart_chunksize': 8 * (1024**2),
'max_concurrent_requests': 10,
'max_queue_size': 1000,
'max_bandwidth': None,
@@ -38,10 +37,14 @@ class InvalidConfigError(Exception):
class RuntimeConfig(object):
-
- POSITIVE_INTEGERS = ['multipart_chunksize', 'multipart_threshold',
- 'max_concurrent_requests', 'max_queue_size',
- 'max_bandwidth', 'target_bandwidth']
+ POSITIVE_INTEGERS = [
+ 'multipart_chunksize',
+ 'multipart_threshold',
+ 'max_concurrent_requests',
+ 'max_queue_size',
+ 'max_bandwidth',
+ 'target_bandwidth',
+ ]
HUMAN_READABLE_SIZES = ['multipart_chunksize', 'multipart_threshold']
HUMAN_READABLE_RATES = ['max_bandwidth', 'target_bandwidth']
SUPPORTED_CHOICES = {
@@ -107,7 +110,8 @@ def _convert_human_readable_rates(self, runtime_config):
'as an integer in terms of bytes per second '
'(e.g. 10485760) or a rate in terms of bytes '
'per second (e.g. 10MB/s or 800KB/s) or bits per '
- 'second (e.g. 10Mb/s or 800Kb/s)' % value)
+ 'second (e.g. 10Mb/s or 800Kb/s)' % value
+ )
def _human_readable_rate_to_int(self, value):
# The human_readable_to_int() utility only supports integers (e.g. 1024)
@@ -145,7 +149,9 @@ def _resolve_choice_aliases(self, runtime_config):
resolved_value = self.CHOICE_ALIASES[attr][current_value]
LOGGER.debug(
'Resolved %s configuration alias value "%s" to "%s"',
- attr, current_value, resolved_value
+ attr,
+ current_value,
+ resolved_value,
)
runtime_config[attr] = resolved_value
@@ -173,7 +179,8 @@ def _validate_choices(self, runtime_config):
def _error_positive_value(self, name, value):
raise InvalidConfigError(
- "Value for %s must be a positive integer: %s" % (name, value))
+ "Value for %s must be a positive integer: %s" % (name, value)
+ )
def _error_invalid_choice(self, name, value):
raise InvalidConfigError(
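
Note that the ``1024**2`` spacing change in DEFAULTS is purely cosmetic;
the defaults are still 8 MiB:

    # Same value before and after the reformat: 8 MiB.
    assert 8 * (1024**2) == 8 * (1024 ** 2) == 8388608

These keys correspond to the documented s3 configuration settings, so a
profile can still override them with, for example,
``aws configure set default.s3.multipart_chunksize 16MB``, which
RuntimeConfig then validates and converts as shown above.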
diff --git a/awscli/customizations/s3/utils.py b/awscli/customizations/s3/utils.py
index d8dedb710f98..a5d954d3ec7f 100644
--- a/awscli/customizations/s3/utils.py
+++ b/awscli/customizations/s3/utils.py
@@ -11,19 +11,18 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
+import errno
import logging
-from datetime import datetime
import mimetypes
-import errno
import os
import re
-from collections import namedtuple, deque
+from collections import deque, namedtuple
+from datetime import datetime
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
-from awscli.compat import bytes_print
-from awscli.compat import queue
+from awscli.compat import bytes_print, queue
from awscli.customizations.exceptions import ParamValidationError
LOGGER = logging.getLogger(__name__)
@@ -31,16 +30,16 @@
EPOCH_TIME = datetime(1970, 1, 1, tzinfo=tzutc())
# Maximum object size allowed in S3.
# See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
-MAX_UPLOAD_SIZE = 5 * (1024 ** 4)
+MAX_UPLOAD_SIZE = 5 * (1024**4)
SIZE_SUFFIX = {
'kb': 1024,
- 'mb': 1024 ** 2,
- 'gb': 1024 ** 3,
- 'tb': 1024 ** 4,
+ 'mb': 1024**2,
+ 'gb': 1024**3,
+ 'tb': 1024**4,
'kib': 1024,
- 'mib': 1024 ** 2,
- 'gib': 1024 ** 3,
- 'tib': 1024 ** 4,
+ 'mib': 1024**2,
+ 'gib': 1024**3,
+ 'tib': 1024**4,
}
_S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile(
     r'^(?P<bucket>arn:(aws).*:s3:[a-z\-0-9]*:[0-9]{12}:accesspoint[:/][^/]+)/?'
@@ -90,7 +89,7 @@ def human_readable_size(value):
return '%d Bytes' % bytes_int
for i, suffix in enumerate(HUMANIZE_SUFFIXES):
- unit = base ** (i+2)
+ unit = base ** (i + 2)
if round((bytes_int / unit) * base) < base:
return '%.1f %s' % ((base * bytes_int / unit), suffix)
@@ -110,8 +109,7 @@ def human_readable_to_int(value):
suffix = value[-3:].lower()
else:
suffix = value[-2:].lower()
- has_size_identifier = (
- len(value) >= 2 and suffix in SIZE_SUFFIX)
+ has_size_identifier = len(value) >= 2 and suffix in SIZE_SUFFIX
if not has_size_identifier:
try:
return int(value)
@@ -119,7 +117,7 @@ def human_readable_to_int(value):
raise ValueError("Invalid size value: %s" % value)
else:
multiplier = SIZE_SUFFIX[suffix]
- return int(value[:-len(suffix)]) * multiplier
+ return int(value[: -len(suffix)]) * multiplier
class AppendFilter(argparse.Action):
@@ -136,6 +134,7 @@ class AppendFilter(argparse.Action):
     appear later in the command line take precedence over filters that
appear earlier.
"""
+
def __call__(self, parser, namespace, values, option_string=None):
filter_list = getattr(namespace, self.dest)
if filter_list:
@@ -170,6 +169,7 @@ class outside of that context. In order for this to be the case,
(least important) priority available.
"""
+
def __init__(self, maxsize=0, max_priority=20):
queue.Queue.__init__(self, maxsize=maxsize)
self.priorities = [deque([]) for i in range(max_priority + 1)]
@@ -182,8 +182,10 @@ def _qsize(self):
return size
def _put(self, item):
- priority = min(getattr(item, 'PRIORITY', self.default_priority),
- self.default_priority)
+ priority = min(
+ getattr(item, 'PRIORITY', self.default_priority),
+ self.default_priority,
+ )
self.priorities[priority].append(item)
def _get(self):
@@ -252,8 +254,9 @@ def get_file_stat(path):
try:
stats = os.stat(path)
except IOError as e:
- raise ValueError('Could not retrieve file stat of "%s": %s' % (
- path, e))
+ raise ValueError(
+ 'Could not retrieve file stat of "%s": %s' % (path, e)
+ )
try:
update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal())
@@ -284,14 +287,15 @@ def find_dest_path_comp_key(files, src_path=None):
sep_table = {'s3': '/', 'local': os.sep}
if files['dir_op']:
- rel_path = src_path[len(src['path']):]
+ rel_path = src_path[len(src['path']) :]
else:
rel_path = src_path.split(sep_table[src_type])[-1]
compare_key = rel_path.replace(sep_table[src_type], '/')
if files['use_src_name']:
dest_path = dest['path']
- dest_path += rel_path.replace(sep_table[src_type],
- sep_table[dest_type])
+ dest_path += rel_path.replace(
+ sep_table[src_type], sep_table[dest_type]
+ )
else:
dest_path = dest['path']
return dest_path, compare_key
@@ -305,8 +309,9 @@ def create_warning(path, error_message, skip_file=True):
if skip_file:
print_string = print_string + "Skipping file " + path + ". "
print_string = print_string + error_message
- warning_message = WarningResult(message=print_string, error=False,
- warning=True)
+ warning_message = WarningResult(
+ message=print_string, error=False, warning=True
+ )
return warning_message
@@ -315,6 +320,7 @@ class StdoutBytesWriter(object):
This class acts as a file-like object that performs the bytes_print
function on write.
"""
+
def __init__(self, stdout=None):
self._stdout = stdout
@@ -345,7 +351,9 @@ def guess_content_type(filename):
except UnicodeDecodeError:
LOGGER.debug(
'Unable to guess content type for %s due to '
- 'UnicodeDecodeError: ', filename, exc_info=True
+ 'UnicodeDecodeError: ',
+ filename,
+ exc_info=True,
)
@@ -381,8 +389,11 @@ def set_file_utime(filename, desired_time):
if e.errno != errno.EPERM:
raise e
raise SetFileUtimeError(
- ("The file was downloaded, but attempting to modify the "
- "utime of the file failed. Is the file owned by another user?"))
+ (
+ "The file was downloaded, but attempting to modify the "
+ "utime of the file failed. Is the file owned by another user?"
+ )
+ )
class SetFileUtimeError(Exception):
@@ -395,13 +406,18 @@ def _date_parser(date_string):
class BucketLister(object):
"""List keys in a bucket."""
+
def __init__(self, client, date_parser=_date_parser):
self._client = client
self._date_parser = date_parser
- def list_objects(self, bucket, prefix=None, page_size=None,
- extra_args=None):
- kwargs = {'Bucket': bucket, 'PaginationConfig': {'PageSize': page_size}}
+ def list_objects(
+ self, bucket, prefix=None, page_size=None, extra_args=None
+ ):
+ kwargs = {
+ 'Bucket': bucket,
+ 'PaginationConfig': {'PageSize': page_size},
+ }
if prefix is not None:
kwargs['Prefix'] = prefix
if extra_args is not None:
@@ -414,12 +430,14 @@ def list_objects(self, bucket, prefix=None, page_size=None,
for content in contents:
source_path = bucket + '/' + content['Key']
content['LastModified'] = self._date_parser(
- content['LastModified'])
+ content['LastModified']
+ )
yield source_path, content
-class PrintTask(namedtuple('PrintTask',
- ['message', 'error', 'total_parts', 'warning'])):
+class PrintTask(
+ namedtuple('PrintTask', ['message', 'error', 'total_parts', 'warning'])
+):
def __new__(cls, message, error=False, total_parts=None, warning=None):
"""
:param message: An arbitrary string associated with the entry. This
@@ -428,8 +446,10 @@ def __new__(cls, message, error=False, total_parts=None, warning=None):
:param total_parts: The total number of parts for multipart transfers.
:param warning: Boolean indicating a warning
"""
- return super(PrintTask, cls).__new__(cls, message, error, total_parts,
- warning)
+ return super(PrintTask, cls).__new__(
+ cls, message, error, total_parts, warning
+ )
+
WarningResult = PrintTask
@@ -462,6 +482,7 @@ class RequestParamsMapper(object):
     Note that existing parameters in ``request_params`` will be overridden if
a parameter in ``cli_params`` maps to the existing parameter.
"""
+
@classmethod
def map_put_object_params(cls, request_params, cli_params):
"""Map CLI params to PutObject request params"""
@@ -498,7 +519,8 @@ def map_copy_object_params(cls, request_params, cli_params):
cls._auto_populate_metadata_directive(request_params)
cls._set_sse_request_params(request_params, cli_params)
cls._set_sse_c_and_copy_source_request_params(
- request_params, cli_params)
+ request_params, cli_params
+ )
cls._set_request_payer_param(request_params, cli_params)
cls._set_checksum_algorithm_param(request_params, cli_params)
@@ -527,7 +549,8 @@ def map_upload_part_params(cls, request_params, cli_params):
def map_upload_part_copy_params(cls, request_params, cli_params):
"""Map CLI params to UploadPartCopy request params"""
cls._set_sse_c_and_copy_source_request_params(
- request_params, cli_params)
+ request_params, cli_params
+ )
cls._set_request_payer_param(request_params, cli_params)
@classmethod
@@ -551,7 +574,9 @@ def _set_checksum_mode_param(cls, request_params, cli_params):
@classmethod
def _set_checksum_algorithm_param(cls, request_params, cli_params):
if cli_params.get('checksum_algorithm'):
- request_params['ChecksumAlgorithm'] = cli_params['checksum_algorithm']
+ request_params['ChecksumAlgorithm'] = cli_params[
+ 'checksum_algorithm'
+ ]
@classmethod
def _set_general_object_params(cls, request_params, cli_params):
@@ -567,7 +592,7 @@ def _set_general_object_params(cls, request_params, cli_params):
'content_disposition': 'ContentDisposition',
'content_encoding': 'ContentEncoding',
'content_language': 'ContentLanguage',
- 'expires': 'Expires'
+ 'expires': 'Expires',
}
for cli_param_name in general_param_translation:
if cli_params.get(cli_param_name):
@@ -608,21 +633,23 @@ def _set_metadata_params(cls, request_params, cli_params):
@classmethod
def _auto_populate_metadata_directive(cls, request_params):
- if request_params.get('Metadata') and \
- not request_params.get('MetadataDirective'):
+ if request_params.get('Metadata') and not request_params.get(
+ 'MetadataDirective'
+ ):
request_params['MetadataDirective'] = 'REPLACE'
@classmethod
def _set_metadata_directive_param(cls, request_params, cli_params):
if cli_params.get('metadata_directive'):
request_params['MetadataDirective'] = cli_params[
- 'metadata_directive']
+ 'metadata_directive'
+ ]
@classmethod
def _set_sse_request_params(cls, request_params, cli_params):
if cli_params.get('sse'):
request_params['ServerSideEncryption'] = cli_params['sse']
- if cli_params.get('sse_kms_key_id'):
+ if cli_params.get('sse_kms_key_id'):
request_params['SSEKMSKeyId'] = cli_params['sse_kms_key_id']
@classmethod
@@ -635,13 +662,16 @@ def _set_sse_c_request_params(cls, request_params, cli_params):
def _set_sse_c_copy_source_request_params(cls, request_params, cli_params):
if cli_params.get('sse_c_copy_source'):
request_params['CopySourceSSECustomerAlgorithm'] = cli_params[
- 'sse_c_copy_source']
+ 'sse_c_copy_source'
+ ]
request_params['CopySourceSSECustomerKey'] = cli_params[
- 'sse_c_copy_source_key']
+ 'sse_c_copy_source_key'
+ ]
@classmethod
- def _set_sse_c_and_copy_source_request_params(cls, request_params,
- cli_params):
+ def _set_sse_c_and_copy_source_request_params(
+ cls, request_params, cli_params
+ ):
cls._set_sse_c_request_params(request_params, cli_params)
cls._set_sse_c_copy_source_request_params(request_params, cli_params)
@@ -664,6 +694,7 @@ class NonSeekableStream(object):
for certain that a fileobj is non seekable.
"""
+
def __init__(self, fileobj):
self._fileobj = fileobj
@@ -696,10 +727,12 @@ def __init__(self, s3control_client, sts_client):
def has_underlying_s3_path(self, path):
bucket, _ = split_s3_bucket_key(path)
return bool(
- self._S3_ACCESSPOINT_ARN_TO_ACCOUNT_NAME_REGEX.match(bucket) or
- self._S3_OUTPOST_ACCESSPOINT_ARN_TO_ACCOUNT_REGEX.match(bucket) or
- self._S3_MRAP_ARN_TO_ACCOUNT_ALIAS_REGEX.match(bucket) or
- bucket.endswith('-s3alias') or bucket.endswith('--op-s3'))
+ self._S3_ACCESSPOINT_ARN_TO_ACCOUNT_NAME_REGEX.match(bucket)
+ or self._S3_OUTPOST_ACCESSPOINT_ARN_TO_ACCOUNT_REGEX.match(bucket)
+ or self._S3_MRAP_ARN_TO_ACCOUNT_ALIAS_REGEX.match(bucket)
+ or bucket.endswith('-s3alias')
+ or bucket.endswith('--op-s3')
+ )
@classmethod
def from_session(cls, session, region, verify_ssl):
@@ -756,8 +789,7 @@ def _resolve_mrap_alias(self, account, alias, key):
def _get_access_point_bucket(self, account, name):
return self._s3control_client.get_access_point(
- AccountId=account,
- Name=name
+ AccountId=account, Name=name
)['Bucket']
def _get_account_id(self):
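
The size helpers touched in this file are easy to sanity-check against the
SIZE_SUFFIX table above (expected values, assuming the helpers behave as
the hunks show):

    from awscli.customizations.s3.utils import (
        human_readable_size,
        human_readable_to_int,
    )

    assert human_readable_to_int('10MB') == 10 * 1024**2  # suffixes are binary
    assert human_readable_to_int('1024') == 1024  # bare integers pass through
    assert human_readable_size(5 * 1024**4) == '5.0 TiB'  # MAX_UPLOAD_SIZE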
diff --git a/awscli/customizations/s3errormsg.py b/awscli/customizations/s3errormsg.py
index a7a0b9eb4f32..e4eabf442d96 100644
--- a/awscli/customizations/s3errormsg.py
+++ b/awscli/customizations/s3errormsg.py
@@ -10,9 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-"""Give better S3 error messages.
-"""
-
+"""Give better S3 error messages."""
REGION_ERROR_MSG = (
'You can fix this issue by explicitly providing the correct region '
@@ -54,8 +52,9 @@ def enhance_error_msg(parsed, **kwargs):
def _is_sigv4_error_message(parsed):
- return ('Please use AWS4-HMAC-SHA256' in
- parsed.get('Error', {}).get('Message', ''))
+ return 'Please use AWS4-HMAC-SHA256' in parsed.get('Error', {}).get(
+ 'Message', ''
+ )
def _is_permanent_redirect_message(parsed):
@@ -63,5 +62,7 @@ def _is_permanent_redirect_message(parsed):
def _is_kms_sigv4_error_message(parsed):
- return ('AWS KMS managed keys require AWS Signature Version 4' in
- parsed.get('Error', {}).get('Message', ''))
+ return (
+ 'AWS KMS managed keys require AWS Signature Version 4'
+ in parsed.get('Error', {}).get('Message', '')
+ )
diff --git a/awscli/customizations/s3events.py b/awscli/customizations/s3events.py
index 122c4ca14be7..2a0c31d307a7 100644
--- a/awscli/customizations/s3events.py
+++ b/awscli/customizations/s3events.py
@@ -11,8 +11,8 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Add S3 specific event streaming output arg."""
-from awscli.arguments import CustomArgument
+from awscli.arguments import CustomArgument
STREAM_HELP_TEXT = 'Filename where the records will be saved'
@@ -24,28 +24,29 @@ class DocSectionNotFoundError(Exception):
def register_event_stream_arg(event_handlers):
event_handlers.register(
'building-argument-table.s3api.select-object-content',
- add_event_stream_output_arg)
+ add_event_stream_output_arg,
+ )
event_handlers.register_last(
- 'doc-output.s3api.select-object-content',
- replace_event_stream_docs
+ 'doc-output.s3api.select-object-content', replace_event_stream_docs
)
def register_document_expires_string(event_handlers):
- event_handlers.register_last(
- 'doc-output.s3api',
- document_expires_string
- )
+ event_handlers.register_last('doc-output.s3api', document_expires_string)
-def add_event_stream_output_arg(argument_table, operation_model,
- session, **kwargs):
+def add_event_stream_output_arg(
+ argument_table, operation_model, session, **kwargs
+):
argument_table['outfile'] = S3SelectStreamOutputArgument(
- name='outfile', help_text=STREAM_HELP_TEXT,
- cli_type_name='string', positional_arg=True,
+ name='outfile',
+ help_text=STREAM_HELP_TEXT,
+ cli_type_name='string',
+ positional_arg=True,
stream_key=operation_model.output_shape.serialization['payload'],
- session=session)
+ session=session,
+ )
def replace_event_stream_docs(help_command, **kwargs):
@@ -59,10 +60,13 @@ def replace_event_stream_docs(help_command, **kwargs):
# we should be raising something with a helpful error message.
raise DocSectionNotFoundError(
'Could not find the "output" section for the command: %s'
- % help_command)
+ % help_command
+ )
doc.write('======\nOutput\n======\n')
- doc.write("This command generates no output. The selected "
- "object content is written to the specified outfile.\n")
+ doc.write(
+ "This command generates no output. The selected "
+ "object content is written to the specified outfile.\n"
+ )
def document_expires_string(help_command, **kwargs):
@@ -81,7 +85,7 @@ def document_expires_string(help_command, **kwargs):
f'\n\n{" " * doc.style.indentation * doc.style.indent_width}',
'ExpiresString -> (string)\n\n',
'\tThe raw, unparsed value of the ``Expires`` field.',
- f'\n\n{" " * doc.style.indentation * doc.style.indent_width}'
+ f'\n\n{" " * doc.style.indentation * doc.style.indent_width}',
]
for idx, write in enumerate(deprecation_note_and_expires_string):
@@ -103,8 +107,9 @@ def __init__(self, stream_key, session, **kwargs):
def add_to_params(self, parameters, value):
self._output_file = value
- self._session.register('after-call.s3.SelectObjectContent',
- self.save_file)
+ self._session.register(
+ 'after-call.s3.SelectObjectContent', self.save_file
+ )
def save_file(self, parsed, **kwargs):
# This method is hooked into after-call which fires
diff --git a/awscli/customizations/s3uploader.py b/awscli/customizations/s3uploader.py
index e640b94ba55a..da9cf4f01264 100644
--- a/awscli/customizations/s3uploader.py
+++ b/awscli/customizations/s3uploader.py
@@ -13,17 +13,16 @@
import hashlib
import logging
-import threading
import os
import sys
+import threading
import botocore
import botocore.exceptions
+from awscli.compat import collections_abc
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
-from awscli.compat import collections_abc
-
LOG = logging.getLogger(__name__)
@@ -33,11 +32,12 @@ def __init__(self, **kwargs):
Exception.__init__(self, msg)
self.kwargs = kwargs
-
- fmt = ("S3 Bucket does not exist. "
- "Execute the command to create a new bucket"
- "\n"
- "aws s3 mb s3://{bucket_name}")
+ fmt = (
+ "S3 Bucket does not exist. "
+ "Execute the command to create a new bucket"
+ "\n"
+ "aws s3 mb s3://{bucket_name}"
+ )
class S3Uploader(object):
@@ -59,12 +59,15 @@ def artifact_metadata(self, val):
raise TypeError("Artifact metadata should be in dict type")
self._artifact_metadata = val
- def __init__(self, s3_client,
- bucket_name,
- prefix=None,
- kms_key_id=None,
- force_upload=False,
- transfer_manager=None):
+ def __init__(
+ self,
+ s3_client,
+ bucket_name,
+ prefix=None,
+ kms_key_id=None,
+ force_upload=False,
+ transfer_manager=None,
+ ):
self.bucket_name = bucket_name
self.prefix = prefix
self.kms_key_id = kms_key_id or None
@@ -90,17 +93,16 @@ def upload(self, file_name, remote_path):
# Check if a file with same data exists
if not self.force_upload and self.file_exists(remote_path):
- LOG.debug("File with same data already exists at {0}. "
- "Skipping upload".format(remote_path))
+ LOG.debug(
+ "File with same data already exists at {0}. "
+ "Skipping upload".format(remote_path)
+ )
return self.make_url(remote_path)
try:
-
# Default to regular server-side encryption unless customer has
# specified their own KMS keys
- additional_args = {
- "ServerSideEncryption": "AES256"
- }
+ additional_args = {"ServerSideEncryption": "AES256"}
if self.kms_key_id:
additional_args["ServerSideEncryption"] = "aws:kms"
@@ -109,13 +111,16 @@ def upload(self, file_name, remote_path):
if self.artifact_metadata:
additional_args["Metadata"] = self.artifact_metadata
- print_progress_callback = \
- ProgressPercentage(file_name, remote_path)
- future = self.transfer_manager.upload(file_name,
- self.bucket_name,
- remote_path,
- additional_args,
- [print_progress_callback])
+ print_progress_callback = ProgressPercentage(
+ file_name, remote_path
+ )
+ future = self.transfer_manager.upload(
+ file_name,
+ self.bucket_name,
+ remote_path,
+ additional_args,
+ [print_progress_callback],
+ )
future.result()
return self.make_url(remote_path)
@@ -157,8 +162,7 @@ def file_exists(self, remote_path):
try:
# Find the object that matches this ETag
- self.s3.head_object(
- Bucket=self.bucket_name, Key=remote_path)
+ self.s3.head_object(Bucket=self.bucket_name, Key=remote_path)
return True
except botocore.exceptions.ClientError:
# Either File does not exist or we are unable to get
@@ -166,11 +170,9 @@ def file_exists(self, remote_path):
return False
def make_url(self, obj_path):
- return "s3://{0}/{1}".format(
- self.bucket_name, obj_path)
+ return "s3://{0}/{1}".format(self.bucket_name, obj_path)
def file_checksum(self, file_name):
-
with open(file_name, "rb") as file_handle:
md5 = hashlib.md5()
# Read file in chunks of 4096 bytes
@@ -192,8 +194,8 @@ def file_checksum(self, file_name):
def to_path_style_s3_url(self, key, version=None):
"""
- This link describes the format of Path Style URLs
- http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ This link describes the format of Path Style URLs
+ http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
"""
base = self.s3.meta.endpoint_url
result = "{0}/{1}/{2}".format(base, self.bucket_name, key)
@@ -214,14 +216,18 @@ def __init__(self, filename, remote_path):
self._lock = threading.Lock()
def on_progress(self, future, bytes_transferred, **kwargs):
-
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_transferred
percentage = (self._seen_so_far / self._size) * 100
sys.stderr.write(
- "\rUploading to %s %s / %s (%.2f%%)" %
- (self._remote_path, self._seen_so_far,
- self._size, percentage))
+ "\rUploading to %s %s / %s (%.2f%%)"
+ % (
+ self._remote_path,
+ self._seen_so_far,
+ self._size,
+ percentage,
+ )
+ )
sys.stderr.flush()
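
The file_checksum() method above hashes uploads in 4096-byte blocks so a
large template never has to fit in memory. The same pattern as a
standalone sketch:

    import hashlib

    def file_md5(path, block_size=4096):
        md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for block in iter(lambda: f.read(block_size), b''):
                md5.update(block)
        return md5.hexdigest()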
diff --git a/awscli/customizations/servicecatalog/__init__.py b/awscli/customizations/servicecatalog/__init__.py
index 18cf606449f0..01eaa95e987d 100644
--- a/awscli/customizations/servicecatalog/__init__.py
+++ b/awscli/customizations/servicecatalog/__init__.py
@@ -11,13 +11,13 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.servicecatalog.generate \
- import GenerateCommand
+from awscli.customizations.servicecatalog.generate import GenerateCommand
def register_servicecatalog_commands(event_emitter):
- event_emitter.register('building-command-table.servicecatalog',
- inject_commands)
+ event_emitter.register(
+ 'building-command-table.servicecatalog', inject_commands
+ )
def inject_commands(command_table, session, **kwargs):
diff --git a/awscli/customizations/servicecatalog/generate.py b/awscli/customizations/servicecatalog/generate.py
index 51e69ed821c6..3c8b58d482e8 100644
--- a/awscli/customizations/servicecatalog/generate.py
+++ b/awscli/customizations/servicecatalog/generate.py
@@ -13,20 +13,23 @@
from awscli.customizations.commands import BasicCommand
from awscli.customizations.servicecatalog import helptext
-from awscli.customizations.servicecatalog.generateproduct \
- import GenerateProductCommand
-from awscli.customizations.servicecatalog.generateprovisioningartifact \
- import GenerateProvisioningArtifactCommand
+from awscli.customizations.servicecatalog.generateproduct import (
+ GenerateProductCommand,
+)
+from awscli.customizations.servicecatalog.generateprovisioningartifact import (
+ GenerateProvisioningArtifactCommand,
+)
class GenerateCommand(BasicCommand):
NAME = "generate"
DESCRIPTION = helptext.GENERATE_COMMAND
SUBCOMMANDS = [
- {'name': 'product',
- 'command_class': GenerateProductCommand},
- {'name': 'provisioning-artifact',
- 'command_class': GenerateProvisioningArtifactCommand}
+ {'name': 'product', 'command_class': GenerateProductCommand},
+ {
+ 'name': 'provisioning-artifact',
+ 'command_class': GenerateProvisioningArtifactCommand,
+ },
]
def _run_main(self, parsed_args, parsed_globals):
diff --git a/awscli/customizations/servicecatalog/generatebase.py b/awscli/customizations/servicecatalog/generatebase.py
index 3e7b59373f7a..eb160762341a 100644
--- a/awscli/customizations/servicecatalog/generatebase.py
+++ b/awscli/customizations/servicecatalog/generatebase.py
@@ -12,28 +12,27 @@
# language governing permissions and limitations under the License.
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.servicecatalog.utils \
- import make_url, get_s3_path
from awscli.customizations.s3uploader import S3Uploader
from awscli.customizations.servicecatalog import exceptions
+from awscli.customizations.servicecatalog.utils import get_s3_path, make_url
class GenerateBaseCommand(BasicCommand):
-
def _run_main(self, parsed_args, parsed_globals):
self.region = self.get_and_validate_region(parsed_globals)
self.s3_client = self._session.create_client(
's3',
region_name=self.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
+ )
+ self.s3_uploader = S3Uploader(
+ self.s3_client, parsed_args.bucket_name, force_upload=True
)
- self.s3_uploader = S3Uploader(self.s3_client,
- parsed_args.bucket_name,
- force_upload=True)
try:
- self.s3_uploader.upload(parsed_args.file_path,
- get_s3_path(parsed_args.file_path))
+ self.s3_uploader.upload(
+ parsed_args.file_path, get_s3_path(parsed_args.file_path)
+ )
except OSError as ex:
raise RuntimeError("%s cannot be found" % parsed_args.file_path)
@@ -44,10 +43,10 @@ def get_and_validate_region(self, parsed_globals):
if region not in self._session.get_available_regions('servicecatalog'):
raise exceptions.InvalidParametersException(
message="Region {0} is not supported".format(
- parsed_globals.region))
+ parsed_globals.region
+ )
+ )
return region
def create_s3_url(self, bucket_name, file_path):
- return make_url(self.region,
- bucket_name,
- get_s3_path(file_path))
+ return make_url(self.region, bucket_name, get_s3_path(file_path))
diff --git a/awscli/customizations/servicecatalog/generateproduct.py b/awscli/customizations/servicecatalog/generateproduct.py
index 2f3786563c83..c84243c21a4e 100644
--- a/awscli/customizations/servicecatalog/generateproduct.py
+++ b/awscli/customizations/servicecatalog/generateproduct.py
@@ -14,8 +14,9 @@
import sys
from awscli.customizations.servicecatalog import helptext
-from awscli.customizations.servicecatalog.generatebase \
- import GenerateBaseCommand
+from awscli.customizations.servicecatalog.generatebase import (
+ GenerateBaseCommand,
+)
from botocore.compat import json
@@ -26,71 +27,66 @@ class GenerateProductCommand(GenerateBaseCommand):
{
'name': 'product-name',
'required': True,
- 'help_text': helptext.PRODUCT_NAME
+ 'help_text': helptext.PRODUCT_NAME,
},
{
'name': 'product-owner',
'required': True,
- 'help_text': helptext.OWNER
+ 'help_text': helptext.OWNER,
},
{
'name': 'product-type',
'required': True,
'help_text': helptext.PRODUCT_TYPE,
- 'choices': ['CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE']
+ 'choices': ['CLOUD_FORMATION_TEMPLATE', 'MARKETPLACE'],
},
{
'name': 'product-description',
'required': False,
- 'help_text': helptext.PRODUCT_DESCRIPTION
+ 'help_text': helptext.PRODUCT_DESCRIPTION,
},
{
'name': 'product-distributor',
'required': False,
- 'help_text': helptext.DISTRIBUTOR
+ 'help_text': helptext.DISTRIBUTOR,
},
{
'name': 'tags',
'required': False,
- 'schema': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- }
- },
+ 'schema': {'type': 'array', 'items': {'type': 'string'}},
'default': [],
'synopsis': '--tags Key=key1,Value=value1 Key=key2,Value=value2',
- 'help_text': helptext.TAGS
+ 'help_text': helptext.TAGS,
},
{
'name': 'file-path',
'required': True,
- 'help_text': helptext.FILE_PATH
+ 'help_text': helptext.FILE_PATH,
},
{
'name': 'bucket-name',
'required': True,
- 'help_text': helptext.BUCKET_NAME
+ 'help_text': helptext.BUCKET_NAME,
},
{
'name': 'support-description',
'required': False,
- 'help_text': helptext.SUPPORT_DESCRIPTION
+ 'help_text': helptext.SUPPORT_DESCRIPTION,
},
{
'name': 'support-email',
'required': False,
- 'help_text': helptext.SUPPORT_EMAIL
+ 'help_text': helptext.SUPPORT_EMAIL,
},
{
'name': 'provisioning-artifact-name',
'required': True,
- 'help_text': helptext.PA_NAME
+ 'help_text': helptext.PA_NAME,
},
{
'name': 'provisioning-artifact-description',
'required': True,
- 'help_text': helptext.PA_DESCRIPTION
+ 'help_text': helptext.PA_DESCRIPTION,
},
{
'name': 'provisioning-artifact-type',
@@ -99,27 +95,30 @@ class GenerateProductCommand(GenerateBaseCommand):
'choices': [
'CLOUD_FORMATION_TEMPLATE',
'MARKETPLACE_AMI',
- 'MARKETPLACE_CAR'
- ]
- }
+ 'MARKETPLACE_CAR',
+ ],
+ },
]
def _run_main(self, parsed_args, parsed_globals):
- super(GenerateProductCommand, self)._run_main(parsed_args,
- parsed_globals)
+ super(GenerateProductCommand, self)._run_main(
+ parsed_args, parsed_globals
+ )
self.region = self.get_and_validate_region(parsed_globals)
- self.s3_url = self.create_s3_url(parsed_args.bucket_name,
- parsed_args.file_path)
+ self.s3_url = self.create_s3_url(
+ parsed_args.bucket_name, parsed_args.file_path
+ )
self.scs_client = self._session.create_client(
- 'servicecatalog', region_name=self.region,
+ 'servicecatalog',
+ region_name=self.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
- response = self.create_product(self.build_args(parsed_args,
- self.s3_url),
- parsed_globals)
+ response = self.create_product(
+ self.build_args(parsed_args, self.s3_url), parsed_globals
+ )
sys.stdout.write(json.dumps(response, indent=2, ensure_ascii=False))
return 0
@@ -145,11 +144,9 @@ def build_args(self, parsed_args, s3_url):
"ProvisioningArtifactParameters": {
'Name': parsed_args.provisioning_artifact_name,
'Description': parsed_args.provisioning_artifact_description,
- 'Info': {
- 'LoadTemplateFromURL': s3_url
- },
- 'Type': parsed_args.provisioning_artifact_type
- }
+ 'Info': {'LoadTemplateFromURL': s3_url},
+ 'Type': parsed_args.provisioning_artifact_type,
+ },
}
# Non-required args
diff --git a/awscli/customizations/servicecatalog/generateprovisioningartifact.py b/awscli/customizations/servicecatalog/generateprovisioningartifact.py
index e79a378357a3..8cb7aa33b9df 100644
--- a/awscli/customizations/servicecatalog/generateprovisioningartifact.py
+++ b/awscli/customizations/servicecatalog/generateprovisioningartifact.py
@@ -14,8 +14,9 @@
import sys
from awscli.customizations.servicecatalog import helptext
-from awscli.customizations.servicecatalog.generatebase \
- import GenerateBaseCommand
+from awscli.customizations.servicecatalog.generatebase import (
+ GenerateBaseCommand,
+)
from botocore.compat import json
@@ -26,22 +27,22 @@ class GenerateProvisioningArtifactCommand(GenerateBaseCommand):
{
'name': 'file-path',
'required': True,
- 'help_text': helptext.FILE_PATH
+ 'help_text': helptext.FILE_PATH,
},
{
'name': 'bucket-name',
'required': True,
- 'help_text': helptext.BUCKET_NAME
+ 'help_text': helptext.BUCKET_NAME,
},
{
'name': 'provisioning-artifact-name',
'required': True,
- 'help_text': helptext.PA_NAME
+ 'help_text': helptext.PA_NAME,
},
{
'name': 'provisioning-artifact-description',
'required': True,
- 'help_text': helptext.PA_DESCRIPTION
+ 'help_text': helptext.PA_DESCRIPTION,
},
{
'name': 'provisioning-artifact-type',
@@ -50,31 +51,33 @@ class GenerateProvisioningArtifactCommand(GenerateBaseCommand):
'choices': [
'CLOUD_FORMATION_TEMPLATE',
'MARKETPLACE_AMI',
- 'MARKETPLACE_CAR'
- ]
+ 'MARKETPLACE_CAR',
+ ],
},
{
'name': 'product-id',
'required': True,
- 'help_text': helptext.PRODUCT_ID
- }
+ 'help_text': helptext.PRODUCT_ID,
+ },
]
def _run_main(self, parsed_args, parsed_globals):
super(GenerateProvisioningArtifactCommand, self)._run_main(
- parsed_args, parsed_globals)
+ parsed_args, parsed_globals
+ )
self.region = self.get_and_validate_region(parsed_globals)
- self.s3_url = self.create_s3_url(parsed_args.bucket_name,
- parsed_args.file_path)
+ self.s3_url = self.create_s3_url(
+ parsed_args.bucket_name, parsed_args.file_path
+ )
self.scs_client = self._session.create_client(
- 'servicecatalog', region_name=self.region,
+ 'servicecatalog',
+ region_name=self.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl
+ verify=parsed_globals.verify_ssl,
)
- response = self.create_provisioning_artifact(parsed_args,
- self.s3_url)
+ response = self.create_provisioning_artifact(parsed_args, self.s3_url)
sys.stdout.write(json.dumps(response, indent=2, ensure_ascii=False))
@@ -86,11 +89,9 @@ def create_provisioning_artifact(self, parsed_args, s3_url):
Parameters={
'Name': parsed_args.provisioning_artifact_name,
'Description': parsed_args.provisioning_artifact_description,
- 'Info': {
- 'LoadTemplateFromURL': s3_url
- },
- 'Type': parsed_args.provisioning_artifact_type
- }
+ 'Info': {'LoadTemplateFromURL': s3_url},
+ 'Type': parsed_args.provisioning_artifact_type,
+ },
)
if 'ResponseMetadata' in response:
diff --git a/awscli/customizations/servicecatalog/helptext.py b/awscli/customizations/servicecatalog/helptext.py
index 7c4c72961206..446e8b7d676b 100644
--- a/awscli/customizations/servicecatalog/helptext.py
+++ b/awscli/customizations/servicecatalog/helptext.py
@@ -14,8 +14,10 @@
TAGS = "Tags to associate with the new product."
-BUCKET_NAME = ("Name of the S3 bucket name where the CloudFormation "
- "template will be uploaded to")
+BUCKET_NAME = (
+ "Name of the S3 bucket name where the CloudFormation "
+ "template will be uploaded to"
+)
SUPPORT_DESCRIPTION = "Support information about the product"
@@ -39,15 +41,21 @@
PRODUCT_DESCRIPTION = "The text description of the product"
-PRODUCT_COMMAND_DESCRIPTION = ("Create a new product using a CloudFormation "
- "template specified as a local file path")
-
-PA_COMMAND_DESCRIPTION = ("Create a new provisioning artifact for the "
- "specified product using a CloudFormation template "
- "specified as a local file path")
-
-GENERATE_COMMAND = ("Generate a Service Catalog product or provisioning "
- "artifact using a CloudFormation template specified "
- "as a local file path")
+PRODUCT_COMMAND_DESCRIPTION = (
+ "Create a new product using a CloudFormation "
+ "template specified as a local file path"
+)
+
+PA_COMMAND_DESCRIPTION = (
+ "Create a new provisioning artifact for the "
+ "specified product using a CloudFormation template "
+ "specified as a local file path"
+)
+
+GENERATE_COMMAND = (
+ "Generate a Service Catalog product or provisioning "
+ "artifact using a CloudFormation template specified "
+ "as a local file path"
+)
FILE_PATH = "A local file path that references the CloudFormation template"
diff --git a/awscli/customizations/servicecatalog/utils.py b/awscli/customizations/servicecatalog/utils.py
index 510ebb7d85f1..a6826ac36b87 100644
--- a/awscli/customizations/servicecatalog/utils.py
+++ b/awscli/customizations/servicecatalog/utils.py
@@ -16,8 +16,8 @@
def make_url(region, bucket_name, obj_path, version=None):
"""
- This link describes the format of Path Style URLs
- http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ This link describes the format of Path Style URLs
+ http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
"""
base = "https://s3.amazonaws.com"
if region and region != "us-east-1":
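
The hunk is cut off before the regional branch, but the path-style shape
that make_url() documents comes out to the following (a sketch; the
regional host format below is an assumption, only the us-east-1 default is
visible in this hunk):

    def sketch_make_url(region, bucket_name, obj_path):
        base = "https://s3.amazonaws.com"
        if region and region != "us-east-1":
            # Assumed regional endpoint; not shown in the truncated hunk.
            base = "https://s3.{0}.amazonaws.com".format(region)
        return "{0}/{1}/{2}".format(base, bucket_name, obj_path)

    assert (
        sketch_make_url(None, 'my-bucket', 'template.json')
        == 'https://s3.amazonaws.com/my-bucket/template.json'
    )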
diff --git a/awscli/customizations/sessendemail.py b/awscli/customizations/sessendemail.py
index 8215342982bf..ea290a63b685 100644
--- a/awscli/customizations/sessendemail.py
+++ b/awscli/customizations/sessendemail.py
@@ -22,52 +22,61 @@
"""
-from awscli.customizations import utils
from awscli.arguments import CustomArgument
+from awscli.customizations import utils
from awscli.customizations.utils import validate_mutually_exclusive_handler
-
-TO_HELP = ('The email addresses of the primary recipients. '
- 'You can specify multiple recipients as space-separated values')
-CC_HELP = ('The email addresses of copy recipients (Cc). '
- 'You can specify multiple recipients as space-separated values')
-BCC_HELP = ('The email addresses of blind-carbon-copy recipients (Bcc). '
- 'You can specify multiple recipients as space-separated values')
+TO_HELP = (
+ 'The email addresses of the primary recipients. '
+ 'You can specify multiple recipients as space-separated values'
+)
+CC_HELP = (
+ 'The email addresses of copy recipients (Cc). '
+ 'You can specify multiple recipients as space-separated values'
+)
+BCC_HELP = (
+ 'The email addresses of blind-carbon-copy recipients (Bcc). '
+ 'You can specify multiple recipients as space-separated values'
+)
SUBJECT_HELP = 'The subject of the message'
TEXT_HELP = 'The raw text body of the message'
HTML_HELP = 'The HTML body of the message'
def register_ses_send_email(event_handler):
- event_handler.register('building-argument-table.ses.send-email',
- _promote_args)
+ event_handler.register(
+ 'building-argument-table.ses.send-email', _promote_args
+ )
event_handler.register(
'operation-args-parsed.ses.send-email',
validate_mutually_exclusive_handler(
- ['destination'], ['to', 'cc', 'bcc']))
+ ['destination'], ['to', 'cc', 'bcc']
+ ),
+ )
event_handler.register(
'operation-args-parsed.ses.send-email',
- validate_mutually_exclusive_handler(
- ['message'], ['text', 'html']))
+ validate_mutually_exclusive_handler(['message'], ['text', 'html']),
+ )
def _promote_args(argument_table, **kwargs):
argument_table['message'].required = False
argument_table['destination'].required = False
- utils.rename_argument(argument_table, 'source',
- new_name='from')
+ utils.rename_argument(argument_table, 'source', new_name='from')
argument_table['to'] = AddressesArgument(
- 'to', 'ToAddresses', help_text=TO_HELP)
+ 'to', 'ToAddresses', help_text=TO_HELP
+ )
argument_table['cc'] = AddressesArgument(
- 'cc', 'CcAddresses', help_text=CC_HELP)
+ 'cc', 'CcAddresses', help_text=CC_HELP
+ )
argument_table['bcc'] = AddressesArgument(
- 'bcc', 'BccAddresses', help_text=BCC_HELP)
+ 'bcc', 'BccAddresses', help_text=BCC_HELP
+ )
argument_table['subject'] = BodyArgument(
- 'subject', 'Subject', help_text=SUBJECT_HELP)
- argument_table['text'] = BodyArgument(
- 'text', 'Text', help_text=TEXT_HELP)
- argument_table['html'] = BodyArgument(
- 'html', 'Html', help_text=HTML_HELP)
+ 'subject', 'Subject', help_text=SUBJECT_HELP
+ )
+ argument_table['text'] = BodyArgument('text', 'Text', help_text=TEXT_HELP)
+ argument_table['html'] = BodyArgument('html', 'Html', help_text=HTML_HELP)
def _build_destination(params, key, value):
@@ -88,11 +97,21 @@ def _build_message(params, key, value):
class AddressesArgument(CustomArgument):
-
- def __init__(self, name, json_key, help_text='', dest=None, default=None,
- action=None, required=None, choices=None, cli_type_name=None):
- super(AddressesArgument, self).__init__(name=name, help_text=help_text,
- required=required, nargs='+')
+ def __init__(
+ self,
+ name,
+ json_key,
+ help_text='',
+ dest=None,
+ default=None,
+ action=None,
+ required=None,
+ choices=None,
+ cli_type_name=None,
+ ):
+ super(AddressesArgument, self).__init__(
+ name=name, help_text=help_text, required=required, nargs='+'
+ )
self._json_key = json_key
def add_to_params(self, parameters, value):
@@ -101,13 +120,12 @@ def add_to_params(self, parameters, value):
class BodyArgument(CustomArgument):
-
def __init__(self, name, json_key, help_text='', required=None):
- super(BodyArgument, self).__init__(name=name, help_text=help_text,
- required=required)
+ super(BodyArgument, self).__init__(
+ name=name, help_text=help_text, required=required
+ )
self._json_key = json_key
def add_to_params(self, parameters, value):
if value:
_build_message(parameters, self._json_key, value)
-
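
The two validate_mutually_exclusive_handler registrations above enforce
that callers pass either the raw API shape (--destination, --message) or
the promoted flags (--to/--cc/--bcc, --text/--html), never both. The rule
reduced to a sketch:

    def check_mutually_exclusive(parsed_args, group_a, group_b):
        # Raise if arguments from both groups were supplied together.
        in_a = [name for name in group_a if parsed_args.get(name)]
        in_b = [name for name in group_b if parsed_args.get(name)]
        if in_a and in_b:
            raise ValueError(
                '%s cannot be combined with %s' % (in_a, in_b)
            )

    check_mutually_exclusive(
        {'to': ['a@example.com'], 'destination': None},
        ['destination'],
        ['to', 'cc', 'bcc'],
    )  # passes; supplying --destination as well would raise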
diff --git a/awscli/customizations/sessionmanager.py b/awscli/customizations/sessionmanager.py
index cfbffe22a298..16e0868cb0e2 100644
--- a/awscli/customizations/sessionmanager.py
+++ b/awscli/customizations/sessionmanager.py
@@ -10,15 +10,15 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import logging
-import json
import errno
+import json
+import logging
import os
import re
-
from subprocess import check_call, check_output
+
+from awscli.clidriver import CLIOperationCaller, ServiceOperation
from awscli.compat import ignore_user_entered_signals
-from awscli.clidriver import ServiceOperation, CLIOperationCaller
logger = logging.getLogger(__name__)
@@ -26,13 +26,14 @@
'SessionManagerPlugin is not found. ',
'Please refer to SessionManager Documentation here: ',
'http://docs.aws.amazon.com/console/systems-manager/',
- 'session-manager-plugin-not-found'
+ 'session-manager-plugin-not-found',
)
def register_ssm_session(event_handlers):
- event_handlers.register('building-command-table.ssm',
- add_custom_start_session)
+ event_handlers.register(
+ 'building-command-table.ssm', add_custom_start_session
+ )
def add_custom_start_session(session, command_table, **kwargs):
@@ -40,8 +41,9 @@ def add_custom_start_session(session, command_table, **kwargs):
name='start-session',
parent_name='ssm',
session=session,
- operation_model=session.get_service_model(
- 'ssm').operation_model('StartSession'),
+ operation_model=session.get_service_model('ssm').operation_model(
+ 'StartSession'
+ ),
operation_caller=StartSessionCaller(session),
)
@@ -84,8 +86,7 @@ def _normalize(self, v1, v2):
class StartSessionCommand(ServiceOperation):
def create_help_command(self):
- help_command = super(
- StartSessionCommand, self).create_help_command()
+ help_command = super(StartSessionCommand, self).create_help_command()
# Change the output shape because the command provides no output.
self._operation_model.output_shape = None
return help_command
@@ -95,12 +96,13 @@ class StartSessionCaller(CLIOperationCaller):
LAST_PLUGIN_VERSION_WITHOUT_ENV_VAR = "1.2.497.0"
DEFAULT_SSM_ENV_NAME = "AWS_SSM_START_SESSION_RESPONSE"
- def invoke(self, service_name, operation_name, parameters,
- parsed_globals):
+ def invoke(self, service_name, operation_name, parameters, parsed_globals):
client = self._session.create_client(
- service_name, region_name=parsed_globals.region,
+ service_name,
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl)
+ verify=parsed_globals.verify_ssl,
+ )
response = client.start_session(**parameters)
session_id = response['SessionId']
region_name = client.meta.region_name
@@ -108,8 +110,11 @@ def invoke(self, service_name, operation_name, parameters,
# to fetch same profile credentials to make an api call in the plugin.
# If --profile flag is configured, pass it to Session Manager plugin.
# If not, set empty string.
- profile_name = parsed_globals.profile \
- if parsed_globals.profile is not None else ''
+ profile_name = (
+ parsed_globals.profile
+ if parsed_globals.profile is not None
+ else ''
+ )
endpoint_url = client.meta.endpoint_url
ssm_env_name = self.DEFAULT_SSM_ENV_NAME
@@ -147,19 +152,25 @@ def invoke(self, service_name, operation_name, parameters,
# and handling in there
with ignore_user_entered_signals():
# call executable with necessary input
- check_call(["session-manager-plugin",
- start_session_response,
- region_name,
- "StartSession",
- profile_name,
- json.dumps(parameters),
- endpoint_url], env=env)
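+            # Positional args the plugin expects: start-session response,
+            # region, operation name, profile, request params, endpoint URL.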
+ check_call(
+ [
+ "session-manager-plugin",
+ start_session_response,
+ region_name,
+ "StartSession",
+ profile_name,
+ json.dumps(parameters),
+ endpoint_url,
+ ],
+ env=env,
+ )
return 0
except OSError as ex:
if ex.errno == errno.ENOENT:
- logger.debug('SessionManagerPlugin is not present',
- exc_info=True)
+ logger.debug(
+ 'SessionManagerPlugin is not present', exc_info=True
+ )
# start-session api call returns response and starts the
# session on ssm-agent and response is forwarded to
# session-manager-plugin. If plugin is not present, terminate
diff --git a/awscli/customizations/sso/__init__.py b/awscli/customizations/sso/__init__.py
index b5e2a6cc2219..563f6bc0b44e 100644
--- a/awscli/customizations/sso/__init__.py
+++ b/awscli/customizations/sso/__init__.py
@@ -10,22 +10,22 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from botocore.exceptions import ProfileNotFound
-from botocore.exceptions import UnknownCredentialError
-from botocore.credentials import JSONFileCache
-
from awscli.customizations.sso.login import LoginCommand
from awscli.customizations.sso.logout import LogoutCommand
from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR
+from botocore.credentials import JSONFileCache
+from botocore.exceptions import ProfileNotFound, UnknownCredentialError
def register_sso_commands(event_emitter):
event_emitter.register(
- 'building-command-table.sso', add_sso_commands,
+ 'building-command-table.sso',
+ add_sso_commands,
)
event_emitter.register(
- 'session-initialized', inject_json_file_cache,
- unique_id='inject_sso_json_file_cache'
+ 'session-initialized',
+ inject_json_file_cache,
+ unique_id='inject_sso_json_file_cache',
)
diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py
index 6a298cf3985f..a16a946ed260 100644
--- a/awscli/customizations/sso/login.py
+++ b/awscli/customizations/sso/login.py
@@ -11,7 +11,10 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.sso.utils import (
- do_sso_login, PrintOnlyHandler, LOGIN_ARGS, BaseSSOCommand,
+ LOGIN_ARGS,
+ BaseSSOCommand,
+ PrintOnlyHandler,
+ do_sso_login,
)
from awscli.customizations.utils import uni_print
@@ -31,11 +34,11 @@ class LoginCommand(BaseSSOCommand):
{
'name': 'sso-session',
'help_text': (
- 'An explicit SSO session to use to login. By default, this '
- 'command will login using the SSO session configured as part '
- 'of the requested profile and generally does not require this '
- 'argument to be set.'
- )
+ 'An explicit SSO session to use to login. By default, this '
+ 'command will login using the SSO session configured as part '
+ 'of the requested profile and generally does not require this '
+ 'argument to be set.'
+ ),
}
]
diff --git a/awscli/customizations/sso/logout.py b/awscli/customizations/sso/logout.py
index be2e6e356e2c..1d101f3fdc6f 100644
--- a/awscli/customizations/sso/logout.py
+++ b/awscli/customizations/sso/logout.py
@@ -14,12 +14,9 @@
import logging
import os
-from botocore.exceptions import ClientError
-
from awscli.customizations.commands import BasicCommand
-from awscli.customizations.sso.utils import SSO_TOKEN_DIR
-from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR
-
+from awscli.customizations.sso.utils import AWS_CREDS_CACHE_DIR, SSO_TOKEN_DIR
+from botocore.exceptions import ClientError
LOG = logging.getLogger(__name__)
@@ -35,7 +32,9 @@ class LogoutCommand(BasicCommand):
ARG_TABLE = []
def _run_main(self, parsed_args, parsed_globals):
- SSOTokenSweeper(self._session, parsed_globals).delete_credentials(SSO_TOKEN_DIR)
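+        # Sweep cached SSO tokens first, then the SSO-derived credentials.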
+ SSOTokenSweeper(self._session, parsed_globals).delete_credentials(
+ SSO_TOKEN_DIR
+ )
SSOCredentialSweeper().delete_credentials(AWS_CREDS_CACHE_DIR)
return 0
diff --git a/awscli/customizations/sso/utils.py b/awscli/customizations/sso/utils.py
index 5c947d15406c..d0acf6e3753e 100644
--- a/awscli/customizations/sso/utils.py
+++ b/awscli/customizations/sso/utils.py
@@ -18,28 +18,28 @@
import time
import webbrowser
from functools import partial
-from http.server import HTTPServer, BaseHTTPRequestHandler
-
-from botocore.compat import urlparse, parse_qs
-from botocore.credentials import JSONFileCache
-from botocore.exceptions import (
- AuthCodeFetcherError,
- PendingAuthorizationExpiredError,
-)
-from botocore.utils import SSOTokenFetcher, SSOTokenFetcherAuth
-from botocore.utils import original_ld_library_path
+from http.server import BaseHTTPRequestHandler, HTTPServer
from awscli import __version__ as awscli_version
from awscli.customizations.assumerole import CACHE_DIR as AWS_CREDS_CACHE_DIR
from awscli.customizations.commands import BasicCommand
from awscli.customizations.exceptions import ConfigurationError
from awscli.customizations.utils import uni_print
+from botocore.compat import parse_qs, urlparse
+from botocore.credentials import JSONFileCache
+from botocore.exceptions import (
+ AuthCodeFetcherError,
+ PendingAuthorizationExpiredError,
+)
+from botocore.utils import (
+ SSOTokenFetcher,
+ SSOTokenFetcherAuth,
+ original_ld_library_path,
+)
LOG = logging.getLogger(__name__)
-SSO_TOKEN_DIR = os.path.expanduser(
- os.path.join('~', '.aws', 'sso', 'cache')
-)
+SSO_TOKEN_DIR = os.path.expanduser(os.path.join('~', '.aws', 'sso', 'cache'))
LOGIN_ARGS = [
{
@@ -49,7 +49,7 @@
'help_text': (
'Disables automatically opening the verification URL in the '
'default browser.'
- )
+ ),
},
{
'name': 'use-device-code',
@@ -58,8 +58,8 @@
'help_text': (
'Uses the Device Code authorization grant and login flow '
'instead of the Authorization Code flow.'
- )
- }
+ ),
+ },
]
@@ -74,16 +74,16 @@ def _sso_json_dumps(obj):
def do_sso_login(
- session,
- sso_region,
- start_url,
- parsed_globals,
- token_cache=None,
- on_pending_authorization=None,
- force_refresh=False,
- registration_scopes=None,
- session_name=None,
- use_device_code=False,
+ session,
+ sso_region,
+ start_url,
+ parsed_globals,
+ token_cache=None,
+ on_pending_authorization=None,
+ force_refresh=False,
+ registration_scopes=None,
+ session_name=None,
+ use_device_code=False,
):
if token_cache is None:
token_cache = JSONFileCache(SSO_TOKEN_DIR, dumps_func=_sso_json_dumps)
@@ -153,7 +153,6 @@ def __call__(
f'Browser will not be automatically opened.\n'
f'Please visit the following URL:\n'
f'\n{verificationUri}\n'
-
)
user_code_msg = (
@@ -187,10 +186,7 @@ def __call__(
f'\n{verificationUri}\n'
)
- user_code_msg = (
- f'\nThen enter the code:\n'
- f'\n{userCode}\n'
- )
+ user_code_msg = f'\nThen enter the code:\n' f'\n{userCode}\n'
uni_print(opening_msg, self._outfile)
if userCode:
uni_print(user_code_msg, self._outfile)
@@ -206,6 +202,7 @@ class AuthCodeFetcher:
"""Manages the local web server that will be used
to retrieve the authorization code from the OAuth callback
"""
+
# How many seconds handle_request should wait for an incoming request
_REQUEST_TIMEOUT = 10
# How long we wait overall for the callback
@@ -229,14 +226,18 @@ def redirect_uri_without_port(self):
return 'http://127.0.0.1/oauth/callback'
def redirect_uri_with_port(self):
- return f'http://127.0.0.1:{self.http_server.server_port}/oauth/callback'
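+        # Embed the port the local callback server actually bound to.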
+ return (
+ f'http://127.0.0.1:{self.http_server.server_port}/oauth/callback'
+ )
def get_auth_code_and_state(self):
"""Blocks until the expected redirect request with either the
        authorization code/state or an error is handled
"""
start = time.time()
- while not self._is_done and time.time() < start + self._OVERALL_TIMEOUT:
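+        # handle_request() waits at most _REQUEST_TIMEOUT per call, letting
+        # the loop enforce _OVERALL_TIMEOUT between short waits.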
+ while (
+ not self._is_done and time.time() < start + self._OVERALL_TIMEOUT
+ ):
self.http_server.handle_request()
self.http_server.server_close()
@@ -256,6 +257,7 @@ class OAuthCallbackHandler(BaseHTTPRequestHandler):
the auth code and state parameters, and displaying a page directing
the user to return to the CLI.
"""
+
def __init__(self, auth_code_fetcher, *args, **kwargs):
self._auth_code_fetcher = auth_code_fetcher
super().__init__(*args, **kwargs)
diff --git a/awscli/customizations/streamingoutputarg.py b/awscli/customizations/streamingoutputarg.py
index 2cba59a03ff4..1515389ed79b 100644
--- a/awscli/customizations/streamingoutputarg.py
+++ b/awscli/customizations/streamingoutputarg.py
@@ -10,13 +10,13 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from botocore.model import Shape
-
from awscli.arguments import BaseCLIArgument
+from botocore.model import Shape
-def add_streaming_output_arg(argument_table, operation_model,
- session, **kwargs):
+def add_streaming_output_arg(
+ argument_table, operation_model, session, **kwargs
+):
# Implementation detail: hooked up to 'building-argument-table'
# event.
if _has_streaming_output(operation_model):
@@ -24,7 +24,9 @@ def add_streaming_output_arg(argument_table, operation_model,
argument_table['outfile'] = StreamingOutputArgument(
response_key=streaming_argument_name,
operation_model=operation_model,
- session=session, name='outfile')
+ session=session,
+ name='outfile',
+ )
def _has_streaming_output(model):
@@ -36,15 +38,16 @@ def _get_streaming_argument_name(model):
class StreamingOutputArgument(BaseCLIArgument):
-
BUFFER_SIZE = 32768
HELP = 'Filename where the content will be saved'
- def __init__(self, response_key, operation_model, name,
- session, buffer_size=None):
+ def __init__(
+ self, response_key, operation_model, name, session, buffer_size=None
+ ):
self._name = name
- self.argument_model = Shape('StreamingOutputArgument',
- {'type': 'string'})
+ self.argument_model = Shape(
+ 'StreamingOutputArgument', {'type': 'string'}
+ )
if buffer_size is None:
buffer_size = self.BUFFER_SIZE
self._buffer_size = buffer_size
@@ -81,15 +84,15 @@ def documentation(self):
return self.HELP
def add_to_parser(self, parser):
- parser.add_argument(self._name, metavar=self.py_name,
- help=self.HELP)
+ parser.add_argument(self._name, metavar=self.py_name, help=self.HELP)
def add_to_params(self, parameters, value):
self._output_file = value
service_id = self._operation_model.service_model.service_id.hyphenize()
operation_name = self._operation_model.name
- self._session.register('after-call.%s.%s' % (
- service_id, operation_name), self.save_file)
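+        # Defer the write: save the streamed body when this operation's
+        # after-call event fires.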
+ self._session.register(
+ 'after-call.%s.%s' % (service_id, operation_name), self.save_file
+ )
def save_file(self, parsed, **kwargs):
if self._response_key not in parsed:
diff --git a/awscli/customizations/timestampformat.py b/awscli/customizations/timestampformat.py
index d7e1987b1fd1..ee3d87062262 100644
--- a/awscli/customizations/timestampformat.py
+++ b/awscli/customizations/timestampformat.py
@@ -27,9 +27,10 @@
in the future.
"""
-from botocore.utils import parse_timestamp
-from botocore.exceptions import ProfileNotFound
+
from awscli.customizations.exceptions import ConfigurationError
+from botocore.exceptions import ProfileNotFound
+from botocore.utils import parse_timestamp
def register_timestamp_format(event_handlers):
diff --git a/awscli/customizations/toplevelbool.py b/awscli/customizations/toplevelbool.py
index 8014d2dd98d5..826d6c5ccb04 100644
--- a/awscli/customizations/toplevelbool.py
+++ b/awscli/customizations/toplevelbool.py
@@ -16,15 +16,14 @@
"""
+
import logging
from functools import partial
-
-from awscli.argprocess import detect_shape_structure
from awscli import arguments
-from awscli.customizations.utils import validate_mutually_exclusive_handler
+from awscli.argprocess import detect_shape_structure
from awscli.customizations.exceptions import ParamValidationError
-
+from awscli.customizations.utils import validate_mutually_exclusive_handler
LOG = logging.getLogger(__name__)
# This sentinel object is used to distinguish when
@@ -34,17 +33,20 @@
def register_bool_params(event_handler):
- event_handler.register('building-argument-table.ec2.*',
- partial(pull_up_bool,
- event_handler=event_handler))
+ event_handler.register(
+ 'building-argument-table.ec2.*',
+ partial(pull_up_bool, event_handler=event_handler),
+ )
def _qualifies_for_simplification(arg_model):
if detect_shape_structure(arg_model) == 'structure(scalar)':
members = arg_model.members
- if (len(members) == 1 and
- list(members.keys())[0] == 'Value' and
- list(members.values())[0].type_name == 'boolean'):
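+        # Only a structure whose single member is a boolean named 'Value'
+        # can be collapsed into an --option/--no-option pair.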
+ if (
+ len(members) == 1
+ and list(members.keys())[0] == 'Value'
+ and list(members.values())[0].type_name == 'boolean'
+ ):
return True
return False
@@ -56,8 +58,8 @@ def pull_up_bool(argument_table, event_handler, **kwargs):
boolean_pairs = []
event_handler.register(
'operation-args-parsed.ec2.*',
- partial(validate_boolean_mutex_groups,
- boolean_pairs=boolean_pairs))
+ partial(validate_boolean_mutex_groups, boolean_pairs=boolean_pairs),
+ )
for value in list(argument_table.values()):
if hasattr(value, 'argument_model'):
arg_model = value.argument_model
@@ -66,18 +68,25 @@ def pull_up_bool(argument_table, event_handler, **kwargs):
            # one that supports --option and --option <value>
# and another arg of --no-option.
new_arg = PositiveBooleanArgument(
- value.name, arg_model, value._operation_model,
+ value.name,
+ arg_model,
+ value._operation_model,
value._event_emitter,
group_name=value.name,
- serialized_name=value._serialized_name)
+ serialized_name=value._serialized_name,
+ )
argument_table[value.name] = new_arg
negative_name = 'no-%s' % value.name
negative_arg = NegativeBooleanParameter(
- negative_name, arg_model, value._operation_model,
+ negative_name,
+ arg_model,
+ value._operation_model,
value._event_emitter,
- action='store_true', dest='no_%s' % new_arg.py_name,
+ action='store_true',
+ dest='no_%s' % new_arg.py_name,
group_name=value.name,
- serialized_name=value._serialized_name)
+ serialized_name=value._serialized_name,
+ )
argument_table[negative_name] = negative_arg
# If we've pulled up a structure(scalar) arg
# into a pair of top level boolean args, we need
@@ -90,19 +99,33 @@ def pull_up_bool(argument_table, event_handler, **kwargs):
def validate_boolean_mutex_groups(boolean_pairs, parsed_args, **kwargs):
# Validate we didn't pass in an --option and a --no-option.
for positive, negative in boolean_pairs:
- if getattr(parsed_args, positive.py_name) is not _NOT_SPECIFIED and \
- getattr(parsed_args, negative.py_name) is not _NOT_SPECIFIED:
+ if (
+ getattr(parsed_args, positive.py_name) is not _NOT_SPECIFIED
+ and getattr(parsed_args, negative.py_name) is not _NOT_SPECIFIED
+ ):
raise ParamValidationError(
'Cannot specify both the "%s" option and '
- 'the "%s" option.' % (positive.cli_name, negative.cli_name))
+ 'the "%s" option.' % (positive.cli_name, negative.cli_name)
+ )
class PositiveBooleanArgument(arguments.CLIArgument):
- def __init__(self, name, argument_model, operation_model,
- event_emitter, serialized_name, group_name):
+ def __init__(
+ self,
+ name,
+ argument_model,
+ operation_model,
+ event_emitter,
+ serialized_name,
+ group_name,
+ ):
super(PositiveBooleanArgument, self).__init__(
- name, argument_model, operation_model, event_emitter,
- serialized_name=serialized_name)
+ name,
+ argument_model,
+ operation_model,
+ event_emitter,
+ serialized_name=serialized_name,
+ )
self._group_name = group_name
@property
@@ -113,11 +136,13 @@ def add_to_parser(self, parser):
# We need to support three forms:
# --option-name
# --option-name Value=(true|false)
- parser.add_argument(self.cli_name,
- help=self.documentation,
- action='store',
- default=_NOT_SPECIFIED,
- nargs='?')
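+        # nargs='?' accepts both a bare --option flag and an explicit value.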
+ parser.add_argument(
+ self.cli_name,
+ help=self.documentation,
+ action='store',
+ default=_NOT_SPECIFIED,
+ nargs='?',
+ )
def add_to_params(self, parameters, value):
if value is _NOT_SPECIFIED:
@@ -131,17 +156,29 @@ def add_to_params(self, parameters, value):
parameters[self._serialized_name] = {'Value': True}
else:
# Otherwise the arg was specified with a value.
- parameters[self._serialized_name] = self._unpack_argument(
- value)
+ parameters[self._serialized_name] = self._unpack_argument(value)
class NegativeBooleanParameter(arguments.BooleanArgument):
- def __init__(self, name, argument_model, operation_model,
- event_emitter, serialized_name, action='store_true',
- dest=None, group_name=None):
+ def __init__(
+ self,
+ name,
+ argument_model,
+ operation_model,
+ event_emitter,
+ serialized_name,
+ action='store_true',
+ dest=None,
+ group_name=None,
+ ):
super(NegativeBooleanParameter, self).__init__(
- name, argument_model, operation_model, event_emitter,
- default=_NOT_SPECIFIED, serialized_name=serialized_name)
+ name,
+ argument_model,
+ operation_model,
+ event_emitter,
+ default=_NOT_SPECIFIED,
+ serialized_name=serialized_name,
+ )
self._group_name = group_name
def add_to_params(self, parameters, value):
diff --git a/awscli/customizations/translate.py b/awscli/customizations/translate.py
index 38add1564dc1..f398dd94183b 100644
--- a/awscli/customizations/translate.py
+++ b/awscli/customizations/translate.py
@@ -12,10 +12,10 @@
# language governing permissions and limitations under the License.
import copy
-from awscli.arguments import CustomArgument, CLIArgument
+from awscli.arguments import CLIArgument, CustomArgument
from awscli.customizations.binaryhoist import (
- BinaryBlobArgumentHoister,
ArgumentParameters,
+ BinaryBlobArgumentHoister,
)
FILE_DOCSTRING = (
@@ -41,22 +41,24 @@
def register_translate_import_terminology(cli):
- cli.register(
- "building-argument-table.translate.import-terminology",
- BinaryBlobArgumentHoister(
- new_argument=ArgumentParameters(
- name="data-file",
- help_text=FILE_DOCSTRING,
- required=True,
- ),
- original_argument=ArgumentParameters(
- name="terminology-data",
- member="File",
- required=False,
+ (
+ cli.register(
+ "building-argument-table.translate.import-terminology",
+ BinaryBlobArgumentHoister(
+ new_argument=ArgumentParameters(
+ name="data-file",
+ help_text=FILE_DOCSTRING,
+ required=True,
+ ),
+ original_argument=ArgumentParameters(
+ name="terminology-data",
+ member="File",
+ required=False,
+ ),
+ error_if_original_used=FILE_ERRORSTRING,
),
- error_if_original_used=FILE_ERRORSTRING,
),
- ),
+ )
cli.register(
"building-argument-table.translate.translate-document",
diff --git a/awscli/customizations/utils.py b/awscli/customizations/utils.py
index 2c281cf0e53e..4bccb54207f8 100644
--- a/awscli/customizations/utils.py
+++ b/awscli/customizations/utils.py
@@ -14,20 +14,18 @@
Utility functions to make it easier to work with customizations.
"""
+
import copy
import re
import sys
import xml
-from botocore.exceptions import ClientError
from awscli.customizations.exceptions import ParamValidationError
-
+from botocore.exceptions import ClientError
_SENTENCE_DELIMETERS_REGEX = re.compile(r'[.:]+')
-_LINE_BREAK_CHARS = [
- '\n',
- '\u2028'
-]
+_LINE_BREAK_CHARS = ['\n', '\u2028']
+
def rename_argument(argument_table, existing_name, new_name):
current = argument_table[existing_name]
@@ -93,6 +91,7 @@ def alias_command(command_table, existing_name, new_name):
def validate_mutually_exclusive_handler(*groups):
def _handler(parsed_args, **kwargs):
return validate_mutually_exclusive(parsed_args, *groups)
+
return _handler
@@ -140,8 +139,9 @@ def s3_bucket_exists(s3_client, bucket_name):
return bucket_exists
-def create_client_from_parsed_globals(session, service_name, parsed_globals,
- overrides=None):
+def create_client_from_parsed_globals(
+ session, service_name, parsed_globals, overrides=None
+):
"""Creates a service client, taking parsed_globals into account
Any values specified in overrides will override the returned dict. Note
@@ -197,8 +197,9 @@ def uni_print(statement, out_file=None):
# ``sys.stdout.encoding`` is ``None``.
if new_encoding is None:
new_encoding = 'ascii'
- new_statement = statement.encode(
- new_encoding, 'replace').decode(new_encoding)
+ new_statement = statement.encode(new_encoding, 'replace').decode(
+ new_encoding
+ )
out_file.write(new_statement)
out_file.flush()
diff --git a/awscli/customizations/waiters.py b/awscli/customizations/waiters.py
index dd90bd8aa0f8..468dfbbbfdcc 100644
--- a/awscli/customizations/waiters.py
+++ b/awscli/customizations/waiters.py
@@ -10,13 +10,15 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+from awscli.clidriver import ServiceOperation
+from awscli.customizations.commands import (
+ BasicCommand,
+ BasicDocHandler,
+ BasicHelp,
+)
from botocore import xform_name
from botocore.exceptions import DataNotFoundError
-from awscli.clidriver import ServiceOperation
-from awscli.customizations.commands import BasicCommand, BasicHelp, \
- BasicDocHandler
-
def register_add_waiters(cli):
cli.register('building-command-table', add_waiters)
@@ -29,15 +31,17 @@ def add_waiters(command_table, session, command_object, **kwargs):
service_model = getattr(command_object, 'service_model', None)
if service_model is not None:
# Get a client out of the service object.
- waiter_model = get_waiter_model_from_service_model(session,
- service_model)
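+        # Not every service defines waiters; skip the wait command if absent.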
+ waiter_model = get_waiter_model_from_service_model(
+ session, service_model
+ )
if waiter_model is None:
return
waiter_names = waiter_model.waiter_names
# If there are waiters make a wait command.
if waiter_names:
command_table['wait'] = WaitCommand(
- session, waiter_model, service_model)
+ session, waiter_model, service_model
+ )
def get_waiter_model_from_service_model(session, service_model):
@@ -50,9 +54,11 @@ def get_waiter_model_from_service_model(session, service_model):
class WaitCommand(BasicCommand):
NAME = 'wait'
- DESCRIPTION = ('Wait until a particular condition is satisfied. Each '
- 'subcommand polls an API until the listed requirement '
- 'is met.')
+ DESCRIPTION = (
+ 'Wait until a particular condition is satisfied. Each '
+ 'subcommand polls an API until the listed requirement '
+ 'is met.'
+ )
def __init__(self, session, waiter_model, service_model):
self._model = waiter_model
@@ -60,7 +66,7 @@ def __init__(self, session, waiter_model, service_model):
self.waiter_cmd_builder = WaiterStateCommandBuilder(
session=session,
model=self._model,
- service_model=self._service_model
+ service_model=self._service_model,
)
super(WaitCommand, self).__init__(session)
@@ -76,10 +82,13 @@ def _build_subcommand_table(self):
return subcommand_table
def create_help_command(self):
- return BasicHelp(self._session, self,
- command_table=self.subcommand_table,
- arg_table=self.arg_table,
- event_handler_class=WaiterCommandDocHandler)
+ return BasicHelp(
+ self._session,
+ self,
+ command_table=self.subcommand_table,
+ arg_table=self.arg_table,
+ event_handler_class=WaiterCommandDocHandler,
+ )
class WaiterStateCommandBuilder(object):
@@ -97,8 +106,9 @@ def build_all_waiter_state_cmds(self, subcommand_table):
waiter_names = self._model.waiter_names
for waiter_name in waiter_names:
waiter_cli_name = xform_name(waiter_name, '-')
- subcommand_table[waiter_cli_name] = \
- self._build_waiter_state_cmd(waiter_name)
+ subcommand_table[waiter_cli_name] = self._build_waiter_state_cmd(
+ waiter_name
+ )
def _build_waiter_state_cmd(self, waiter_name):
# Get the waiter
@@ -117,7 +127,8 @@ def _build_waiter_state_cmd(self, waiter_name):
operation_model = self._service_model.operation_model(operation_name)
waiter_state_command = WaiterStateCommand(
- name=waiter_cli_name, parent_name='wait',
+ name=waiter_cli_name,
+ parent_name='wait',
operation_caller=WaiterCaller(self._session, waiter_name),
session=self._session,
operation_model=operation_model,
@@ -133,11 +144,11 @@ def _build_waiter_state_cmd(self, waiter_name):
class WaiterStateDocBuilder(object):
SUCCESS_DESCRIPTIONS = {
- 'error': u'%s is thrown ',
- 'path': u'%s ',
- 'pathAll': u'%s for all elements ',
- 'pathAny': u'%s for any element ',
- 'status': u'%s response is received '
+ 'error': '%s is thrown ',
+ 'path': '%s ',
+ 'pathAll': '%s for all elements ',
+ 'pathAny': '%s for any element ',
+ 'status': '%s response is received ',
}
def __init__(self, waiter_config):
@@ -149,7 +160,7 @@ def build_waiter_state_description(self):
# description is provided, use a heuristic to generate a description
# for the waiter.
if not description:
- description = u'Wait until '
+ description = 'Wait until '
# Look at all of the acceptors and find the success state
# acceptor.
for acceptor in self._waiter_config.acceptors:
@@ -159,9 +170,11 @@ def build_waiter_state_description(self):
break
# Include what operation is being used.
description += self._build_operation_description(
- self._waiter_config.operation)
+ self._waiter_config.operation
+ )
description += self._build_polling_description(
- self._waiter_config.delay, self._waiter_config.max_attempts)
+ self._waiter_config.delay, self._waiter_config.max_attempts
+ )
return description
def _build_success_description(self, acceptor):
@@ -172,8 +185,9 @@ def _build_success_description(self, acceptor):
# If success is based off of the state of a resource include the
# description about what resource is looked at.
if matcher in ['path', 'pathAny', 'pathAll']:
- resource_description = u'JMESPath query %s returns ' % \
- acceptor.argument
+ resource_description = (
+ 'JMESPath query %s returns ' % acceptor.argument
+ )
# Prepend the resource description to the template description
success_description = resource_description + success_description
# Complete the description by filling in the expected success state.
@@ -182,14 +196,14 @@ def _build_success_description(self, acceptor):
def _build_operation_description(self, operation):
operation_name = xform_name(operation).replace('_', '-')
- return u'when polling with ``%s``.' % operation_name
+ return 'when polling with ``%s``.' % operation_name
def _build_polling_description(self, delay, max_attempts):
description = (
' It will poll every %s seconds until a successful state '
'has been reached. This will exit with a return code of 255 '
- 'after %s failed checks.'
- % (delay, max_attempts))
+ 'after %s failed checks.' % (delay, max_attempts)
+ )
return description
@@ -200,9 +214,11 @@ def __init__(self, session, waiter_name):
def invoke(self, service_name, operation_name, parameters, parsed_globals):
client = self._session.create_client(
- service_name, region_name=parsed_globals.region,
+ service_name,
+ region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
- verify=parsed_globals.verify_ssl)
+ verify=parsed_globals.verify_ssl,
+ )
waiter = client.get_waiter(xform_name(self._waiter_name))
waiter.wait(**parameters)
return 0
diff --git a/awscli/customizations/wizard/app.py b/awscli/customizations/wizard/app.py
index f62b7f0f93b1..da4f14eafa55 100644
--- a/awscli/customizations/wizard/app.py
+++ b/awscli/customizations/wizard/app.py
@@ -18,11 +18,13 @@
from prompt_toolkit.application import Application
from awscli.customizations.wizard import core
-from awscli.customizations.wizard.ui.style import get_default_style
-from awscli.customizations.wizard.ui.keybindings import get_default_keybindings
from awscli.customizations.wizard.exceptions import (
- UnexpectedWizardException, UnableToRunWizardError, InvalidChoiceException
+ InvalidChoiceException,
+ UnableToRunWizardError,
+ UnexpectedWizardException,
)
+from awscli.customizations.wizard.ui.keybindings import get_default_keybindings
+from awscli.customizations.wizard.ui.style import get_default_style
from awscli.utils import json_encoder
@@ -39,9 +41,19 @@ def run(self, loaded):
class WizardApp(Application):
- def __init__(self, layout, values, traverser, executor, style=None,
- key_bindings=None, full_screen=True, output=None,
- app_input=None, file_io=None):
+ def __init__(
+ self,
+ layout,
+ values,
+ traverser,
+ executor,
+ style=None,
+ key_bindings=None,
+ full_screen=True,
+ output=None,
+ app_input=None,
+ file_io=None,
+ ):
self.values = values
self.traverser = traverser
self.executor = executor
@@ -56,8 +68,12 @@ def __init__(self, layout, values, traverser, executor, style=None,
file_io = FileIO()
self.file_io = file_io
super().__init__(
- layout=layout, style=style, key_bindings=key_bindings,
- full_screen=full_screen, output=output, input=app_input,
+ layout=layout,
+ style=style,
+ key_bindings=key_bindings,
+ full_screen=full_screen,
+ output=output,
+ input=app_input,
)
def run(self, pre_run=None, **kwargs):
@@ -71,9 +87,7 @@ def run(self, pre_run=None, **kwargs):
loop.close()
def _handle_exception(self, loop, context):
- self.exit(
- exception=UnexpectedWizardException(context['exception'])
- )
+ self.exit(exception=UnexpectedWizardException(context['exception']))
class WizardTraverser:
@@ -106,18 +120,19 @@ def get_current_prompt_choices(self):
def current_prompt_has_details(self):
return 'details' in self._prompt_definitions.get(
- self._current_prompt, {})
+ self._current_prompt, {}
+ )
def submit_prompt_answer(self, answer):
definition = self._prompt_definitions[self._current_prompt]
if 'choices' in definition:
answer = self._convert_display_value_to_actual_value(
- self._get_choices(self._current_prompt),
- answer
+ self._get_choices(self._current_prompt), answer
)
if 'datatype' in definition:
answer = core.DataTypeConverter.convert(
- definition['datatype'], answer)
+ definition['datatype'], answer
+ )
self._values[self._current_prompt] = answer
@@ -153,8 +168,11 @@ def is_prompt_visible(self, value_name):
return self._prompt_meets_condition(value_name)
def is_prompt_details_visible_by_default(self, value_name):
- return self._prompt_definitions[value_name].get(
- 'details', {}).get('visible', False)
+ return (
+ self._prompt_definitions[value_name]
+ .get('details', {})
+ .get('visible', False)
+ )
def has_visited_section(self, section_name):
return section_name in self._visited_sections
@@ -233,10 +251,7 @@ def _get_normalized_choice_values(self, choices):
for choice in choices:
if isinstance(choice, str):
normalized_choices.append(
- {
- 'display': choice,
- 'actual_value': choice
- }
+ {'display': choice, 'actual_value': choice}
)
else:
normalized_choices.append(choice)
@@ -255,7 +270,7 @@ def _convert_display_value_to_actual_value(self, choices, display_value):
def _get_next_prompt(self):
prompts = list(self._prompt_definitions)
current_pos = prompts.index(self._current_prompt)
- for prompt in prompts[current_pos+1:]:
+ for prompt in prompts[current_pos + 1 :]:
if self._prompt_meets_condition(prompt):
return prompt
return self.DONE
@@ -271,12 +286,14 @@ def _prompt_meets_condition(self, value_name):
def get_output(self):
template_step = core.TemplateStep()
return template_step.run_step(
- self._definition[self.OUTPUT], self._values)
+ self._definition[self.OUTPUT], self._values
+ )
class WizardValues(MutableMapping):
- def __init__(self, definition, value_retrieval_steps=None,
- exception_handler=None):
+ def __init__(
+ self, definition, value_retrieval_steps=None, exception_handler=None
+ ):
self._definition = definition
if value_retrieval_steps is None:
value_retrieval_steps = {}
diff --git a/awscli/customizations/wizard/commands.py b/awscli/customizations/wizard/commands.py
index 3c1d7f9f3cba..1ab72137aaa2 100644
--- a/awscli/customizations/wizard/commands.py
+++ b/awscli/customizations/wizard/commands.py
@@ -10,10 +10,10 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.wizard import devcommands, factory
-from awscli.customizations.wizard.loader import WizardLoader
from awscli.customizations.commands import BasicCommand, BasicHelp
from awscli.customizations.exceptions import ParamValidationError
+from awscli.customizations.wizard import devcommands, factory
+from awscli.customizations.wizard.loader import WizardLoader
def register_wizard_commands(event_handlers):
@@ -25,8 +25,9 @@ def register_wizard_commands(event_handlers):
def _register_wizards_for_commands(commands, event_handlers):
for command in commands:
- event_handlers.register('building-command-table.%s' % command,
- _add_wizard_command)
+ event_handlers.register(
+ 'building-command-table.%s' % command, _add_wizard_command
+ )
def _add_wizard_command(session, command_object, command_table, **kwargs):
@@ -36,7 +37,7 @@ def _add_wizard_command(session, command_object, command_table, **kwargs):
session=session,
loader=WizardLoader(),
parent_command=command_object.name,
- runner={'0.1': v1_runner, '0.2': v2_runner}
+ runner={'0.1': v1_runner, '0.2': v2_runner},
)
command_table['wizard'] = cmd
@@ -47,8 +48,9 @@ class TopLevelWizardCommand(BasicCommand):
'Interactive command for creating and configuring AWS resources.'
)
- def __init__(self, session, loader, parent_command, runner,
- wizard_name='_main'):
+ def __init__(
+ self, session, loader, parent_command, runner, wizard_name='_main'
+ ):
super(TopLevelWizardCommand, self).__init__(session)
self._session = session
self._loader = loader
@@ -58,12 +60,17 @@ def __init__(self, session, loader, parent_command, runner,
def _build_subcommand_table(self):
subcommand_table = super(
- TopLevelWizardCommand, self)._build_subcommand_table()
+ TopLevelWizardCommand, self
+ )._build_subcommand_table()
wizards = self._get_available_wizards()
for name in wizards:
- cmd = SingleWizardCommand(self._session, self._loader,
- self._parent_command, self._runner,
- wizard_name=name)
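+            # Each available wizard becomes its own subcommand.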
+ cmd = SingleWizardCommand(
+ self._session,
+ self._loader,
+ self._parent_command,
+ self._runner,
+ wizard_name=name,
+ )
subcommand_table[name] = cmd
self._add_lineage(subcommand_table)
return subcommand_table
@@ -80,12 +87,14 @@ def _run_main(self, parsed_args, parsed_globals):
self._raise_usage_error()
def _wizard_exists(self):
- return self._loader.wizard_exists(self._parent_command,
- self._wizard_name)
+ return self._loader.wizard_exists(
+ self._parent_command, self._wizard_name
+ )
def _run_wizard(self):
loaded = self._loader.load_wizard(
- self._parent_command, self._wizard_name)
+ self._parent_command, self._wizard_name
+ )
version = loaded.get('version')
if version in self._runner:
self._runner[version].run(loaded)
@@ -95,15 +104,19 @@ def _run_wizard(self):
)
def create_help_command(self):
- return BasicHelp(self._session, self,
- command_table=self.subcommand_table,
- arg_table=self.arg_table)
+ return BasicHelp(
+ self._session,
+ self,
+ command_table=self.subcommand_table,
+ arg_table=self.arg_table,
+ )
class SingleWizardCommand(TopLevelWizardCommand):
def __init__(self, session, loader, parent_command, runner, wizard_name):
super(SingleWizardCommand, self).__init__(
- session, loader, parent_command, runner, wizard_name)
+ session, loader, parent_command, runner, wizard_name
+ )
self._session = session
self._loader = loader
self._runner = runner
@@ -119,15 +132,19 @@ def _run_main(self, parsed_args, parsed_globals):
def create_help_command(self):
loaded = self._loader.load_wizard(
- self._parent_command, self._wizard_name,
+ self._parent_command,
+ self._wizard_name,
+ )
+ return WizardHelpCommand(
+ self._session, self, self.subcommand_table, self.arg_table, loaded
)
- return WizardHelpCommand(self._session, self, self.subcommand_table,
- self.arg_table, loaded)
class WizardHelpCommand(BasicHelp):
- def __init__(self, session, command_object, command_table, arg_table,
- loaded_wizard):
- super(WizardHelpCommand, self).__init__(session, command_object,
- command_table, arg_table)
+ def __init__(
+ self, session, command_object, command_table, arg_table, loaded_wizard
+ ):
+ super(WizardHelpCommand, self).__init__(
+ session, command_object, command_table, arg_table
+ )
self._description = loaded_wizard.get('description', '')
diff --git a/awscli/customizations/wizard/core.py b/awscli/customizations/wizard/core.py
index 97f1185c026f..bb2290e0009f 100644
--- a/awscli/customizations/wizard/core.py
+++ b/awscli/customizations/wizard/core.py
@@ -11,19 +11,19 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Core planner and executor for wizards."""
-import re
+
import json
import os
+import re
from functools import partial
-from botocore import xform_name
import jmespath
-from awscli.utils import json_encoder
from awscli.customizations.wizard.exceptions import (
- InvalidDataTypeConversionException
+ InvalidDataTypeConversionException,
)
-
+from awscli.utils import json_encoder
+from botocore import xform_name
DONE_SECTION_NAME = '__DONE__'
OUTPUT_SECTION_NAME = '__OUTPUT__'
@@ -118,7 +118,6 @@ def run_step(self, step_definition, parameters):
class StaticStep(BaseStep):
-
NAME = 'static'
def run_step(self, step_definition, parameters):
@@ -126,7 +125,6 @@ def run_step(self, step_definition, parameters):
class PromptStep(BaseStep):
-
NAME = 'prompt'
def __init__(self, prompter):
@@ -135,13 +133,14 @@ def __init__(self, prompter):
'int': int,
'float': float,
'str': str,
- 'bool': lambda x: True if x.lower() == 'true' else False
+ 'bool': lambda x: True if x.lower() == 'true' else False,
}
def run_step(self, step_definition, parameters):
choices = self._get_choices(step_definition, parameters)
- response = self._prompter.prompt(step_definition['description'],
- choices=choices)
+ response = self._prompter.prompt(
+ step_definition['description'], choices=choices
+ )
return self._convert_data_type_if_needed(response, step_definition)
def _get_choices(self, step_definition, parameters):
@@ -180,13 +179,13 @@ def run_step(self, step_definition, parameters):
# They want the "No" choice to be the starting value so we
# need to reverse the choices.
choices[:] = choices[::-1]
- response = self._prompter.prompt(step_definition['question'],
- choices=choices)
+ response = self._prompter.prompt(
+ step_definition['question'], choices=choices
+ )
return response
class FilePromptStep(BaseStep):
-
NAME = 'fileprompt'
def __init__(self, prompter):
@@ -198,13 +197,12 @@ def run_step(self, step_definition, parameters):
class TemplateStep(BaseStep):
-
NAME = 'template'
CONDITION_PATTERN = re.compile(
        r'(?:^[ \t]*)?{%\s*if\s+(?P<condition>.+?)\s+%}(?:\s*[$|\n])?'
        r'(?P<content>.+?)[ \t]*{%\s*endif\s*%}[$|\n]?',
- re.DOTALL | re.MULTILINE | re.IGNORECASE
+ re.DOTALL | re.MULTILINE | re.IGNORECASE,
)
_SUPPORTED_CONDITION_OPERATORS = [
'==',
@@ -247,7 +245,6 @@ def run_step(self, step_definition, parameters):
class APICallStep(BaseStep):
-
NAME = 'apicall'
def __init__(self, api_invoker):
@@ -256,18 +253,18 @@ def __init__(self, api_invoker):
def run_step(self, step_definition, parameters):
service, op_name = step_definition['operation'].split('.', 1)
return self._api_invoker.invoke(
- service=service, operation=op_name,
+ service=service,
+ operation=op_name,
api_params=step_definition['params'],
plan_variables=parameters,
optional_api_params=step_definition.get('optional_params'),
query=step_definition.get('query'),
cache=step_definition.get('cache', False),
- paginate=step_definition.get('paginate', False)
+ paginate=step_definition.get('paginate', False),
)
class SharedConfigStep(BaseStep):
-
NAME = 'sharedconfig'
def __init__(self, config_api):
@@ -289,7 +286,8 @@ class LoadDataStep(BaseStep):
def run_step(self, step_definition, parameters):
var_resolver = VariableResolver()
value = var_resolver.resolve_variables(
- parameters, step_definition['value'],
+ parameters,
+ step_definition['value'],
)
load_type = step_definition['load_type']
if load_type == 'json':
@@ -304,7 +302,8 @@ class DumpDataStep(BaseStep):
def run_step(self, step_definition, parameters):
var_resolver = VariableResolver()
value = var_resolver.resolve_variables(
- parameters, step_definition['value'],
+ parameters,
+ step_definition['value'],
)
dump_type = step_definition['dump_type']
if dump_type == 'json':
@@ -314,7 +313,6 @@ def run_step(self, step_definition, parameters):
class VariableResolver(object):
-
_VAR_MATCH = re.compile(r'^{(.*?)}$')
def resolve_variables(self, variables, params):
@@ -372,37 +370,49 @@ class APIInvoker(object):
between the two steps.
"""
+
def __init__(self, session):
self._session = session
self._response_cache = {}
- def invoke(self, service, operation, api_params, plan_variables,
- optional_api_params=None, query=None, cache=False,
- paginate=False):
+ def invoke(
+ self,
+ service,
+ operation,
+ api_params,
+ plan_variables,
+ optional_api_params=None,
+ query=None,
+ cache=False,
+ paginate=False,
+ ):
# TODO: All of the params that come from prompting the user
# are strings. We need a way to convert values to their
# appropriate types. We can either add typing into the wizard
# spec or we possibly auto-convert based on the service
# model (or both).
resolved_params = self._resolve_params(
- api_params, optional_api_params, plan_variables)
+ api_params, optional_api_params, plan_variables
+ )
if cache:
response = self._get_cached_api_call(
- service, operation, resolved_params, paginate)
+ service, operation, resolved_params, paginate
+ )
else:
response = self._make_api_call(
- service, operation, resolved_params, paginate)
+ service, operation, resolved_params, paginate
+ )
if query is not None:
response = jmespath.search(query, response)
return response
def _resolve_params(self, api_params, optional_params, plan_vars):
resolver = VariableResolver()
- api_params_resolved = resolver.resolve_variables(
- plan_vars, api_params)
+ api_params_resolved = resolver.resolve_variables(plan_vars, api_params)
if optional_params is not None:
optional_params_resolved = resolver.resolve_variables(
- plan_vars, optional_params)
+ plan_vars, optional_params
+ )
for key, value in optional_params_resolved.items():
if key not in api_params_resolved and value is not None:
api_params_resolved[key] = value
@@ -417,14 +427,14 @@ def _make_api_call(self, service, operation, resolved_params, paginate):
else:
return getattr(client, client_method_name)(**resolved_params)
- def _get_cached_api_call(self, service, operation, resolved_params,
- paginate):
- cache_key = self._get_cache_key(
- service, operation, resolved_params
- )
+ def _get_cached_api_call(
+ self, service, operation, resolved_params, paginate
+ ):
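+        # The cache key is (service, operation, JSON-serialized params).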
+ cache_key = self._get_cache_key(service, operation, resolved_params)
if cache_key not in self._response_cache:
response = self._make_api_call(
- service, operation, resolved_params, paginate)
+ service, operation, resolved_params, paginate
+ )
self._response_cache[cache_key] = response
return self._response_cache[cache_key]
@@ -432,12 +442,11 @@ def _get_cache_key(self, service_name, operation, resolved_params):
return (
service_name,
operation,
- json.dumps(resolved_params, default=json_encoder)
+ json.dumps(resolved_params, default=json_encoder),
)
class Executor(object):
-
def __init__(self, step_handlers):
self._step_handlers = step_handlers
@@ -466,8 +475,7 @@ def evaluate(self, condition, parameters):
if not isinstance(condition, list):
condition = [condition]
for single in condition:
- statuses.append(self._check_single_condition(
- single, parameters))
+ statuses.append(self._check_single_condition(single, parameters))
return all(statuses)
def _check_single_condition(self, single, parameters):
@@ -483,7 +491,7 @@ class DataTypeConverter:
'int': int,
'float': float,
'str': str,
- 'bool': lambda x: x.lower() == 'true'
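+        # Only the literal string 'true' (case-insensitive) maps to True.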
+ 'bool': lambda x: x.lower() == 'true',
}
@classmethod
@@ -495,7 +503,6 @@ def convert(cls, datatype, value):
class ExecutorStep(object):
-
# Subclasses must implement this to specify what name to use
# for the `type` in a wizard definition.
NAME = ''
@@ -505,7 +512,6 @@ def run_step(self, step_definition, parameters):
class APICallExecutorStep(ExecutorStep):
-
NAME = 'apicall'
def __init__(self, api_invoker):
@@ -514,7 +520,8 @@ def __init__(self, api_invoker):
def run_step(self, step_definition, parameters):
service, op_name = step_definition['operation'].split('.', 1)
response = self._api_invoker.invoke(
- service=service, operation=op_name,
+ service=service,
+ operation=op_name,
api_params=step_definition['params'],
plan_variables=parameters,
optional_api_params=step_definition.get('optional_params'),
@@ -525,7 +532,6 @@ def run_step(self, step_definition, parameters):
class SharedConfigExecutorStep(ExecutorStep):
-
NAME = 'sharedconfig'
def __init__(self, config_api):
@@ -535,8 +541,9 @@ def run_step(self, step_definition, parameters):
config_params = {}
profile = None
if 'profile' in step_definition:
- profile = self._resolve_params(step_definition['profile'],
- parameters)
+ profile = self._resolve_params(
+ step_definition['profile'], parameters
+ )
config_params = self._resolve_params(
step_definition['params'], parameters
)
@@ -555,6 +562,7 @@ class SharedConfigAPI(object):
This allows similar logic to be shared by the planner and executor.
"""
+
def __init__(self, session, config_writer):
self._session = session
self._config_writer = config_writer
@@ -575,24 +583,24 @@ def set_values(self, values, profile=None):
config_params['__section__'] = section
config_params.update(values)
config_filename = os.path.expanduser(
- self._session.get_config_variable('config_file'))
+ self._session.get_config_variable('config_file')
+ )
self._config_writer.update_config(config_params, config_filename)
class DefineVariableStep(ExecutorStep):
-
NAME = 'define-variable'
def run_step(self, step_definition, parameters):
value = step_definition['value']
resolved_value = VariableResolver().resolve_variables(
- parameters, value)
+ parameters, value
+ )
key = step_definition['varname']
parameters[key] = resolved_value
class MergeDictStep(ExecutorStep):
-
NAME = 'merge-dict'
def run_step(self, step_definition, parameters):
@@ -600,7 +608,8 @@ def run_step(self, step_definition, parameters):
result = {}
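+        # Resolve each overlay, then deep-merge it into the running result.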
for overlay in step_definition['overlays']:
resolved_overlay = var_resolver.resolve_variables(
- parameters, overlay,
+ parameters,
+ overlay,
)
result = self._deep_merge(result, resolved_overlay)
parameters[step_definition['output_var']] = result
diff --git a/awscli/customizations/wizard/devcommands.py b/awscli/customizations/wizard/devcommands.py
index 27e6b20dd628..a3e4a64867d0 100644
--- a/awscli/customizations/wizard/devcommands.py
+++ b/awscli/customizations/wizard/devcommands.py
@@ -11,13 +11,15 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ruamel.yaml import YAML
+
from awscli.customizations.commands import BasicCommand
from awscli.customizations.wizard.factory import create_wizard_app
def register_dev_commands(event_handlers):
- event_handlers.register('building-command-table.cli-dev',
- WizardDev.add_command)
+ event_handlers.register(
+ 'building-command-table.cli-dev', WizardDev.add_command
+ )
def create_default_wizard_dev_runner(session):
@@ -57,8 +59,10 @@ class WizardDev(BasicCommand):
'future versions.\n'
)
ARG_TABLE = [
- {'name': 'run-wizard',
- 'help_text': 'Run a wizard given a wizard file.'}
+ {
+ 'name': 'run-wizard',
+ 'help_text': 'Run a wizard given a wizard file.',
+ }
]
def __init__(self, session, dev_runner=None):
diff --git a/awscli/customizations/wizard/exceptions.py b/awscli/customizations/wizard/exceptions.py
index c27ff63e98be..b4bfc809b1d8 100644
--- a/awscli/customizations/wizard/exceptions.py
+++ b/awscli/customizations/wizard/exceptions.py
@@ -45,6 +45,6 @@ def __init__(self, original_exception):
message = self.MSG_FORMAT.format(
original_tb=''.join(format_tb(original_exception.__traceback__)),
original_exception_cls=self.original_exception.__class__.__name__,
- original_exception=self.original_exception
+ original_exception=self.original_exception,
)
super().__init__(message)
diff --git a/awscli/customizations/wizard/factory.py b/awscli/customizations/wizard/factory.py
index 612e9fa483f1..ec8222f1f284 100644
--- a/awscli/customizations/wizard/factory.py
+++ b/awscli/customizations/wizard/factory.py
@@ -10,21 +10,26 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from awscli.customizations.wizard.ui.layout import WizardLayoutFactory
+from awscli.customizations.configure.writer import ConfigFileWriter
from awscli.customizations.wizard import core, ui
from awscli.customizations.wizard.app import (
- WizardAppRunner, WizardApp, WizardValues, WizardTraverser,
+ WizardApp,
+ WizardAppRunner,
+ WizardTraverser,
+ WizardValues,
)
-from awscli.customizations.configure.writer import ConfigFileWriter
+from awscli.customizations.wizard.ui.layout import WizardLayoutFactory
def create_default_executor(api_invoker, shared_config):
return core.Executor(
step_handlers={
core.APICallExecutorStep.NAME: core.APICallExecutorStep(
- api_invoker),
+ api_invoker
+ ),
core.SharedConfigExecutorStep.NAME: core.SharedConfigExecutorStep(
- shared_config),
+ shared_config
+ ),
core.DefineVariableStep.NAME: core.DefineVariableStep(),
core.MergeDictStep.NAME: core.MergeDictStep(),
core.LoadDataExecutorStep.NAME: core.LoadDataExecutorStep(),
@@ -35,19 +40,22 @@ def create_default_executor(api_invoker, shared_config):
def create_default_wizard_v1_runner(session):
api_invoker = core.APIInvoker(session=session)
- shared_config = core.SharedConfigAPI(session=session,
- config_writer=ConfigFileWriter())
+ shared_config = core.SharedConfigAPI(
+ session=session, config_writer=ConfigFileWriter()
+ )
planner = core.Planner(
step_handlers={
core.StaticStep.NAME: core.StaticStep(),
core.PromptStep.NAME: core.PromptStep(ui.UIPrompter()),
core.YesNoPrompt.NAME: core.YesNoPrompt(ui.UIPrompter()),
core.FilePromptStep.NAME: core.FilePromptStep(
- ui.UIFilePrompter(ui.FileCompleter())),
+ ui.UIFilePrompter(ui.FileCompleter())
+ ),
core.TemplateStep.NAME: core.TemplateStep(),
core.APICallStep.NAME: core.APICallStep(api_invoker=api_invoker),
core.SharedConfigStep.NAME: core.SharedConfigStep(
- config_api=shared_config),
+ config_api=shared_config
+ ),
}
)
executor = create_default_executor(api_invoker, shared_config)
@@ -61,24 +69,30 @@ def create_default_wizard_v2_runner(session):
def create_wizard_app(definition, session, output=None, app_input=None):
api_invoker = core.APIInvoker(session=session)
- shared_config = core.SharedConfigAPI(session=session,
- config_writer=ConfigFileWriter())
+ shared_config = core.SharedConfigAPI(
+ session=session, config_writer=ConfigFileWriter()
+ )
layout = WizardLayoutFactory().create_wizard_layout(definition)
values = WizardValues(
definition,
value_retrieval_steps={
core.APICallStep.NAME: core.APICallStep(api_invoker=api_invoker),
core.SharedConfigStep.NAME: core.SharedConfigStep(
- config_api=shared_config),
+ config_api=shared_config
+ ),
core.TemplateStep.NAME: core.TemplateStep(),
core.LoadDataStep.NAME: core.LoadDataStep(),
core.DumpDataStep.NAME: core.DumpDataStep(),
},
- exception_handler=layout.error_bar.display_error
+ exception_handler=layout.error_bar.display_error,
)
executor = create_default_executor(api_invoker, shared_config)
traverser = WizardTraverser(definition, values, executor)
return WizardApp(
- layout=layout, values=values, traverser=traverser,
- executor=executor, output=output, app_input=app_input
+ layout=layout,
+ values=values,
+ traverser=traverser,
+ executor=executor,
+ output=output,
+ app_input=app_input,
)
diff --git a/awscli/customizations/wizard/loader.py b/awscli/customizations/wizard/loader.py
index 8a4b977bfd6b..f58b0c33dd58 100644
--- a/awscli/customizations/wizard/loader.py
+++ b/awscli/customizations/wizard/loader.py
@@ -38,12 +38,14 @@
"""
+
import os
-from ruamel.yaml import YAML
+from ruamel.yaml import YAML
WIZARD_SPEC_DIR = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'wizards',
+ os.path.dirname(os.path.abspath(__file__)),
+ 'wizards',
)
@@ -60,8 +62,7 @@ def __init__(self, spec_dir=None):
self._yaml = YAML(typ='rt')
def list_commands_with_wizards(self):
- """Returns a list of commands with at least one wizard.
- """
+ """Returns a list of commands with at least one wizard."""
return os.listdir(self._spec_dir)
def list_available_wizards(self, command_name):
@@ -81,21 +82,24 @@ def load_wizard(self, command_name, wizard_name):
of the file.
"""
- filename = os.path.join(self._spec_dir, command_name,
- wizard_name + '.yml')
+ filename = os.path.join(
+ self._spec_dir, command_name, wizard_name + '.yml'
+ )
try:
with open(filename) as f:
return self._load_yaml(f.read())
except (OSError, IOError):
- raise WizardNotExistError("Wizard does not exist for command "
- "'%s', name: '%s'" % (command_name,
- wizard_name))
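+            # Treat any failure to read the spec file as a missing wizard.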
+ raise WizardNotExistError(
+ "Wizard does not exist for command "
+ "'%s', name: '%s'" % (command_name, wizard_name)
+ )
def _load_yaml(self, contents):
data = self._yaml.load(contents)
return data
def wizard_exists(self, command_name, wizard_name):
- filename = os.path.join(self._spec_dir, command_name,
- wizard_name + '.yml')
+ filename = os.path.join(
+ self._spec_dir, command_name, wizard_name + '.yml'
+ )
return os.path.isfile(filename)
diff --git a/awscli/customizations/wizard/ui/__init__.py b/awscli/customizations/wizard/ui/__init__.py
index 1924999b6d8f..cd14f697e34e 100644
--- a/awscli/customizations/wizard/ui/__init__.py
+++ b/awscli/customizations/wizard/ui/__init__.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
+
import os
import prompt_toolkit
@@ -34,7 +35,8 @@ def prompt(self, display_text, choices=None):
return selectmenu.select_menu(choices)
else:
response = selectmenu.select_menu(
- choices, display_format=self._display_text)
+ choices, display_format=self._display_text
+ )
result = response['actual_value']
return result
@@ -52,8 +54,7 @@ def get_completions(self, document, complete_event):
for child in sorted(children):
if child.startswith(partial):
result = os.path.join(dirname, child)
- yield Completion(result,
- start_position=-len(result))
+ yield Completion(result, start_position=-len(result))
except OSError:
return
@@ -64,4 +65,5 @@ def __init__(self, completer):
def prompt(self, display_text):
return prompt_toolkit.prompt(
- '%s: ' % display_text, completer=self._completer)
+ '%s: ' % display_text, completer=self._completer
+ )
diff --git a/awscli/customizations/wizard/ui/keybindings.py b/awscli/customizations/wizard/ui/keybindings.py
index cd120a6683bc..c52970b8bc5a 100644
--- a/awscli/customizations/wizard/ui/keybindings.py
+++ b/awscli/customizations/wizard/ui/keybindings.py
@@ -11,15 +11,17 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from prompt_toolkit.application import get_app
-from prompt_toolkit.filters import has_focus, Condition
+from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
+from awscli.customizations.wizard.exceptions import BaseWizardException
from awscli.customizations.wizard.ui.utils import (
- get_ui_control_by_buffer_name, move_to_previous_prompt,
- show_details_if_visible_by_default, refresh_details_view
+ get_ui_control_by_buffer_name,
+ move_to_previous_prompt,
+ refresh_details_view,
+ show_details_if_visible_by_default,
)
-from awscli.customizations.wizard.exceptions import BaseWizardException
@Condition
@@ -52,11 +54,13 @@ def exit_(event):
def submit_current_answer(event):
current_prompt = event.app.traverser.get_current_prompt()
prompt_buffer = get_ui_control_by_buffer_name(
- event.app.layout, current_prompt).buffer
+ event.app.layout, current_prompt
+ ).buffer
try:
event.app.traverser.submit_prompt_answer(prompt_buffer.text)
- if isinstance(event.app.layout.error_bar.current_error,
- BaseWizardException):
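+        # A leftover wizard error is cleared once the new answer is accepted.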
+ if isinstance(
+ event.app.layout.error_bar.current_error, BaseWizardException
+ ):
event.app.layout.error_bar.clear()
    except BaseWizardException as e:
event.app.layout.error_bar.display_error(e)
@@ -91,11 +95,14 @@ def previous_prompt(event):
def focus_on_details_panel(event):
if event.app.details_visible:
layout = event.app.layout
- if layout.current_buffer and \
- layout.current_buffer.name == 'details_buffer':
+ if (
+ layout.current_buffer
+ and layout.current_buffer.name == 'details_buffer'
+ ):
current_prompt = event.app.traverser.get_current_prompt()
current_control = get_ui_control_by_buffer_name(
- layout, current_prompt)
+ layout, current_prompt
+ )
layout.focus(current_control)
else:
details_buffer = layout.get_buffer_by_name('details_buffer')
@@ -106,8 +113,7 @@ def show_details(event):
event.app.details_visible = not event.app.details_visible
layout = event.app.layout
current_prompt = event.app.traverser.get_current_prompt()
- current_control = get_ui_control_by_buffer_name(
- layout, current_prompt)
+ current_control = get_ui_control_by_buffer_name(layout, current_prompt)
if not event.app.details_visible:
layout.focus(current_control)
else:
@@ -121,7 +127,8 @@ def show_save_details_dialogue(event):
refresh_details_view(event.app, current_prompt)
event.app.save_details_visible = True
save_dialogue = get_ui_control_by_buffer_name(
- event.app.layout, 'save_details_dialogue')
+ event.app.layout, 'save_details_dialogue'
+ )
event.app.layout.focus(save_dialogue)
@kb.add(Keys.F4, filter=error_bar_enabled)
diff --git a/awscli/customizations/wizard/ui/layout.py b/awscli/customizations/wizard/ui/layout.py
index d0b41fcbbc3b..6eab5e796833 100644
--- a/awscli/customizations/wizard/ui/layout.py
+++ b/awscli/customizations/wizard/ui/layout.py
@@ -15,36 +15,55 @@
from prompt_toolkit.application import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import PathCompleter
-from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
-from prompt_toolkit.filters import has_focus, Condition
+from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.formatted_text import HTML, to_formatted_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.focus import (
- focus_next, focus_previous
+ focus_next,
+ focus_previous,
)
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.containers import (
- Window, HSplit, Dimension, ConditionalContainer, WindowAlign, VSplit,
- to_container, to_filter
+ ConditionalContainer,
+ Dimension,
+ HSplit,
+ VSplit,
+ Window,
+ WindowAlign,
+ to_container,
+ to_filter,
)
+from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
+from prompt_toolkit.utils import is_windows
from prompt_toolkit.widgets import (
- HorizontalLine, Box, Button, Label, Shadow, Frame, VerticalLine,
- Dialog, TextArea
+ Box,
+ Button,
+ Dialog,
+ Frame,
+ HorizontalLine,
+ Label,
+ Shadow,
+ TextArea,
+ VerticalLine,
)
-from prompt_toolkit.utils import is_windows
from awscli.autoprompt.widgets import BaseToolbarView, TitleLine
from awscli.customizations.wizard import core
-from awscli.customizations.wizard.ui.section import (
- WizardSectionTab, WizardSectionBody
-)
from awscli.customizations.wizard.ui.keybindings import (
- details_visible, prompt_has_details, error_bar_enabled,
- save_details_visible
+ details_visible,
+ error_bar_enabled,
+ prompt_has_details,
+ save_details_visible,
+)
+from awscli.customizations.wizard.ui.section import (
+ WizardSectionBody,
+ WizardSectionTab,
)
from awscli.customizations.wizard.ui.utils import (
- move_to_previous_prompt, Spacer, get_ui_control_by_buffer_name
+ Spacer,
+ get_ui_control_by_buffer_name,
+ move_to_previous_prompt,
)
@@ -56,13 +75,15 @@ def create_wizard_layout(self, definition):
[
self._create_title(definition),
self._create_sections(
- definition, run_wizard_dialog, error_bar),
- HorizontalLine()
+ definition, run_wizard_dialog, error_bar
+ ),
+ HorizontalLine(),
]
)
return WizardLayout(
- container=container, run_wizard_dialog=run_wizard_dialog,
- error_bar=error_bar
+ container=container,
+ run_wizard_dialog=run_wizard_dialog,
+ error_bar=error_bar,
)
def _create_title(self, definition):
@@ -90,17 +111,19 @@ def _create_sections(self, definition, run_wizard_dialog, error_bar):
return VSplit(
[
HSplit(
- section_tabs,
- padding=1,
- style='class:wizard.section.tab'
+ section_tabs, padding=1, style='class:wizard.section.tab'
),
ConditionalContainer(
VerticalLine(), filter=Condition(is_windows)
),
- HSplit([*section_bodies,
+ HSplit(
+ [
+ *section_bodies,
WizardDetailsPanel(),
error_bar,
- ToolbarView()])
+ ToolbarView(),
+ ]
+ ),
]
)
@@ -144,25 +167,33 @@ def _get_title(self):
def _get_container(self):
return ConditionalContainer(
- HSplit([
- TitleLine(self._get_title),
- VSplit([
- Window(
- content=BufferControl(
- buffer=Buffer(
- name='details_buffer', read_only=True),
- ),
- height=Dimension(
- max=self.DIMENSIONS['details_window_height_max'],
- preferred=self.DIMENSIONS[
- 'details_window_height_pref']
- ),
- wrap_lines=True
+ HSplit(
+ [
+ TitleLine(self._get_title),
+ VSplit(
+ [
+ Window(
+ content=BufferControl(
+ buffer=Buffer(
+ name='details_buffer', read_only=True
+ ),
+ ),
+ height=Dimension(
+ max=self.DIMENSIONS[
+ 'details_window_height_max'
+ ],
+ preferred=self.DIMENSIONS[
+ 'details_window_height_pref'
+ ],
+ ),
+ wrap_lines=True,
+ ),
+ SaveFileDialogue(),
+ ]
),
- SaveFileDialogue(),
- ])
- ]),
- details_visible
+ ]
+ ),
+ details_visible,
)
def __pt_container__(self):
@@ -175,8 +206,7 @@ def __init__(self):
def _get_container(self):
return ConditionalContainer(
- self._create_dialog(),
- save_details_visible
+ self._create_dialog(), save_details_visible
)
def __pt_container__(self):
@@ -188,17 +218,21 @@ def _create_dialog(self):
cancel_button = self._create_cancel_button(textfield)
dialog = Dialog(
title='Save to file',
- body=HSplit([
- Label(text='Filename', dont_extend_height=True),
- textfield,
- ], padding=Dimension(preferred=1, max=1)),
+ body=HSplit(
+ [
+ Label(text='Filename', dont_extend_height=True),
+ textfield,
+ ],
+ padding=Dimension(preferred=1, max=1),
+ ),
buttons=[save_button, cancel_button],
- with_background=True)
+ with_background=True,
+ )
dialog.container.container.style = 'class:wizard.dialog.save'
dialog.container.body.container.style = 'class:wizard.dialog.body'
dialog.container.body.container.content.key_bindings.add(
- Keys.Enter, filter=has_focus('save_details_dialogue'))(
- save_button.handler)
+ Keys.Enter, filter=has_focus('save_details_dialogue')
+ )(save_button.handler)
return dialog
def _create_textfield(self):
@@ -214,11 +248,13 @@ def _create_save_button(self, textfield):
def save_handler(*args, **kwargs):
app = get_app()
contents = app.layout.get_buffer_by_name(
- 'details_buffer').document.text
+ 'details_buffer'
+ ).document.text
app.file_io.write_file_contents(textfield.text, contents)
app.save_details_visible = False
current_control = get_ui_control_by_buffer_name(
- app.layout, app.traverser.get_current_prompt())
+ app.layout, app.traverser.get_current_prompt()
+ )
app.layout.focus(current_control)
return Button(text='Save', handler=save_handler)
@@ -228,6 +264,7 @@ def cancel_handler(*args, **kwargs):
app = get_app()
app.save_details_visible = False
app.layout.focus('details_buffer')
+
return Button(text='Cancel', handler=cancel_handler)
def _create_key_bindings(self, save_button, cancel_button):
@@ -252,29 +289,29 @@ def __init__(self):
def create_window(self, help_text):
text_control = FormattedTextControl(text=lambda: help_text)
text_control.name = 'toolbar_panel'
- return HSplit([
- HorizontalLine(),
- Window(
- content=text_control,
- wrap_lines=True,
- **self.DIMENSIONS
- )
- ])
+ return HSplit(
+ [
+ HorizontalLine(),
+ Window(
+ content=text_control, wrap_lines=True, **self.DIMENSIONS
+ ),
+ ]
+ )
def help_text(self):
app = get_app()
output = []
if prompt_has_details():
title = getattr(app, 'details_title', 'Details panel')
- output.extend([
- f'{self.STYLE}[F2] Switch to {title}',
- f'{self.STYLE}[F3] Show/Hide {title}',
- f'{self.STYLE}[CTRL+S] Save {title}',
- ])
- if error_bar_enabled():
- output.append(
- f'{self.STYLE}[F4] Show/Hide error message'
+ output.extend(
+ [
+ f'{self.STYLE}[F2] Switch to {title}',
+ f'{self.STYLE}[F3] Show/Hide {title}',
+ f'{self.STYLE}[CTRL+S] Save {title}',
+ ]
)
+ if error_bar_enabled():
+ output.append(f'{self.STYLE}[F4] Show/Hide error message')
return to_formatted_text(HTML(f'{self.SPACING}'.join(output)))
@@ -367,7 +404,7 @@ def _create_default_buttons(self):
def _create_dialog_frame(self):
frame_body = Box(
body=self._create_buttons_container(),
- height=Dimension(min=1, max=3, preferred=3)
+ height=Dimension(min=1, max=3, preferred=3),
)
return Shadow(
body=Frame(
@@ -405,8 +442,7 @@ def __init__(self):
def display_error(self, exception):
self.current_error = exception
self._error_bar_buffer.text = (
- 'Encountered following error in wizard:\n\n'
- f'{exception}'
+ 'Encountered following error in wizard:\n\n' f'{exception}'
)
get_app().error_bar_visible = True
@@ -420,19 +456,20 @@ def _get_error_bar_buffer(self):
def _get_container(self):
return ConditionalContainer(
- HSplit([
- TitleLine('Wizard exception'),
- Window(
- content=BufferControl(
- buffer=self._error_bar_buffer,
- focusable=False
+ HSplit(
+ [
+ TitleLine('Wizard exception'),
+ Window(
+ content=BufferControl(
+ buffer=self._error_bar_buffer, focusable=False
+ ),
+ style='class:wizard.error',
+ dont_extend_height=True,
+ wrap_lines=True,
),
- style='class:wizard.error',
- dont_extend_height=True,
- wrap_lines=True,
- ),
- ]),
- Condition(self._is_visible)
+ ]
+ ),
+ Condition(self._is_visible),
)
def _is_visible(self):
diff --git a/awscli/customizations/wizard/ui/prompt.py b/awscli/customizations/wizard/ui/prompt.py
index 6373762ed2d7..e63164b81306 100644
--- a/awscli/customizations/wizard/ui/prompt.py
+++ b/awscli/customizations/wizard/ui/prompt.py
@@ -12,21 +12,26 @@
# language governing permissions and limitations under the License.
from prompt_toolkit.application import get_app
from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import (
- Window, VSplit, Dimension, ConditionalContainer, FloatContainer, Float
+ ConditionalContainer,
+ Dimension,
+ Float,
+ FloatContainer,
+ ScrollOffsets,
+ VSplit,
+ Window,
)
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.layout.margins import ScrollbarMargin
-from prompt_toolkit.layout.containers import ScrollOffsets
from prompt_toolkit.layout.menus import CompletionsMenu
-from prompt_toolkit.completion import PathCompleter
from awscli.customizations.wizard.ui.selectmenu import (
- CollapsableSelectionMenuControl
+ CollapsableSelectionMenuControl,
)
from awscli.customizations.wizard.ui.utils import FullyExtendedWidthWindow
@@ -46,7 +51,7 @@ def _get_container(self):
answer = WizardPromptCompletionAnswer(
self._value_name,
default_value=self._value_definition.get('default_value'),
- completer=self._value_definition['completer']
+ completer=self._value_definition['completer'],
)
else:
answer = WizardPromptAnswer(
@@ -57,13 +62,12 @@ def _get_container(self):
VSplit(
[
WizardPromptDescription(
- self._value_name,
- self._value_definition['description']
+ self._value_name, self._value_definition['description']
),
- answer
+ answer,
],
),
- Condition(self._is_visible)
+ Condition(self._is_visible),
)
def _is_visible(self):
@@ -81,14 +85,9 @@ def __init__(self, value_name, value_description):
def _get_container(self):
content = f'{self._value_description}:'
- buffer = Buffer(
- document=Document(content),
- read_only=True
- )
+ buffer = Buffer(document=Document(content), read_only=True)
return Window(
- content=BufferControl(
- buffer=buffer, focusable=False
- ),
+ content=BufferControl(buffer=buffer, focusable=False),
style=self._get_style,
width=Dimension.exact(len(content) + 1),
dont_extend_height=True,
@@ -114,14 +113,13 @@ def __init__(self, value_name, default_value=None):
self.container = self._get_answer_container()
def _get_answer_buffer(self):
- return Buffer(name=self._value_name,
- document=Document(text=self._default_value))
+ return Buffer(
+ name=self._value_name, document=Document(text=self._default_value)
+ )
def _get_answer_container(self):
return FullyExtendedWidthWindow(
- content=BufferControl(
- buffer=self._buffer
- ),
+ content=BufferControl(buffer=self._buffer),
style=self._get_style,
dont_extend_height=True,
)
@@ -144,20 +142,22 @@ def __init__(self, value_name, default_value=None, completer=None):
super().__init__(value_name, default_value)
def _get_completer(self, completer):
- return {
- 'file_completer': PathCompleter(expanduser=True)
- }[completer]
+ return {'file_completer': PathCompleter(expanduser=True)}[completer]
def _get_answer_buffer(self):
- return Buffer(name=self._value_name,
- completer=self._completer,
- complete_while_typing=True,
- document=Document(text=self._default_value))
+ return Buffer(
+ name=self._value_name,
+ completer=self._completer,
+ complete_while_typing=True,
+ document=Document(text=self._default_value),
+ )
def _get_menu_height(self):
if self._buffer.complete_state:
- return min(len(self._buffer.complete_state.completions),
- self.COMPLETION_MENU_MAX_HEIGHT)
+ return min(
+ len(self._buffer.complete_state.completions),
+ self.COMPLETION_MENU_MAX_HEIGHT,
+ )
return 0
def _get_answer_container(self):
@@ -165,16 +165,18 @@ def _get_answer_container(self):
FullyExtendedWidthWindow(
content=BufferControl(buffer=self._buffer),
style=self._get_style,
- wrap_lines=True
+ wrap_lines=True,
),
floats=[
Float(
- xcursor=True, ycursor=True, top=1,
+ xcursor=True,
+ ycursor=True,
+ top=1,
height=self._get_menu_height,
content=CompletionsMenu(),
)
],
- key_bindings=self._get_key_bindings()
+ key_bindings=self._get_key_bindings(),
)
def _get_key_bindings(self):
@@ -205,7 +207,7 @@ def _get_answer_container(self):
content=CollapsableSelectionMenuControl(
items=self._get_choices,
selection_capture_buffer=self._buffer,
- on_toggle=self._show_details
+ on_toggle=self._show_details,
),
style=self._get_style,
always_hide_cursor=True,
@@ -218,8 +220,7 @@ def _get_choices(self):
def _show_details(self, choice):
app = get_app()
- details_buffer = app.layout.get_buffer_by_name(
- 'details_buffer')
+ details_buffer = app.layout.get_buffer_by_name('details_buffer')
details, title = self._get_details(choice)
app.details_title = title
details_buffer.reset()
diff --git a/awscli/customizations/wizard/ui/section.py b/awscli/customizations/wizard/ui/section.py
index fdee3ed5a87b..493ac77e0a6e 100644
--- a/awscli/customizations/wizard/ui/section.py
+++ b/awscli/customizations/wizard/ui/section.py
@@ -15,7 +15,10 @@
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout.containers import (
- Window, HSplit, Dimension, ConditionalContainer
+ ConditionalContainer,
+ Dimension,
+ HSplit,
+ Window,
)
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.widgets import Box
@@ -31,14 +34,9 @@ def __init__(self, section_name, section_definition):
def _get_container(self):
content = f" {self._definition['shortname']}"
- buffer = Buffer(
- document=Document(content),
- read_only=True
- )
+ buffer = Buffer(document=Document(content), read_only=True)
return Window(
- content=BufferControl(
- buffer=buffer, focusable=False
- ),
+ content=BufferControl(buffer=buffer, focusable=False),
style=self._get_style,
width=Dimension.exact(len(content) + 1),
dont_extend_height=True,
@@ -66,12 +64,12 @@ def _get_container(self):
return ConditionalContainer(
Box(
HSplit(
- self._create_prompts_from_section_definition(),
- padding=1
+ self._create_prompts_from_section_definition(), padding=1
),
- padding_left=2, padding_top=1
+ padding_left=2,
+ padding_top=1,
),
- Condition(self._is_current_section)
+ Condition(self._is_current_section),
)
def _is_current_section(self):
diff --git a/awscli/customizations/wizard/ui/selectmenu.py b/awscli/customizations/wizard/ui/selectmenu.py
index 8a257d54357d..3c9e1c150780 100644
--- a/awscli/customizations/wizard/ui/selectmenu.py
+++ b/awscli/customizations/wizard/ui/selectmenu.py
@@ -11,21 +11,22 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
+
from prompt_toolkit import Application
from prompt_toolkit.application import get_app
from prompt_toolkit.buffer import Buffer
-from prompt_toolkit.utils import get_cwidth
-from prompt_toolkit.layout import Layout, FloatContainer, Float
-from prompt_toolkit.layout.controls import UIControl, UIContent
-from prompt_toolkit.layout.screen import Point
-from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.key_binding.key_bindings import KeyBindings
-from prompt_toolkit.layout.margins import ScrollbarMargin
+from prompt_toolkit.layout import Float, FloatContainer, Layout
from prompt_toolkit.layout.containers import ScrollOffsets, Window
+from prompt_toolkit.layout.controls import UIContent, UIControl
+from prompt_toolkit.layout.dimension import Dimension
+from prompt_toolkit.layout.margins import ScrollbarMargin
+from prompt_toolkit.layout.screen import Point
+from prompt_toolkit.utils import get_cwidth
def select_menu(items, display_format=None, max_height=10):
- """ Presents a list of options and allows the user to select one.
+ """Presents a list of options and allows the user to select one.
This presents a static list of options and prompts the user to select one.
This is similar to a completion menu but is different in that it does not
@@ -64,7 +65,8 @@ def exit_app(event):
# limit the height and width of the window.
content = FloatContainer(
Window(height=Dimension(min=min_height, max=min_height)),
- [Float(menu_window, top=0, left=0)])
+ [Float(menu_window, top=0, left=0)],
+ )
app = Application(
layout=Layout(content),
key_bindings=app_bindings,
@@ -84,7 +86,7 @@ def _trim_text(text, max_width):
if width > max_width:
# When there are no double width characters, just use slice operation.
if len(text) == width:
- trimmed_text = (text[:max(1, max_width - 3)] + '...')[:max_width]
+ trimmed_text = (text[: max(1, max_width - 3)] + '...')[:max_width]
return trimmed_text, len(trimmed_text)
# Otherwise, loop until we have the desired width. (Rather
@@ -151,13 +153,13 @@ def _menu_item_fragment(self, item, is_selected, menu_width):
def create_content(self, width, height):
def get_line(i):
item = self._get_items()[i]
- is_selected = (i == self._selection)
+ is_selected = i == self._selection
return self._menu_item_fragment(item, is_selected, width)
return UIContent(
get_line=get_line,
cursor_position=Point(x=0, y=self._selection or 0),
- line_count=len(self._get_items())
+ line_count=len(self._get_items()),
)
def _move_cursor(self, delta):
@@ -190,8 +192,15 @@ def app_result(event):
class CollapsableSelectionMenuControl(SelectionMenuControl):
"""Menu that collapses to text with selection when loses focus"""
- def __init__(self, items, display_format=None, cursor='>',
- selection_capture_buffer=None, on_toggle=None):
+
+ def __init__(
+ self,
+ items,
+ display_format=None,
+ cursor='>',
+ selection_capture_buffer=None,
+ on_toggle=None,
+ ):
super().__init__(items, display_format=display_format, cursor=cursor)
if not selection_capture_buffer:
selection_capture_buffer = Buffer()
@@ -204,6 +213,7 @@ def create_content(self, width, height):
self._has_ever_entered_select_menu = True
return super().create_content(width, height)
else:
+
def get_line(i):
content = ''
if self._has_ever_entered_select_menu:
@@ -212,11 +222,11 @@ def get_line(i):
return UIContent(get_line=get_line, line_count=1)
- def preferred_height(self, width, max_height, wrap_lines,
- get_line_prefix):
+ def preferred_height(self, width, max_height, wrap_lines, get_line_prefix):
if get_app().layout.has_focus(self):
return super().preferred_height(
- width, max_height, wrap_lines, get_line_prefix)
+ width, max_height, wrap_lines, get_line_prefix
+ )
else:
return 1
diff --git a/awscli/customizations/wizard/ui/style.py b/awscli/customizations/wizard/ui/style.py
index c59a6ea744aa..7bd7435bf558 100644
--- a/awscli/customizations/wizard/ui/style.py
+++ b/awscli/customizations/wizard/ui/style.py
@@ -16,26 +16,25 @@
def get_default_style():
basic_styles = [
- # Wizard-specific classes
- ('wizard', ''),
- ('wizard.title', 'underline bold'),
- ('wizard.prompt.description', 'bold'),
- ('wizard.prompt.description.current', 'white'),
- ('wizard.prompt.answer', 'bg:#aaaaaa black'),
- ('wizard.prompt.answer.current', 'white'),
- ('wizard.section.tab.current', 'white'),
- ('wizard.section.tab.unvisited', '#777777'),
- ('wizard.section.tab.visited', ''),
- ('wizard.dialog', ''),
- ('wizard.dialog frame.label', 'white bold'),
- ('wizard.dialog.save frame.label', 'black bold'),
- ('wizard.dialog.body', 'bg:#aaaaaa black'),
- ('wizard.error', 'bg:#550000 #ffffff'),
-
- # Prompt-toolkit built-in classes
- ('button.focused', 'bg:#777777 white'),
- ('completion-menu.completion', 'underline'),
- ]
+ # Wizard-specific classes
+ ('wizard', ''),
+ ('wizard.title', 'underline bold'),
+ ('wizard.prompt.description', 'bold'),
+ ('wizard.prompt.description.current', 'white'),
+ ('wizard.prompt.answer', 'bg:#aaaaaa black'),
+ ('wizard.prompt.answer.current', 'white'),
+ ('wizard.section.tab.current', 'white'),
+ ('wizard.section.tab.unvisited', '#777777'),
+ ('wizard.section.tab.visited', ''),
+ ('wizard.dialog', ''),
+ ('wizard.dialog frame.label', 'white bold'),
+ ('wizard.dialog.save frame.label', 'black bold'),
+ ('wizard.dialog.body', 'bg:#aaaaaa black'),
+ ('wizard.error', 'bg:#550000 #ffffff'),
+ # Prompt-toolkit built-in classes
+ ('button.focused', 'bg:#777777 white'),
+ ('completion-menu.completion', 'underline'),
+ ]
if is_windows():
os_related_styles = [
('wizard.section.tab', 'bold black'),
diff --git a/awscli/customizations/wizard/ui/utils.py b/awscli/customizations/wizard/ui/utils.py
index 0aedb8d049ea..68cc9af6b740 100644
--- a/awscli/customizations/wizard/ui/utils.py
+++ b/awscli/customizations/wizard/ui/utils.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
-from prompt_toolkit.layout.containers import Window, Dimension
+from prompt_toolkit.layout.containers import Dimension, Window
from prompt_toolkit.layout.controls import BufferControl
@@ -28,7 +28,8 @@ def get_ui_control_by_buffer_name(layout, buffer_name):
if hasattr(control, 'buffer') and control.buffer.name == buffer_name:
return control
raise ValueError(
- f"Couldn't find buffer in the current layout: {buffer_name}")
+ f"Couldn't find buffer in the current layout: {buffer_name}"
+ )
def move_to_previous_prompt(app):
@@ -37,13 +38,15 @@ def move_to_previous_prompt(app):
show_details_if_visible_by_default(app, previous_prompt)
refresh_details_view(app, previous_prompt)
previous_control = get_ui_control_by_buffer_name(
- app.layout, previous_prompt)
+ app.layout, previous_prompt
+ )
app.layout.focus(previous_control)
def show_details_if_visible_by_default(app, prompt):
- app.details_visible = \
- app.traverser.is_prompt_details_visible_by_default(prompt)
+ app.details_visible = app.traverser.is_prompt_details_visible_by_default(
+ prompt
+ )
def refresh_details_view(app, prompt):
@@ -59,19 +62,13 @@ class Spacer:
element such as expanding tab column in the wizard app with the
color gray.
"""
+
def __init__(self):
self.container = self._get_container()
def _get_container(self):
- buffer = Buffer(
- document=Document(''),
- read_only=True
- )
- return Window(
- content=BufferControl(
- buffer=buffer, focusable=False
- )
- )
+ buffer = Buffer(document=Document(''), read_only=True)
+ return Window(content=BufferControl(buffer=buffer, focusable=False))
def __pt_container__(self):
return self.container
@@ -79,5 +76,6 @@ def __pt_container__(self):
class FullyExtendedWidthWindow(Window):
"""Window that fully extends its available width"""
+
def preferred_width(self, max_available_width):
return Dimension(preferred=max_available_width)
diff --git a/awscli/customizations/wizard/wizards/lambda/new-function.yml b/awscli/customizations/wizard/wizards/lambda/new-function.yml
index 85aa318f8860..7f755e3d54b3 100644
--- a/awscli/customizations/wizard/wizards/lambda/new-function.yml
+++ b/awscli/customizations/wizard/wizards/lambda/new-function.yml
@@ -132,7 +132,7 @@ __OUTPUT__:
Wizard successfully created Lambda Function:
Function name: {function_name}
Function ARN: {function_arn}
-
+
{% if {preview_type} == preview_cli_command %}
  Steps to create the function are equivalent to running the following sample AWS CLI commands:
diff --git a/awscli/data/metadata.json b/awscli/data/metadata.json
index ed95c3da54fa..d2763dbe39ee 100644
--- a/awscli/data/metadata.json
+++ b/awscli/data/metadata.json
@@ -1,3 +1,3 @@
{
"distribution_source": "source"
-}
\ No newline at end of file
+}
diff --git a/awscli/errorhandler.py b/awscli/errorhandler.py
index 198f908ad23e..7ac60221191a 100644
--- a/awscli/errorhandler.py
+++ b/awscli/errorhandler.py
@@ -13,24 +13,25 @@
import logging
import signal
-from botocore.exceptions import (
- NoRegionError, NoCredentialsError, ClientError,
- ParamValidationError as BotocoreParamValidationError,
-)
-
+from awscli.argparser import USAGE, ArgParseException
from awscli.argprocess import ParamError, ParamSyntaxError
from awscli.arguments import UnknownArgumentError
-from awscli.argparser import ArgParseException, USAGE
+from awscli.autoprompt.factory import PrompterKeyboardInterrupt
from awscli.constants import (
- PARAM_VALIDATION_ERROR_RC, CONFIGURATION_ERROR_RC, CLIENT_ERROR_RC,
- GENERAL_ERROR_RC
+ CLIENT_ERROR_RC,
+ CONFIGURATION_ERROR_RC,
+ GENERAL_ERROR_RC,
+ PARAM_VALIDATION_ERROR_RC,
)
-from awscli.utils import PagerInitializationException
-from awscli.autoprompt.factory import PrompterKeyboardInterrupt
from awscli.customizations.exceptions import (
- ParamValidationError, ConfigurationError
+ ConfigurationError,
+ ParamValidationError,
+)
+from awscli.utils import PagerInitializationException
+from botocore.exceptions import ClientError, NoCredentialsError, NoRegionError
+from botocore.exceptions import (
+ ParamValidationError as BotocoreParamValidationError,
)
-
LOG = logging.getLogger(__name__)
@@ -40,7 +41,7 @@ def construct_entry_point_handlers_chain():
ParamValidationErrorsHandler(),
PrompterInterruptExceptionHandler(),
InterruptExceptionHandler(),
- GeneralExceptionHandler()
+ GeneralExceptionHandler(),
]
return ChainedExceptionHandler(exception_handlers=handlers)
@@ -55,7 +56,7 @@ def construct_cli_error_handlers_chain():
PagerErrorHandler(),
InterruptExceptionHandler(),
ClientErrorHandler(),
- GeneralExceptionHandler()
+ GeneralExceptionHandler(),
]
return ChainedExceptionHandler(exception_handlers=handlers)
@@ -84,8 +85,11 @@ def _do_handle_exception(self, exception, stdout, stderr):
class ParamValidationErrorsHandler(FilteredExceptionHandler):
EXCEPTIONS_TO_HANDLE = (
- ParamError, ParamSyntaxError, ArgParseException,
- ParamValidationError, BotocoreParamValidationError
+ ParamError,
+ ParamSyntaxError,
+ ArgParseException,
+ ParamValidationError,
+ BotocoreParamValidationError,
)
RC = PARAM_VALIDATION_ERROR_RC
@@ -108,7 +112,9 @@ class ConfigurationErrorHandler(FilteredExceptionHandler):
class NoRegionErrorHandler(FilteredExceptionHandler):
EXCEPTIONS_TO_HANDLE = NoRegionError
RC = CONFIGURATION_ERROR_RC
- MESSAGE = '%s You can also configure your region by running "aws configure".'
+ MESSAGE = (
+ '%s You can also configure your region by running "aws configure".'
+ )
class NoCredentialsErrorHandler(FilteredExceptionHandler):
diff --git a/awscli/formatter.py b/awscli/formatter.py
index 2fbd9a20cbec..44cf017852ec 100644
--- a/awscli/formatter.py
+++ b/awscli/formatter.py
@@ -13,16 +13,14 @@
import logging
from datetime import datetime
-from botocore.compat import json
-from botocore.utils import set_value_from_jmespath
-from botocore.paginate import PageIterator
from ruamel.yaml import YAML
-from awscli.table import MultiTable, Styler, ColorizedStyler
-from awscli import text
-from awscli import compat
+from awscli import compat, text
+from awscli.table import ColorizedStyler, MultiTable, Styler
from awscli.utils import json_encoder
-
+from botocore.compat import json
+from botocore.paginate import PageIterator
+from botocore.utils import set_value_from_jmespath
LOG = logging.getLogger(__name__)
@@ -78,7 +76,8 @@ def __call__(self, command_name, response, stream=None):
else:
response_data = response
response_data = self._get_transformed_response_for_output(
- response_data)
+ response_data
+ )
try:
self._format_response(command_name, response_data, stream)
except IOError as e:
@@ -92,15 +91,19 @@ def __call__(self, command_name, response, stream=None):
class JSONFormatter(FullyBufferedFormatter):
-
def _format_response(self, command_name, response, stream):
# For operations that have no response body (e.g. s3 put-object)
# the response will be an empty string. We don't want to print
# that out to the user but other "falsey" values like an empty
# dictionary should be printed.
if response != {}:
- json.dump(response, stream, indent=4, default=json_encoder,
- ensure_ascii=False)
+ json.dump(
+ response,
+ stream,
+ indent=4,
+ default=json_encoder,
+ ensure_ascii=False,
+ )
stream.write('\n')
@@ -178,7 +181,8 @@ def __call__(self, command_name, response, stream=None):
def _get_response_stream(self, response):
if is_response_paginated(response):
return compat.imap(
- self._get_transformed_response_for_output, response)
+ self._get_transformed_response_for_output, response
+ )
else:
output = self._get_transformed_response_for_output(response)
if output == {}:
@@ -196,19 +200,23 @@ class TableFormatter(FullyBufferedFormatter):
using the output definition from the model.
"""
+
def __init__(self, args, table=None):
super(TableFormatter, self).__init__(args)
if args.color == 'auto':
- self.table = MultiTable(initial_section=False,
- column_separator='|')
+ self.table = MultiTable(
+ initial_section=False, column_separator='|'
+ )
elif args.color == 'off':
styler = Styler()
- self.table = MultiTable(initial_section=False,
- column_separator='|', styler=styler)
+ self.table = MultiTable(
+ initial_section=False, column_separator='|', styler=styler
+ )
elif args.color == 'on':
styler = ColorizedStyler()
- self.table = MultiTable(initial_section=False,
- column_separator='|', styler=styler)
+ self.table = MultiTable(
+ initial_section=False, column_separator='|', styler=styler
+ )
else:
raise ValueError("Unknown color option: %s" % args.color)
@@ -257,8 +265,9 @@ def _build_sub_table_from_dict(self, current, indent_level):
self.table.add_row_header(headers)
self.table.add_row([current[k] for k in headers])
for remaining in more:
- self._build_table(remaining, current[remaining],
- indent_level=indent_level + 1)
+ self._build_table(
+ remaining, current[remaining], indent_level=indent_level + 1
+ )
def _build_sub_table_from_list(self, current, indent_level, title):
headers, more = self._group_scalar_keys_from_list(current)
@@ -266,8 +275,7 @@ def _build_sub_table_from_list(self, current, indent_level, title):
first = True
for element in current:
if not first and more:
- self.table.new_section(title,
- indent_level=indent_level)
+ self.table.new_section(title, indent_level=indent_level)
self.table.add_row_header(headers)
first = False
# Use .get() to account for the fact that sometimes an element
@@ -278,8 +286,11 @@ def _build_sub_table_from_list(self, current, indent_level, title):
# be in every single element of the list, so we need to
# check this condition before recursing.
if remaining in element:
- self._build_table(remaining, element[remaining],
- indent_level=indent_level + 1)
+ self._build_table(
+ remaining,
+ element[remaining],
+ indent_level=indent_level + 1,
+ )
def _scalar_type(self, element):
return not isinstance(element, (list, dict))
@@ -315,7 +326,6 @@ def _group_scalar_keys(self, current):
class TextFormatter(Formatter):
-
def __call__(self, command_name, response, stream=None):
if stream is None:
stream = self._get_default_stream()
@@ -331,9 +341,7 @@ def __call__(self, command_name, response, stream=None):
for result_key in result_keys:
data = result_key.search(page)
set_value_from_jmespath(
- current,
- result_key.expression,
- data
+ current, result_key.expression, data
)
self._format_response(current, stream)
if response.resume_token:
@@ -341,7 +349,8 @@ def __call__(self, command_name, response, stream=None):
# if they want.
self._format_response(
{'NextToken': {'NextToken': response.resume_token}},
- stream)
+ stream,
+ )
else:
self._remove_request_id(response)
self._format_response(response, stream)
diff --git a/awscli/handlers.py b/awscli/handlers.py
index 4cbaa6ea6472..ef3abe07fc3a 100644
--- a/awscli/handlers.py
+++ b/awscli/handlers.py
@@ -16,93 +16,121 @@
registered with the event system.
"""
+
+from awscli.alias import register_alias_commands
from awscli.argprocess import ParamShorthandParser
-from awscli.customizations.ec2instanceconnect import register_ec2_instance_connect_commands
-from awscli.paramfile import register_uri_param_handler
from awscli.clidriver import no_pager_handler
from awscli.customizations import datapipeline
from awscli.customizations.addexamples import add_examples
from awscli.customizations.argrename import register_arg_renames
from awscli.customizations.assumerole import register_assume_role_provider
from awscli.customizations.awslambda import register_lambda_create_function
+from awscli.customizations.binaryformat import add_binary_formatter
from awscli.customizations.cliinput import register_cli_input_args
-from awscli.customizations.cloudformation import initialize as cloudformation_init
+from awscli.customizations.cloudformation import (
+ initialize as cloudformation_init,
+)
from awscli.customizations.cloudfront import register as register_cloudfront
from awscli.customizations.cloudsearch import initialize as cloudsearch_init
from awscli.customizations.cloudsearchdomain import register_cloudsearchdomain
from awscli.customizations.cloudtrail import initialize as cloudtrail_init
from awscli.customizations.codeartifact import register_codeartifact_commands
from awscli.customizations.codecommit import initialize as codecommit_init
-from awscli.customizations.codedeploy.codedeploy import initialize as \
- codedeploy_init
+from awscli.customizations.codedeploy.codedeploy import (
+ initialize as codedeploy_init,
+)
from awscli.customizations.configservice.getstatus import register_get_status
-from awscli.customizations.configservice.putconfigurationrecorder import \
- register_modify_put_configuration_recorder
-from awscli.customizations.configservice.rename_cmd import \
- register_rename_config
+from awscli.customizations.configservice.putconfigurationrecorder import (
+ register_modify_put_configuration_recorder,
+)
+from awscli.customizations.configservice.rename_cmd import (
+ register_rename_config,
+)
from awscli.customizations.configservice.subscribe import register_subscribe
from awscli.customizations.configure.configure import register_configure_cmd
+from awscli.customizations.devcommands import register_dev_commands
+from awscli.customizations.dlm.dlm import dlm_initialize
+from awscli.customizations.dsql import register_dsql_customizations
from awscli.customizations.dynamodb.ddb import register_ddb
-from awscli.customizations.dynamodb.paginatorfix import \
- register_dynamodb_paginator_fix
-from awscli.customizations.history import register_history_mode
-from awscli.customizations.history import register_history_commands
+from awscli.customizations.dynamodb.paginatorfix import (
+ register_dynamodb_paginator_fix,
+)
from awscli.customizations.ec2.addcount import register_count_events
from awscli.customizations.ec2.bundleinstance import register_bundleinstance
from awscli.customizations.ec2.decryptpassword import ec2_add_priv_launch_key
+from awscli.customizations.ec2.paginate import register_ec2_page_size_injector
from awscli.customizations.ec2.protocolarg import register_protocol_args
from awscli.customizations.ec2.runinstances import register_runinstances
from awscli.customizations.ec2.secgroupsimplify import register_secgroup
-from awscli.customizations.ec2.paginate import register_ec2_page_size_injector
+from awscli.customizations.ec2instanceconnect import (
+ register_ec2_instance_connect_commands,
+)
from awscli.customizations.ecr import register_ecr_commands
from awscli.customizations.ecr_public import register_ecr_public_commands
-from awscli.customizations.emr.emr import emr_initialize
-from awscli.customizations.emrcontainers import \
- initialize as emrcontainers_initialize
-from awscli.customizations.eks import initialize as eks_initialize
from awscli.customizations.ecs import initialize as ecs_initialize
+from awscli.customizations.eks import initialize as eks_initialize
+from awscli.customizations.emr.emr import emr_initialize
+from awscli.customizations.emrcontainers import (
+ initialize as emrcontainers_initialize,
+)
from awscli.customizations.gamelift import register_gamelift_commands
-from awscli.customizations.generatecliskeleton import \
- register_generate_cli_skeleton
+from awscli.customizations.generatecliskeleton import (
+ register_generate_cli_skeleton,
+)
from awscli.customizations.globalargs import register_parse_global_args
+from awscli.customizations.history import (
+ register_history_commands,
+ register_history_mode,
+)
from awscli.customizations.iamvirtmfa import IAMVMFAWrapper
-from awscli.customizations.iot import register_create_keys_and_cert_arguments
-from awscli.customizations.iot import register_create_keys_from_csr_arguments
+from awscli.customizations.iot import (
+ register_create_keys_and_cert_arguments,
+ register_create_keys_from_csr_arguments,
+)
from awscli.customizations.iot_data import register_custom_endpoint_note
+from awscli.customizations.kinesis import (
+ register_kinesis_list_streams_pagination_backcompat,
+)
from awscli.customizations.kms import register_fix_kms_create_grant_docs
-from awscli.customizations.dlm.dlm import dlm_initialize
+from awscli.customizations.lightsail import initialize as lightsail_initialize
+from awscli.customizations.logs import register_logs_commands
from awscli.customizations.opsworks import initialize as opsworks_init
+from awscli.customizations.opsworkscm import register_alias_opsworks_cm
from awscli.customizations.paginate import register_pagination
from awscli.customizations.putmetricdata import register_put_metric_data
-from awscli.customizations.rds import register_rds_modify_split
-from awscli.customizations.rds import register_add_generate_db_auth_token
-from awscli.customizations.dsql import register_dsql_customizations
-from awscli.customizations.rekognition import register_rekognition_detect_labels
+from awscli.customizations.quicksight import (
+ register_quicksight_asset_bundle_customizations,
+)
+from awscli.customizations.rds import (
+ register_add_generate_db_auth_token,
+ register_rds_modify_split,
+)
+from awscli.customizations.rekognition import (
+ register_rekognition_detect_labels,
+)
from awscli.customizations.removals import register_removals
from awscli.customizations.route53 import register_create_hosted_zone_doc_fix
from awscli.customizations.s3.s3 import s3_plugin_initialize
from awscli.customizations.s3errormsg import register_s3_error_msg
-from awscli.customizations.timestampformat import register_timestamp_format
+from awscli.customizations.s3events import (
+ register_document_expires_string,
+ register_event_stream_arg,
+)
+from awscli.customizations.servicecatalog import (
+ register_servicecatalog_commands,
+)
from awscli.customizations.sessendemail import register_ses_send_email
+from awscli.customizations.sessionmanager import register_ssm_session
from awscli.customizations.sso import register_sso_commands
from awscli.customizations.streamingoutputarg import add_streaming_output_arg
-from awscli.customizations.translate import register_translate_import_terminology
+from awscli.customizations.timestampformat import register_timestamp_format
from awscli.customizations.toplevelbool import register_bool_params
+from awscli.customizations.translate import (
+ register_translate_import_terminology,
+)
from awscli.customizations.waiters import register_add_waiters
-from awscli.customizations.opsworkscm import register_alias_opsworks_cm
-from awscli.customizations.servicecatalog import register_servicecatalog_commands
-from awscli.customizations.s3events import register_event_stream_arg, register_document_expires_string
-from awscli.customizations.sessionmanager import register_ssm_session
-from awscli.customizations.logs import register_logs_commands
-from awscli.customizations.devcommands import register_dev_commands
from awscli.customizations.wizard.commands import register_wizard_commands
-from awscli.customizations.binaryformat import add_binary_formatter
-from awscli.customizations.lightsail import initialize as lightsail_initialize
-from awscli.alias import register_alias_commands
-from awscli.customizations.kinesis import \
- register_kinesis_list_streams_pagination_backcompat
-from awscli.customizations.quicksight import \
- register_quicksight_asset_bundle_customizations
+from awscli.paramfile import register_uri_param_handler
def awscli_initialize(event_handlers):
@@ -114,23 +142,25 @@ def awscli_initialize(event_handlers):
    # The s3 error message needs to be registered before the
# generic error handler.
register_s3_error_msg(event_handlers)
-# # The following will get fired for every option we are
-# # documenting. It will attempt to add an example_fn on to
-# # the parameter object if the parameter supports shorthand
-# # syntax. The documentation event handlers will then use
-# # the examplefn to generate the sample shorthand syntax
-# # in the docs. Registering here should ensure that this
-# # handler gets called first but it still feels a bit brittle.
-# event_handlers.register('doc-option-example.*.*.*',
-# param_shorthand.add_example_fn)
- event_handlers.register('doc-examples.*.*',
- add_examples)
+ # # The following will get fired for every option we are
+ # # documenting. It will attempt to add an example_fn on to
+ # # the parameter object if the parameter supports shorthand
+ # # syntax. The documentation event handlers will then use
+ # # the examplefn to generate the sample shorthand syntax
+ # # in the docs. Registering here should ensure that this
+ # # handler gets called first but it still feels a bit brittle.
+ # event_handlers.register('doc-option-example.*.*.*',
+ # param_shorthand.add_example_fn)
+ event_handlers.register('doc-examples.*.*', add_examples)
register_cli_input_args(event_handlers)
- event_handlers.register('building-argument-table.*',
- add_streaming_output_arg)
+ event_handlers.register(
+ 'building-argument-table.*', add_streaming_output_arg
+ )
register_count_events(event_handlers)
- event_handlers.register('building-argument-table.ec2.get-password-data',
- ec2_add_priv_launch_key)
+ event_handlers.register(
+ 'building-argument-table.ec2.get-password-data',
+ ec2_add_priv_launch_key,
+ )
register_parse_global_args(event_handlers)
register_pagination(event_handlers)
register_secgroup(event_handlers)
@@ -179,10 +209,12 @@ def awscli_initialize(event_handlers):
register_custom_endpoint_note(event_handlers)
event_handlers.register(
'building-argument-table.iot.create-keys-and-certificate',
- register_create_keys_and_cert_arguments)
+ register_create_keys_and_cert_arguments,
+ )
event_handlers.register(
'building-argument-table.iot.create-certificate-from-csr',
- register_create_keys_from_csr_arguments)
+ register_create_keys_from_csr_arguments,
+ )
register_cloudfront(event_handlers)
register_gamelift_commands(event_handlers)
register_ec2_page_size_injector(event_handlers)
diff --git a/awscli/help.py b/awscli/help.py
index 1ce8571f3aec..b1f429f0334e 100644
--- a/awscli/help.py
+++ b/awscli/help.py
@@ -12,35 +12,37 @@
# language governing permissions and limitations under the License.
import logging
import os
-import sys
import platform
import shlex
-from subprocess import Popen, PIPE
+import sys
+from subprocess import PIPE, Popen
from docutils.core import publish_string
from docutils.writers import manpage
-from awscli.clidocs import ProviderDocumentEventHandler
-from awscli.clidocs import ServiceDocumentEventHandler
-from awscli.clidocs import OperationDocumentEventHandler
-from awscli.clidocs import TopicListerDocumentEventHandler
-from awscli.clidocs import TopicDocumentEventHandler
+from awscli.argparser import ArgTableArgParser
+from awscli.argprocess import ParamShorthandParser
from awscli.bcdoc import docevents
from awscli.bcdoc.restdoc import ReSTDocument
from awscli.bcdoc.textwriter import TextWriter
-from awscli.argprocess import ParamShorthandParser
-from awscli.argparser import ArgTableArgParser
+from awscli.clidocs import (
+ OperationDocumentEventHandler,
+ ProviderDocumentEventHandler,
+ ServiceDocumentEventHandler,
+ TopicDocumentEventHandler,
+ TopicListerDocumentEventHandler,
+)
from awscli.topictags import TopicTagDB
from awscli.utils import ignore_ctrl_c
-
LOG = logging.getLogger('awscli.help')
class ExecutableNotFoundError(Exception):
def __init__(self, executable_name):
super(ExecutableNotFoundError, self).__init__(
- 'Could not find executable named "%s"' % executable_name)
+ 'Could not find executable named "%s"' % executable_name
+ )
def get_renderer():
@@ -62,6 +64,7 @@ class PagingHelpRenderer(object):
a particular platform.
"""
+
def __init__(self, output_stream=sys.stdout):
self.output_stream = output_stream
@@ -118,7 +121,8 @@ def _convert_doc_content(self, contents):
settings_overrides = self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES.copy()
settings_overrides["report_level"] = 3
man_contents = publish_string(
- contents, writer=manpage.Writer(),
+ contents,
+ writer=manpage.Writer(),
settings_overrides=self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES,
)
if self._exists_on_path('groff'):
@@ -135,8 +139,9 @@ def _convert_doc_content(self, contents):
def _send_output_to_pager(self, output):
cmdline = self.get_pager_cmdline()
if not self._exists_on_path(cmdline[0]):
- LOG.debug("Pager '%s' not found in PATH, printing raw help." %
- cmdline[0])
+ LOG.debug(
+ "Pager '%s' not found in PATH, printing raw help." % cmdline[0]
+ )
self.output_stream.write(output.decode('utf-8') + "\n")
self.output_stream.flush()
return
@@ -159,8 +164,12 @@ def _send_output_to_pager(self, output):
def _exists_on_path(self, name):
# Since we're only dealing with POSIX systems, we can
# ignore things like PATHEXT.
- return any([os.path.exists(os.path.join(p, name))
- for p in os.environ.get('PATH', '').split(os.pathsep)])
+ return any(
+ [
+ os.path.exists(os.path.join(p, name))
+ for p in os.environ.get('PATH', '').split(os.pathsep)
+ ]
+ )
class WindowsHelpRenderer(PagingHelpRenderer):
@@ -170,7 +179,8 @@ class WindowsHelpRenderer(PagingHelpRenderer):
def _convert_doc_content(self, contents):
text_output = publish_string(
- contents, writer=TextWriter(),
+ contents,
+ writer=TextWriter(),
settings_overrides=self._DEFAULT_DOCUTILS_SETTINGS_OVERRIDES,
)
return text_output
@@ -280,8 +290,9 @@ def __call__(self, args, parsed_globals):
subcommand_parser = ArgTableArgParser({}, self.subcommand_table)
parsed, remaining = subcommand_parser.parse_known_args(args)
if getattr(parsed, 'subcommand', None) is not None:
- return self.subcommand_table[parsed.subcommand](remaining,
- parsed_globals)
+ return self.subcommand_table[parsed.subcommand](
+ remaining, parsed_globals
+ )
# Create an event handler for a Provider Document
instance = self.EventHandlerClass(self)
@@ -299,12 +310,13 @@ class ProviderHelpCommand(HelpCommand):
This is what is called when ``aws help`` is run.
"""
+
EventHandlerClass = ProviderDocumentEventHandler
- def __init__(self, session, command_table, arg_table,
- description, synopsis, usage):
- HelpCommand.__init__(self, session, None,
- command_table, arg_table)
+ def __init__(
+ self, session, command_table, arg_table, description, synopsis, usage
+ ):
+ HelpCommand.__init__(self, session, None, command_table, arg_table)
self.description = description
self.synopsis = synopsis
self.help_usage = usage
@@ -353,10 +365,12 @@ class ServiceHelpCommand(HelpCommand):
EventHandlerClass = ServiceDocumentEventHandler
- def __init__(self, session, obj, command_table, arg_table, name,
- event_class):
- super(ServiceHelpCommand, self).__init__(session, obj, command_table,
- arg_table)
+ def __init__(
+ self, session, obj, command_table, arg_table, name, event_class
+ ):
+ super(ServiceHelpCommand, self).__init__(
+ session, obj, command_table, arg_table
+ )
self._name = name
self._event_class = event_class
@@ -376,10 +390,10 @@ class OperationHelpCommand(HelpCommand):
e.g. ``aws ec2 describe-instances help``.
"""
+
EventHandlerClass = OperationDocumentEventHandler
- def __init__(self, session, operation_model, arg_table, name,
- event_class):
+ def __init__(self, session, operation_model, arg_table, name, event_class):
HelpCommand.__init__(self, session, operation_model, None, arg_table)
self.param_shorthand = ParamShorthandParser()
self._name = name
diff --git a/awscli/logger.py b/awscli/logger.py
index 38b16fe7bbe3..e0f818b0d009 100644
--- a/awscli/logger.py
+++ b/awscli/logger.py
@@ -17,8 +17,7 @@
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-def set_stream_logger(logger_name, log_level, stream=None,
- format_string=None):
+def set_stream_logger(logger_name, log_level, stream=None, format_string=None):
"""
Convenience method to configure a stream logger.
diff --git a/awscli/paramfile.py b/awscli/paramfile.py
index da5307f214f6..eaf2a84a922b 100644
--- a/awscli/paramfile.py
+++ b/awscli/paramfile.py
@@ -10,12 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import copy
import logging
import os
-import copy
-from awscli.compat import compat_open
from awscli import argprocess
+from awscli.compat import compat_open
logger = logging.getLogger(__name__)
@@ -77,7 +77,7 @@ def get_paramfile(path, cases):
def get_file(prefix, path, mode):
- file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):]))
+ file_path = os.path.expandvars(os.path.expanduser(path[len(prefix) :]))
try:
with compat_open(file_path, mode) as f:
return f.read()
@@ -85,13 +85,15 @@ def get_file(prefix, path, mode):
raise ResourceLoadingError(
'Unable to load paramfile (%s), text contents could '
'not be decoded. If this is a binary file, please use the '
- 'fileb:// prefix instead of the file:// prefix.' % file_path)
+ 'fileb:// prefix instead of the file:// prefix.' % file_path
+ )
except (OSError, IOError) as e:
- raise ResourceLoadingError('Unable to load paramfile %s: %s' % (
- path, e))
+ raise ResourceLoadingError(
+ 'Unable to load paramfile %s: %s' % (path, e)
+ )
LOCAL_PREFIX_MAP = {
'file://': (get_file, {'mode': 'r'}),
'fileb://': (get_file, {'mode': 'rb'}),
-}
\ No newline at end of file
+}
diff --git a/awscli/plugin.py b/awscli/plugin.py
index 1c2331ae1cbe..46a26a4fc1a7 100644
--- a/awscli/plugin.py
+++ b/awscli/plugin.py
@@ -10,9 +10,9 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import sys
-import os
import logging
+import os
+import sys
from botocore.hooks import HierarchicalEmitter
@@ -80,6 +80,9 @@ def _import_plugins(plugin_mapping):
def _add_plugin_path_to_sys_path(plugin_path):
for dirname in plugin_path.split(os.pathsep):
- log.debug("Adding additional path from cli_legacy_plugin_path "
- "configuration: %s", dirname)
+ log.debug(
+ "Adding additional path from cli_legacy_plugin_path "
+ "configuration: %s",
+ dirname,
+ )
sys.path.append(dirname)
diff --git a/awscli/schema.py b/awscli/schema.py
index 17ec6ba416cd..4c3a60cff67d 100644
--- a/awscli/schema.py
+++ b/awscli/schema.py
@@ -63,6 +63,7 @@ class SchemaTransformer(object):
$ aws foo bar --baz arg1=Value1,arg2=5 arg1=Value2
"""
+
JSON_SCHEMA_TO_AWS_TYPES = {
'object': 'structure',
'array': 'list',
@@ -116,7 +117,8 @@ def _transform_structure(self, schema, shapes):
for key, value in schema['properties'].items():
current_type_name = self._json_schema_to_aws_type(value)
current_shape_name = self._shape_namer.new_shape_name(
- current_type_name)
+ current_type_name
+ )
members[key] = {'shape': current_shape_name}
if value.get('required', False):
required_members.append(key)
diff --git a/awscli/shorthand.py b/awscli/shorthand.py
index 7443dba4a141..f1edbcd1f435 100644
--- a/awscli/shorthand.py
+++ b/awscli/shorthand.py
@@ -38,6 +38,7 @@
``BackCompatVisitor`` class.
"""
+
import re
import string
@@ -57,25 +58,24 @@ def match(self, value):
class ShorthandParseError(Exception):
-
def _error_location(self):
consumed, remaining, num_spaces = self.value, '', self.index
- if '\n' in self.value[:self.index]:
+ if '\n' in self.value[: self.index]:
# If there's newlines in the consumed expression, we want
# to make sure we're only counting the spaces
# from the last newline:
# foo=bar,\n
# bar==baz
# ^
- last_newline = self.value[:self.index].rindex('\n')
+ last_newline = self.value[: self.index].rindex('\n')
num_spaces = self.index - last_newline - 1
- if '\n' in self.value[self.index:]:
+ if '\n' in self.value[self.index :]:
            # If there's a newline in the remaining text, divide the value
            # into consumed and remaining:
# foo==bar,\n
# ^
# bar=baz
- next_newline = self.index + self.value[self.index:].index('\n')
+ next_newline = self.index + self.value[self.index :].index('\n')
consumed = self.value[:next_newline]
remaining = self.value[next_newline:]
return '%s\n%s%s' % (consumed, (' ' * num_spaces) + '^', remaining)
@@ -91,10 +91,11 @@ def __init__(self, value, expected, actual, index):
super(ShorthandParseSyntaxError, self).__init__(msg)
def _construct_msg(self):
- msg = (
- "Expected: '%s', received: '%s' for input:\n"
- "%s"
- ) % (self.expected, self.actual, self._error_location())
+ msg = ("Expected: '%s', received: '%s' for input:\n" "%s") % (
+ self.expected,
+ self.actual,
+ self._error_location(),
+ )
return msg
@@ -135,20 +136,22 @@ class ShorthandParser(object):
_ESCAPED_COMMA = '(\\\\,)'
_FIRST_VALUE = _NamedRegex(
'first',
- u'({escaped_comma}|[{start_word}])'
- u'({escaped_comma}|[{follow_chars}])*'.format(
+ '({escaped_comma}|[{start_word}])'
+ '({escaped_comma}|[{follow_chars}])*'.format(
escaped_comma=_ESCAPED_COMMA,
start_word=_START_WORD,
follow_chars=_FIRST_FOLLOW_CHARS,
- ))
+ ),
+ )
_SECOND_VALUE = _NamedRegex(
'second',
- u'({escaped_comma}|[{start_word}])'
- u'({escaped_comma}|[{follow_chars}])*'.format(
+ '({escaped_comma}|[{start_word}])'
+ '({escaped_comma}|[{follow_chars}])*'.format(
escaped_comma=_ESCAPED_COMMA,
start_word=_START_WORD,
follow_chars=_SECOND_FOLLOW_CHARS,
- ))
+ ),
+ )
def __init__(self):
self._tokens = []
@@ -213,7 +216,7 @@ def _key(self):
if self._current() not in valid_chars:
break
self._index += 1
- return self._input_value[start:self._index]
+ return self._input_value[start : self._index]
def _values(self):
# values = csv-list / explicit-list / hash-literal
@@ -275,11 +278,15 @@ def _csv_value(self):
return csv_list
def _value(self):
- result = self._FIRST_VALUE.match(self._input_value[self._index:])
+ result = self._FIRST_VALUE.match(self._input_value[self._index :])
if result is not None:
consumed = self._consume_matched_regex(result)
processed = consumed.replace('\\,', ',').rstrip()
- return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed
+ return (
+ self._resolve_paramfiles(processed)
+ if self._should_resolve_paramfiles
+ else processed
+ )
return ''
def _explicit_list(self):
@@ -339,7 +346,11 @@ def _single_quoted_value(self):
# val-escaped-single = %x20-26 / %x28-7F / escaped-escape /
# (escape single-quote)
processed = self._consume_quoted(self._SINGLE_QUOTED, escaped_char="'")
- return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed
+ return (
+ self._resolve_paramfiles(processed)
+ if self._should_resolve_paramfiles
+ else processed
+ )
def _consume_quoted(self, regex, escaped_char=None):
value = self._must_consume_regex(regex)[1:-1]
@@ -350,7 +361,11 @@ def _consume_quoted(self, regex, escaped_char=None):
def _double_quoted_value(self):
processed = self._consume_quoted(self._DOUBLE_QUOTED, escaped_char='"')
- return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed
+ return (
+ self._resolve_paramfiles(processed)
+ if self._should_resolve_paramfiles
+ else processed
+ )
def _second_value(self):
if self._current() == "'":
@@ -360,7 +375,11 @@ def _second_value(self):
else:
consumed = self._must_consume_regex(self._SECOND_VALUE)
processed = consumed.replace('\\,', ',').rstrip()
- return self._resolve_paramfiles(processed) if self._should_resolve_paramfiles else processed
+ return (
+ self._resolve_paramfiles(processed)
+ if self._should_resolve_paramfiles
+ else processed
+ )
def _resolve_paramfiles(self, val):
if (paramfile := get_paramfile(val, LOCAL_PREFIX_MAP)) is not None:
@@ -371,27 +390,30 @@ def _expect(self, char, consume_whitespace=False):
if consume_whitespace:
self._consume_whitespace()
if self._index >= len(self._input_value):
- raise ShorthandParseSyntaxError(self._input_value, char,
- 'EOF', self._index)
+ raise ShorthandParseSyntaxError(
+ self._input_value, char, 'EOF', self._index
+ )
actual = self._input_value[self._index]
if actual != char:
- raise ShorthandParseSyntaxError(self._input_value, char,
- actual, self._index)
+ raise ShorthandParseSyntaxError(
+ self._input_value, char, actual, self._index
+ )
self._index += 1
if consume_whitespace:
self._consume_whitespace()
def _must_consume_regex(self, regex):
- result = regex.match(self._input_value[self._index:])
+ result = regex.match(self._input_value[self._index :])
if result is not None:
return self._consume_matched_regex(result)
- raise ShorthandParseSyntaxError(self._input_value, '<%s>' % regex.name,
- '', self._index)
+ raise ShorthandParseSyntaxError(
+ self._input_value, '<%s>' % regex.name, '', self._index
+ )
def _consume_matched_regex(self, result):
start, end = result.span()
- v = self._input_value[self._index+start:self._index+end]
- self._index += (end - start)
+ v = self._input_value[self._index + start : self._index + end]
+ self._index += end - start
return v
def _current(self):
@@ -418,16 +440,18 @@ def visit(self, params, model):
self._visit({}, model, '', params)
def _visit(self, parent, shape, name, value):
- method = getattr(self, '_visit_%s' % shape.type_name,
- self._visit_scalar)
+ method = getattr(
+ self, '_visit_%s' % shape.type_name, self._visit_scalar
+ )
method(parent, shape, name, value)
def _visit_structure(self, parent, shape, name, value):
if not isinstance(value, dict):
return
for member_name, member_shape in shape.members.items():
- self._visit(value, member_shape, member_name,
- value.get(member_name))
+ self._visit(
+ value, member_shape, member_name, value.get(member_name)
+ )
def _visit_list(self, parent, shape, name, value):
if not isinstance(value, list):
@@ -453,8 +477,9 @@ def _visit_structure(self, parent, shape, name, value):
return
for member_name, member_shape in shape.members.items():
try:
- self._visit(value, member_shape, member_name,
- value.get(member_name))
+ self._visit(
+ value, member_shape, member_name, value.get(member_name)
+ )
except DocumentTypesNotSupportedError:
# Catch and propagate the document type error to a better
# error message as when the original error is thrown there is
@@ -474,7 +499,8 @@ def _visit_list(self, parent, shape, name, value):
parent[name] = [value]
else:
return super(BackCompatVisitor, self)._visit_list(
- parent, shape, name, value)
+ parent, shape, name, value
+ )
def _visit_scalar(self, parent, shape, name, value):
if value is None:
diff --git a/awscli/table.py b/awscli/table.py
index 8ebfc454d0ed..f971d791a896 100644
--- a/awscli/table.py
+++ b/awscli/table.py
@@ -6,19 +6,19 @@
# http://aws.amazon.com/apache2.0/
+import struct
+
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
-import struct
import unicodedata
import colorama
from awscli.utils import is_a_tty
-
# `autoreset` allows us to not have to send reset sequences for every
# string. `strip` lets us preserve color when redirecting.
COLORAMA_KWARGS = {
@@ -35,28 +35,32 @@ def get_text_length(text):
# * F(Fullwidth)
# * W(Wide)
text = str(text)
- return sum(2 if unicodedata.east_asian_width(char) in 'WFA' else 1
- for char in text)
+ return sum(
+ 2 if unicodedata.east_asian_width(char) in 'WFA' else 1
+ for char in text
+ )
def determine_terminal_width(default_width=80):
# If we can't detect the terminal width, the default_width is returned.
try:
- from termios import TIOCGWINSZ
from fcntl import ioctl
+ from termios import TIOCGWINSZ
except ImportError:
return default_width
try:
- height, width = struct.unpack('hhhh', ioctl(sys.stdout,
- TIOCGWINSZ, '\000' * 8))[0:2]
+ height, width = struct.unpack(
+ 'hhhh', ioctl(sys.stdout, TIOCGWINSZ, '\000' * 8)
+ )[0:2]
except Exception:
return default_width
else:
return width
-def center_text(text, length=80, left_edge='|', right_edge='|',
- text_length=None):
+def center_text(
+ text, length=80, left_edge='|', right_edge='|', text_length=None
+):
"""Center text with specified edge chars.
You can pass in the length of the text as an arg, otherwise it is computed
@@ -77,15 +81,24 @@ def center_text(text, length=80, left_edge='|', right_edge='|',
return final
-def align_left(text, length, left_edge='|', right_edge='|', text_length=None,
- left_padding=2):
+def align_left(
+ text,
+ length,
+ left_edge='|',
+ right_edge='|',
+ text_length=None,
+ left_padding=2,
+):
"""Left align text."""
# postcondition: get_text_length(returned_text) == length
if text_length is None:
text_length = get_text_length(text)
computed_length = (
- text_length + left_padding + \
- get_text_length(left_edge) + get_text_length(right_edge))
+ text_length
+ + left_padding
+ + get_text_length(left_edge)
+ + get_text_length(right_edge)
+ )
if length - computed_length >= 0:
padding = left_padding
else:
@@ -126,8 +139,9 @@ def convert_to_vertical_table(sections):
class IndentedStream(object):
- def __init__(self, stream, indent_level, left_indent_char='|',
- right_indent_char='|'):
+ def __init__(
+ self, stream, indent_level, left_indent_char='|', right_indent_char='|'
+ ):
self._stream = stream
self._indent_level = indent_level
self._left_indent_char = left_indent_char
@@ -167,25 +181,39 @@ def __init__(self):
def style_title(self, text):
# Originally bold + underline
return text
- #return colorama.Style.BOLD + text + colorama.Style.RESET_ALL
+ # return colorama.Style.BOLD + text + colorama.Style.RESET_ALL
def style_header_column(self, text):
# Originally underline
return text
def style_row_element(self, text):
- return (colorama.Style.BRIGHT + colorama.Fore.BLUE +
- text + colorama.Style.RESET_ALL)
+ return (
+ colorama.Style.BRIGHT
+ + colorama.Fore.BLUE
+ + text
+ + colorama.Style.RESET_ALL
+ )
def style_indentation_char(self, text):
- return (colorama.Style.DIM + colorama.Fore.YELLOW +
- text + colorama.Style.RESET_ALL)
+ return (
+ colorama.Style.DIM
+ + colorama.Fore.YELLOW
+ + text
+ + colorama.Style.RESET_ALL
+ )
class MultiTable(object):
- def __init__(self, terminal_width=None, initial_section=True,
- column_separator='|', terminal=None,
- styler=None, auto_reformat=True):
+ def __init__(
+ self,
+ terminal_width=None,
+ initial_section=True,
+ column_separator='|',
+ terminal=None,
+ styler=None,
+ auto_reformat=True,
+ ):
self._auto_reformat = auto_reformat
if initial_section:
self._current_section = Section()
@@ -238,16 +266,22 @@ def _determine_conversion_needed(self, max_width):
return self._auto_reformat
def _calculate_max_width(self):
- max_width = max(s.total_width(padding=4, with_border=True,
- outer_padding=s.indent_level)
- for s in self._sections)
+ max_width = max(
+ s.total_width(
+ padding=4, with_border=True, outer_padding=s.indent_level
+ )
+ for s in self._sections
+ )
return max_width
def _render_section(self, section, max_width, stream):
- stream = IndentedStream(stream, section.indent_level,
- self._styler.style_indentation_char('|'),
- self._styler.style_indentation_char('|'))
- max_width -= (section.indent_level * 2)
+ stream = IndentedStream(
+ stream,
+ section.indent_level,
+ self._styler.style_indentation_char('|'),
+ self._styler.style_indentation_char('|'),
+ )
+ max_width -= section.indent_level * 2
self._render_title(section, max_width, stream)
self._render_column_titles(section, max_width, stream)
self._render_rows(section, max_width, stream)
@@ -258,8 +292,12 @@ def _render_title(self, section, max_width, stream):
# bottom_border: ----------------------------
if section.title:
title = self._styler.style_title(section.title)
- stream.write(center_text(title, max_width, '|', '|',
- get_text_length(section.title)) + '\n')
+ stream.write(
+ center_text(
+ title, max_width, '|', '|', get_text_length(section.title)
+ )
+ + '\n'
+ )
if not section.headers and not section.rows:
stream.write('+%s+' % ('-' * (max_width - 2)) + '\n')
@@ -268,8 +306,9 @@ def _render_column_titles(self, section, max_width, stream):
return
# In order to render the column titles we need to know
# the width of each of the columns.
- widths = section.calculate_column_widths(padding=4,
- max_width=max_width)
+ widths = section.calculate_column_widths(
+ padding=4, max_width=max_width
+ )
# TODO: Build a list instead of +=, it's more efficient.
current = ''
length_so_far = 0
@@ -283,9 +322,13 @@ def _render_column_titles(self, section, max_width, stream):
first = False
else:
left_edge = ''
- current += center_text(text=stylized_header, length=width,
- left_edge=left_edge, right_edge='|',
- text_length=get_text_length(header))
+ current += center_text(
+ text=stylized_header,
+ length=width,
+ left_edge=left_edge,
+ right_edge='|',
+ text_length=get_text_length(header),
+ )
length_so_far += width
self._write_line_break(stream, widths)
stream.write(current + '\n')
@@ -307,8 +350,9 @@ def _write_line_break(self, stream, widths):
def _render_rows(self, section, max_width, stream):
if not section.rows:
return
- widths = section.calculate_column_widths(padding=4,
- max_width=max_width)
+ widths = section.calculate_column_widths(
+ padding=4, max_width=max_width
+ )
if not widths:
return
self._write_line_break(stream, widths)
@@ -325,10 +369,13 @@ def _render_rows(self, section, max_width, stream):
else:
left_edge = ''
stylized = self._styler.style_row_element(element)
- current += align_left(text=stylized, length=width,
- left_edge=left_edge,
- right_edge=self._column_separator,
- text_length=get_text_length(element))
+ current += align_left(
+ text=stylized,
+ length=width,
+ left_edge=left_edge,
+ right_edge=self._column_separator,
+ text_length=get_text_length(element),
+ )
length_so_far += width
stream.write(current + '\n')
self._write_line_break(stream, widths)
@@ -344,8 +391,10 @@ def __init__(self):
self._max_widths = []
def __repr__(self):
- return ("Section(title=%s, headers=%s, indent_level=%s, num_rows=%s)" %
- (self.title, self.headers, self.indent_level, len(self.rows)))
+ return (
+ "Section(title=%s, headers=%s, indent_level=%s, num_rows=%s)"
+ % (self.title, self.headers, self.indent_level, len(self.rows))
+ )
def calculate_column_widths(self, padding=0, max_width=None):
# postcondition: sum(widths) == max_width
@@ -385,8 +434,13 @@ def total_width(self, padding=0, with_border=False, outer_padding=0):
if with_border:
total += border_padding
total += outer_padding + outer_padding
- return max(get_text_length(self.title) + border_padding + outer_padding +
- outer_padding, total)
+ return max(
+ get_text_length(self.title)
+ + border_padding
+ + outer_padding
+ + outer_padding,
+ total,
+ )
def add_title(self, title):
self.title = title
@@ -404,8 +458,10 @@ def add_row(self, row):
if self._num_cols is None:
self._num_cols = len(row)
if len(row) != self._num_cols:
- raise ValueError("Row should have %s elements, instead "
- "it has %s" % (self._num_cols, len(row)))
+ raise ValueError(
+ "Row should have %s elements, instead "
+ "it has %s" % (self._num_cols, len(row))
+ )
row = self._format_row(row)
self.rows.append(row)
self._update_max_widths(row)
@@ -418,4 +474,6 @@ def _update_max_widths(self, row):
self._max_widths = [get_text_length(el) for el in row]
else:
for i, el in enumerate(row):
- self._max_widths[i] = max(get_text_length(el), self._max_widths[i])
+ self._max_widths[i] = max(
+ get_text_length(el), self._max_widths[i]
+ )
diff --git a/awscli/testutils.py b/awscli/testutils.py
index 5153fa960559..8893ab30d39a 100644
--- a/awscli/testutils.py
+++ b/awscli/testutils.py
@@ -19,37 +19,34 @@
advantage of all the testing utilities we provide.
"""
-import os
-import sys
+
+import binascii
+import contextlib
import copy
-import shutil
-import time
import json
import logging
-import tempfile
-import platform
-import contextlib
-import binascii
import math
+import os
+import platform
+import shutil
+import sys
+import tempfile
+import time
+import unittest
from pprint import pformat
-from subprocess import Popen, PIPE
+from subprocess import PIPE, Popen
from unittest import mock
-import unittest
-
-from awscli.compat import BytesIO, StringIO
from ruamel.yaml import YAML
-from botocore.session import Session
-from botocore.exceptions import ClientError
-from botocore.exceptions import WaiterError
-import botocore.loaders
-from botocore.awsrequest import AWSResponse
-
import awscli.clidriver
-from awscli.plugin import load_plugins
+import botocore.loaders
from awscli.clidriver import CLIDriver
-
+from awscli.compat import BytesIO, StringIO
+from awscli.plugin import load_plugins
+from botocore.awsrequest import AWSResponse
+from botocore.exceptions import ClientError, WaiterError
+from botocore.session import Session
_LOADER = botocore.loaders.Loader()
INTEG_LOG = logging.getLogger('awscli.tests.integration')
@@ -66,9 +63,12 @@ def test_some_non_windows_stuff(self):
self.assertEqual(...)
"""
+
def decorator(func):
return unittest.skipIf(
- platform.system() not in ['Darwin', 'Linux'], reason)(func)
+ platform.system() not in ['Darwin', 'Linux'], reason
+ )(func)
+
return decorator
@@ -82,8 +82,10 @@ def test_some_windows_stuff(self):
self.assertEqual(...)
"""
+
def decorator(func):
return unittest.skipIf(platform.system() != 'Windows', reason)(func)
+
return decorator
@@ -101,6 +103,7 @@ def create_clidriver():
def get_aws_cmd():
global AWS_CMD
import awscli
+
if AWS_CMD is None:
# Try /bin/aws
repo_root = os.path.dirname(os.path.abspath(awscli.__file__))
@@ -108,10 +111,12 @@ def get_aws_cmd():
if not os.path.isfile(aws_cmd):
aws_cmd = _search_path_for_cmd('aws')
if aws_cmd is None:
- raise ValueError('Could not find "aws" executable. Either '
- 'make sure it is on your PATH, or you can '
- 'explicitly set this value using '
- '"set_aws_cmd()"')
+ raise ValueError(
+ 'Could not find "aws" executable. Either '
+ 'make sure it is on your PATH, or you can '
+ 'explicitly set this value using '
+ '"set_aws_cmd()"'
+ )
AWS_CMD = aws_cmd
return AWS_CMD
@@ -197,15 +202,12 @@ def create_dir_bucket(session, name=None, location=None):
params = {
'Bucket': bucket_name,
'CreateBucketConfiguration': {
- 'Location': {
- 'Type': 'AvailabilityZone',
- 'Name': az
- },
+ 'Location': {'Type': 'AvailabilityZone', 'Name': az},
'Bucket': {
'Type': 'Directory',
- 'DataRedundancy': 'SingleAvailabilityZone'
- }
- }
+ 'DataRedundancy': 'SingleAvailabilityZone',
+ },
+ },
}
try:
client.create_bucket(**params)
@@ -249,6 +251,7 @@ class BaseCLIDriverTest(unittest.TestCase):
This will load all the default plugins as well so it
will simulate the behavior the user will see.
"""
+
def setUp(self):
self.environ = {
'AWS_DATA_PATH': os.environ['AWS_DATA_PATH'],
@@ -280,23 +283,29 @@ def tearDown(self):
def assert_contains(self, contains):
if contains not in self.renderer.rendered_contents:
- self.fail("The expected contents:\n%s\nwere not in the "
- "actual rendered contents:\n%s" % (
- contains, self.renderer.rendered_contents))
+ self.fail(
+ "The expected contents:\n%s\nwere not in the "
+ "actual rendered contents:\n%s"
+ % (contains, self.renderer.rendered_contents)
+ )
def assert_contains_with_count(self, contains, count):
r_count = self.renderer.rendered_contents.count(contains)
if r_count != count:
- self.fail("The expected contents:\n%s\n, with the "
- "count:\n%d\nwere not in the actual rendered "
- " contents:\n%s\nwith count:\n%d" % (
- contains, count, self.renderer.rendered_contents, r_count))
+ self.fail(
+ "The expected contents:\n%s\n, with the "
+ "count:\n%d\nwere not in the actual rendered "
+ " contents:\n%s\nwith count:\n%d"
+ % (contains, count, self.renderer.rendered_contents, r_count)
+ )
def assert_not_contains(self, contents):
if contents in self.renderer.rendered_contents:
- self.fail("The contents:\n%s\nwere not suppose to be in the "
- "actual rendered contents:\n%s" % (
- contents, self.renderer.rendered_contents))
+ self.fail(
+ "The contents:\n%s\nwere not suppose to be in the "
+ "actual rendered contents:\n%s"
+ % (contents, self.renderer.rendered_contents)
+ )
def assert_text_order(self, *args, **kwargs):
# First we need to find where the SYNOPSIS section starts.
@@ -309,11 +318,15 @@ def assert_text_order(self, *args, **kwargs):
previous = arg_indices[0]
for i, index in enumerate(arg_indices[1:], 1):
if index == -1:
- self.fail('The string %r was not found in the contents: %s'
- % (args[index], contents))
+ self.fail(
+ 'The string %r was not found in the contents: %s'
+ % (args[i], contents)
+ )
if index < previous:
- self.fail('The string %r came before %r, but was suppose to come '
- 'after it.\n%s' % (args[i], args[i - 1], contents))
+ self.fail(
+ 'The string %r came before %r, but was supposed to come '
+ 'after it.\n%s' % (args[i], args[i - 1], contents)
+ )
previous = index
@@ -388,7 +401,9 @@ def setUp(self):
self.http_response = AWSResponse(None, 200, {}, None)
self.error_http_response = AWSResponse(None, 400, {}, None)
self.parsed_response = {}
- self.make_request_patch = mock.patch('botocore.endpoint.Endpoint.make_request')
+ self.make_request_patch = mock.patch(
+ 'botocore.endpoint.Endpoint.make_request'
+ )
self.make_request_is_patched = False
self.operations_called = []
self.parsed_responses = None
@@ -424,7 +439,10 @@ def patch_make_request(self):
if self.parsed_responses is not None:
make_request_patch.side_effect = self._request_patch_side_effect
else:
- make_request_patch.return_value = (self.http_response, self.parsed_response)
+ make_request_patch.return_value = (
+ self.http_response,
+ self.parsed_response,
+ )
self.make_request_is_patched = True
def _request_patch_side_effect(self, *args, **kwargs):
@@ -436,8 +454,14 @@ def _request_patch_side_effect(self, *args, **kwargs):
http_response = self.error_http_response
return http_response, parsed_response
- def assert_params_for_cmd(self, cmd, params=None, expected_rc=0,
- stderr_contains=None, ignore_params=None):
+ def assert_params_for_cmd(
+ self,
+ cmd,
+ params=None,
+ expected_rc=0,
+ stderr_contains=None,
+ ignore_params=None,
+ ):
stdout, stderr, rc = self.run_cmd(cmd, expected_rc)
if stderr_contains is not None:
self.assertIn(stderr_contains, stderr)
@@ -451,11 +475,12 @@ def assert_params_for_cmd(self, cmd, params=None, expected_rc=0,
except KeyError:
pass
if params != last_kwargs:
- self.fail("Actual params did not match expected params.\n"
- "Expected:\n\n"
- "%s\n"
- "Actual:\n\n%s\n" % (
- pformat(params), pformat(last_kwargs)))
+ self.fail(
+ "Actual params did not match expected params.\n"
+ "Expected:\n\n"
+ "%s\n"
+ "Actual:\n\n%s\n" % (pformat(params), pformat(last_kwargs))
+ )
return stdout, stderr, rc
def before_parameter_build(self, params, model, **kwargs):
@@ -468,7 +493,8 @@ def run_cmd(self, cmd, expected_rc=0):
event_emitter = self.driver.session.get_component('event_emitter')
event_emitter.register('before-call', self.before_call)
event_emitter.register_first(
- 'before-parameter-build.*.*', self.before_parameter_build)
+ 'before-parameter-build.*.*', self.before_parameter_build
+ )
if not isinstance(cmd, list):
cmdlist = cmd.split()
else:
@@ -478,10 +504,11 @@ def run_cmd(self, cmd, expected_rc=0):
stderr = captured.stderr.getvalue()
stdout = captured.stdout.getvalue()
self.assertEqual(
- rc, expected_rc,
+ rc,
+ expected_rc,
"Unexpected rc (expected: %s, actual: %s) for command: %s\n"
- "stdout:\n%sstderr:\n%s" % (
- expected_rc, rc, cmd, stdout, stderr))
+ "stdout:\n%sstderr:\n%s" % (expected_rc, rc, cmd, stdout, stderr),
+ )
return stdout, stderr, rc
@@ -492,7 +519,7 @@ def setUp(self):
'AWS_DEFAULT_REGION': 'us-east-1',
'AWS_ACCESS_KEY_ID': 'access_key',
'AWS_SECRET_ACCESS_KEY': 'secret_key',
- 'AWS_CONFIG_FILE': ''
+ 'AWS_CONFIG_FILE': '',
}
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
@@ -502,7 +529,6 @@ def setUp(self):
self.driver = create_clidriver()
self.entry_point = awscli.clidriver.AWSCLIEntryPoint(self.driver)
-
def tearDown(self):
self.environ_patch.stop()
if self.send_is_patched:
@@ -514,9 +540,9 @@ def patch_send(self, status_code=200, headers={}, content=b''):
self.send_patch.stop()
self.send_is_patched = False
send_patch = self.send_patch.start()
- send_patch.return_value = mock.Mock(status_code=status_code,
- headers=headers,
- content=content)
+ send_patch.return_value = mock.Mock(
+ status_code=status_code, headers=headers, content=content
+ )
self.send_is_patched = True
def run_cmd(self, cmd, expected_rc=0):
@@ -532,10 +558,11 @@ def run_cmd(self, cmd, expected_rc=0):
stderr = captured.stderr.getvalue()
stdout = captured.stdout.getvalue()
self.assertEqual(
- rc, expected_rc,
+ rc,
+ expected_rc,
"Unexpected rc (expected: %s, actual: %s) for command: %s\n"
- "stdout:\n%sstderr:\n%s" % (
- expected_rc, rc, cmd, stdout, stderr))
+ "stdout:\n%sstderr:\n%s" % (expected_rc, rc, cmd, stdout, stderr),
+ )
return stdout, stderr, rc
@@ -547,8 +574,9 @@ def remove_all(self):
if os.path.exists(self.rootdir):
shutil.rmtree(self.rootdir)
- def create_file(self, filename, contents, mtime=None, mode='w',
- encoding=None):
+ def create_file(
+ self, filename, contents, mtime=None, mode='w', encoding=None
+ ):
"""Creates a file in a tmpdir
``filename`` should be a relative path, e.g. "foo/bar/baz.txt"
@@ -638,8 +666,14 @@ def _escape_quotes(command):
return command
-def aws(command, collect_memory=False, env_vars=None,
- wait_for_finish=True, input_data=None, input_file=None):
+def aws(
+ command,
+ collect_memory=False,
+ env_vars=None,
+ wait_for_finish=True,
+ input_data=None,
+ input_file=None,
+):
"""Run an aws command.
This helper function abstracts away the differences of running the "aws"
@@ -687,8 +721,14 @@ def aws(command, collect_memory=False, env_vars=None,
env = env_vars
if input_file is None:
input_file = PIPE
- process = Popen(full_command, stdout=PIPE, stderr=PIPE, stdin=input_file,
- shell=True, env=env)
+ process = Popen(
+ full_command,
+ stdout=PIPE,
+ stderr=PIPE,
+ stdin=input_file,
+ shell=True,
+ env=env,
+ )
if not wait_for_finish:
return process
memory = None
@@ -699,10 +739,12 @@ def aws(command, collect_memory=False, env_vars=None,
stdout, stderr = process.communicate(**kwargs)
else:
stdout, stderr, memory = _wait_and_collect_mem(process)
- return Result(process.returncode,
- stdout.decode(stdout_encoding),
- stderr.decode(stdout_encoding),
- memory)
+ return Result(
+ process.returncode,
+ stdout.decode(stdout_encoding),
+ stderr.decode(stdout_encoding),
+ memory,
+ )
def get_stdout_encoding():
@@ -720,8 +762,9 @@ def _wait_and_collect_mem(process):
get_memory = _get_memory_with_ps
else:
raise ValueError(
- "Can't collect memory for process on platform %s." %
- platform.system())
+ "Can't collect memory for process on platform %s."
+ % platform.system()
+ )
memory = []
while process.poll() is None:
try:
@@ -758,6 +801,7 @@ class BaseS3CLICommand(unittest.TestCase):
and more streamlined.
"""
+
_PUT_HEAD_SHARED_EXTRAS = [
'SSECustomerAlgorithm',
'SSECustomerKey',
@@ -803,8 +847,10 @@ def assert_key_contents_equal(self, bucket, key, expected_contents):
# without necessarily printing the actual contents.
self.assertEqual(len(actual_contents), len(expected_contents))
if actual_contents != expected_contents:
- self.fail("Contents for %s/%s do not match (but they "
- "have the same length)" % (bucket, key))
+ self.fail(
+ "Contents for %s/%s do not match (but they "
+ "have the same length)" % (bucket, key)
+ )
def delete_public_access_block(self, bucket_name):
client = self.create_client_for_bucket(bucket_name)
@@ -825,10 +871,7 @@ def create_bucket(self, name=None, region=None):
def put_object(self, bucket_name, key_name, contents='', extra_args=None):
client = self.create_client_for_bucket(bucket_name)
- call_args = {
- 'Bucket': bucket_name,
- 'Key': key_name, 'Body': contents
- }
+ call_args = {'Bucket': bucket_name, 'Key': key_name, 'Body': contents}
if extra_args is not None:
call_args.update(extra_args)
response = client.put_object(**call_args)
@@ -836,7 +879,8 @@ def put_object(self, bucket_name, key_name, contents='', extra_args=None):
extra_head_params = {}
if extra_args:
extra_head_params = dict(
- (k, v) for (k, v) in extra_args.items()
+ (k, v)
+ for (k, v) in extra_args.items()
if k in self._PUT_HEAD_SHARED_EXTRAS
)
self.wait_until_key_exists(
@@ -893,7 +937,8 @@ def wait_bucket_exists(self, bucket_name, min_successes=3):
client = self.create_client_for_bucket(bucket_name)
waiter = client.get_waiter('bucket_exists')
consistency_waiter = ConsistencyWaiter(
- min_successes=min_successes, delay_initial_poll=True)
+ min_successes=min_successes, delay_initial_poll=True
+ )
consistency_waiter.wait(
lambda: waiter.wait(Bucket=bucket_name) is None
)
@@ -911,7 +956,8 @@ def bucket_not_exists(self, bucket_name):
def key_exists(self, bucket_name, key_name, min_successes=3):
try:
self.wait_until_key_exists(
- bucket_name, key_name, min_successes=min_successes)
+ bucket_name, key_name, min_successes=min_successes
+ )
return True
except (ClientError, WaiterError):
return False
@@ -919,7 +965,8 @@ def key_exists(self, bucket_name, key_name, min_successes=3):
def key_not_exists(self, bucket_name, key_name, min_successes=3):
try:
self.wait_until_key_not_exists(
- bucket_name, key_name, min_successes=min_successes)
+ bucket_name, key_name, min_successes=min_successes
+ )
return True
except (ClientError, WaiterError):
return False
@@ -937,18 +984,28 @@ def head_object(self, bucket_name, key_name):
response = client.head_object(Bucket=bucket_name, Key=key_name)
return response
- def wait_until_key_exists(self, bucket_name, key_name, extra_params=None,
- min_successes=3):
- self._wait_for_key(bucket_name, key_name, extra_params,
- min_successes, exists=True)
+ def wait_until_key_exists(
+ self, bucket_name, key_name, extra_params=None, min_successes=3
+ ):
+ self._wait_for_key(
+ bucket_name, key_name, extra_params, min_successes, exists=True
+ )
- def wait_until_key_not_exists(self, bucket_name, key_name, extra_params=None,
- min_successes=3):
- self._wait_for_key(bucket_name, key_name, extra_params,
- min_successes, exists=False)
+ def wait_until_key_not_exists(
+ self, bucket_name, key_name, extra_params=None, min_successes=3
+ ):
+ self._wait_for_key(
+ bucket_name, key_name, extra_params, min_successes, exists=False
+ )
- def _wait_for_key(self, bucket_name, key_name, extra_params=None,
- min_successes=3, exists=True):
+ def _wait_for_key(
+ self,
+ bucket_name,
+ key_name,
+ extra_params=None,
+ min_successes=3,
+ exists=True,
+ ):
client = self.create_client_for_bucket(bucket_name)
if exists:
waiter = client.get_waiter('object_exists')
@@ -962,8 +1019,10 @@ def _wait_for_key(self, bucket_name, key_name, extra_params=None,
def assert_no_errors(self, p):
self.assertEqual(
- p.rc, 0,
- "Non zero rc (%s) received: %s" % (p.rc, p.stdout + p.stderr))
+ p.rc,
+ 0,
+ "Non zero rc (%s) received: %s" % (p.rc, p.stdout + p.stderr),
+ )
self.assertNotIn("Error:", p.stderr)
self.assertNotIn("failed:", p.stderr)
self.assertNotIn("client error", p.stderr)
@@ -1010,8 +1069,14 @@ class ConsistencyWaiter(object):
:param delay: The number of seconds to delay the next API call after a
failed check call. Default of 5 seconds.
"""
- def __init__(self, min_successes=1, max_attempts=20, delay=5,
- delay_initial_poll=False):
+
+ def __init__(
+ self,
+ min_successes=1,
+ max_attempts=20,
+ delay=5,
+ delay_initial_poll=False,
+ ):
self.min_successes = min_successes
self.max_attempts = max_attempts
self.delay = delay
diff --git a/awscli/text.py b/awscli/text.py
index a5bd0090829e..0ce5af91b4a4 100644
--- a/awscli/text.py
+++ b/awscli/text.py
@@ -34,15 +34,18 @@ def _format_list(item, identifier, stream):
if any(isinstance(el, dict) for el in item):
all_keys = _all_scalar_keys(item)
for element in item:
- _format_text(element, stream=stream, identifier=identifier,
- scalar_keys=all_keys)
+ _format_text(
+ element,
+ stream=stream,
+ identifier=identifier,
+ scalar_keys=all_keys,
+ )
elif any(isinstance(el, list) for el in item):
scalar_elements, non_scalars = _partition_list(item)
if scalar_elements:
_format_scalar_list(scalar_elements, identifier, stream)
for non_scalar in non_scalars:
- _format_text(non_scalar, stream=stream,
- identifier=identifier)
+ _format_text(non_scalar, stream=stream, identifier=identifier)
else:
_format_scalar_list(item, identifier, stream)
@@ -61,8 +64,7 @@ def _partition_list(item):
def _format_scalar_list(elements, identifier, stream):
if identifier is not None:
for item in elements:
- stream.write('%s\t%s\n' % (identifier.upper(),
- item))
+ stream.write('%s\t%s\n' % (identifier.upper(), item))
else:
# For a bare list, just print the contents.
stream.write('\t'.join([str(item) for item in elements]))
@@ -77,8 +79,7 @@ def _format_dict(scalar_keys, item, identifier, stream):
stream.write('\t'.join(scalars))
stream.write('\n')
for new_identifier, non_scalar in non_scalars:
- _format_text(item=non_scalar, stream=stream,
- identifier=new_identifier)
+ _format_text(item=non_scalar, stream=stream, identifier=new_identifier)
def _all_scalar_keys(list_of_dicts):
diff --git a/awscli/topictags.py b/awscli/topictags.py
index 93d281b8add2..6372f476c689 100644
--- a/awscli/topictags.py
+++ b/awscli/topictags.py
@@ -19,8 +19,9 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-import os
import json
+import os
+
import docutils.core
@@ -67,19 +68,25 @@ class TopicTagDB(object):
that all tag values for a specific tag of a specific topic are unique.
"""
- VALID_TAGS = ['category', 'description', 'title', 'related topic',
- 'related command']
+ VALID_TAGS = [
+ 'category',
+ 'description',
+ 'title',
+ 'related topic',
+ 'related command',
+ ]
# The default directory to look for topics.
TOPIC_DIR = os.path.join(
- os.path.dirname(
- os.path.abspath(__file__)), 'topics')
+ os.path.dirname(os.path.abspath(__file__)), 'topics'
+ )
# The default JSON index to load.
JSON_INDEX = os.path.join(TOPIC_DIR, 'topic-tags.json')
- def __init__(self, tag_dictionary=None, index_file=JSON_INDEX,
- topic_dir=TOPIC_DIR):
+ def __init__(
+ self, tag_dictionary=None, index_file=JSON_INDEX, topic_dir=TOPIC_DIR
+ ):
"""
:param index_file: The path to a specific JSON index to load.
If nothing is specified it will default to the default JSON
@@ -164,7 +171,8 @@ def scan(self, topic_files):
topic_content = f.read()
# Record the tags and the values
self._add_tag_and_values_from_content(
- topic_name, topic_content)
+ topic_name, topic_content
+ )
def _find_topic_name(self, topic_src_file):
# Get the name of each of these files
@@ -259,9 +267,9 @@ def query(self, tag, values=None):
# no value constraints are provided or if the tag value
# falls in the allowed tag values.
if values is None or tag_value in values:
- self._add_key_values(query_dict,
- key=tag_value,
- values=[topic_name])
+ self._add_key_values(
+ query_dict, key=tag_value, values=[topic_name]
+ )
return query_dict
def get_tag_value(self, topic_name, tag, default_value=None):
diff --git a/awscli/utils.py b/awscli/utils.py
index c8424bba997b..98d25a123a9f 100644
--- a/awscli/utils.py
+++ b/awscli/utils.py
@@ -10,24 +10,28 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import contextlib
import csv
-import signal
import datetime
-import contextlib
+import logging
import os
import re
+import signal
import sys
-from subprocess import Popen, PIPE
-import logging
+from subprocess import PIPE, Popen
-from awscli.compat import get_stdout_text_writer
-from awscli.compat import get_popen_kwargs_for_pager_cmd
-from awscli.compat import StringIO
-from botocore.useragent import UserAgentComponent
-from botocore.utils import resolve_imds_endpoint_mode
-from botocore.utils import IMDSFetcher
-from botocore.utils import BadIMDSRequestError
+from awscli.compat import (
+ StringIO,
+ get_popen_kwargs_for_pager_cmd,
+ get_stdout_text_writer,
+)
from botocore.configprovider import BaseProvider
+from botocore.useragent import UserAgentComponent
+from botocore.utils import (
+ BadIMDSRequestError,
+ IMDSFetcher,
+ resolve_imds_endpoint_mode,
+)
logger = logging.getLogger(__name__)
@@ -128,12 +132,15 @@ def _get_fetcher(self):
def _create_fetcher(self):
metadata_timeout = self._session.get_config_variable(
- 'metadata_service_timeout')
+ 'metadata_service_timeout'
+ )
metadata_num_attempts = self._session.get_config_variable(
- 'metadata_service_num_attempts')
+ 'metadata_service_num_attempts'
+ )
imds_config = {
'ec2_metadata_service_endpoint': self._session.get_config_variable(
- 'ec2_metadata_service_endpoint'),
+ 'ec2_metadata_service_endpoint'
+ ),
'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode(
self._session
),
@@ -175,13 +182,14 @@ def retrieve_region(self):
logger.debug(
"Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
- self._num_attempts
+ self._num_attempts,
)
except BadIMDSRequestError as e:
logger.debug(
"Failed to retrieve a region from IMDS. "
"Region detection may not be supported from this endpoint: "
- "%s", e.request.url
+ "%s",
+ e.request.url,
)
return None
@@ -190,7 +198,7 @@ def _get_region(self):
response = self._get_request(
url_path=self._URL_PATH,
retry_func=self._default_retry,
- token=token
+ token=token,
)
availability_zone = response.text
region = availability_zone[:-1]
@@ -229,16 +237,19 @@ def _split_with_quotes(value):
# Find an opening list bracket
list_start = part.find('=[')
- if list_start >= 0 and value.find(']') != -1 and \
- (quote_char is None or part.find(quote_char) > list_start):
+ if (
+ list_start >= 0
+ and value.find(']') != -1
+ and (quote_char is None or part.find(quote_char) > list_start)
+ ):
# This is a list, eat all the items until the end
if ']' in part:
# Short circuit for only one item
new_chunk = part
else:
new_chunk = _eat_items(value, iter_parts, part, ']')
- list_items = _split_with_quotes(new_chunk[list_start + 2:-1])
- new_chunk = new_chunk[:list_start + 1] + ','.join(list_items)
+ list_items = _split_with_quotes(new_chunk[list_start + 2 : -1])
+ new_chunk = new_chunk[: list_start + 1] + ','.join(list_items)
new_parts.append(new_chunk)
continue
elif quote_char is None:
@@ -334,8 +345,11 @@ def is_document_type_container(shape):
def is_streaming_blob_type(shape):
"""Check if the shape is a streaming blob type."""
- return (shape and shape.type_name == 'blob' and
- shape.serialization.get('streaming', False))
+ return (
+ shape
+ and shape.type_name == 'blob'
+ and shape.serialization.get('streaming', False)
+ )
def is_tagged_union_type(shape):
@@ -373,8 +387,7 @@ def ignore_ctrl_c():
def emit_top_level_args_parsed_event(session, args):
- session.emit(
- 'top-level-args-parsed', parsed_args=args, session=session)
+ session.emit('top-level-args-parsed', parsed_args=args, session=session)
def is_a_tty():
@@ -392,8 +405,9 @@ def is_stdin_a_tty():
class OutputStreamFactory(object):
- def __init__(self, session, popen=None, environ=None,
- default_less_flags='FRX'):
+ def __init__(
+ self, session, popen=None, environ=None, default_less_flags='FRX'
+ ):
self._session = session
self._popen = popen
if popen is None:
@@ -537,12 +551,14 @@ def _do_shape_visit(self, shape, visitor):
class BaseShapeVisitor(object):
"""Visit shape encountered by ShapeWalker"""
+
def visit_shape(self, shape):
pass
class ShapeRecordingVisitor(BaseShapeVisitor):
"""Record shapes visited by ShapeWalker"""
+
def __init__(self):
self.visited = []
@@ -558,12 +574,13 @@ def add_component_to_user_agent_extra(session, component):
def add_metadata_component_to_user_agent_extra(session, name, value=None):
add_component_to_user_agent_extra(
- session,
- UserAgentComponent("md", name, value)
+ session, UserAgentComponent("md", name, value)
)
def add_command_lineage_to_user_agent_extra(session, lineage):
# Only add a command lineage if one is not already present in the user agent extra.
if not re.search(r'md\/command#[\w\.]*', session.user_agent_extra):
- add_metadata_component_to_user_agent_extra(session, "command", ".".join(lineage))
+ add_metadata_component_to_user_agent_extra(
+ session, "command", ".".join(lineage)
+ )
diff --git a/backends/build_system/__main__.py b/backends/build_system/__main__.py
index 2e8f8ceddbb9..bb3285d3fe59 100644
--- a/backends/build_system/__main__.py
+++ b/backends/build_system/__main__.py
@@ -13,19 +13,14 @@
import argparse
import os
import shutil
+
from awscli_venv import AwsCliVenv
-from constants import (
- ArtifactType,
- BUILD_DIR,
- INSTALL_DIRNAME,
-)
from exe import ExeBuilder
-from install import (
- Installer,
- Uninstaller,
-)
+from install import Installer, Uninstaller
from validate_env import validate_env
+from constants import BUILD_DIR, INSTALL_DIRNAME, ArtifactType
+
def create_exe(aws_venv, build_dir):
exe_workspace = os.path.join(build_dir, "exe")
diff --git a/backends/build_system/awscli_venv.py b/backends/build_system/awscli_venv.py
index 7997043153d6..be995bf8116e 100644
--- a/backends/build_system/awscli_venv.py
+++ b/backends/build_system/awscli_venv.py
@@ -10,24 +10,24 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import os
import json
-import subprocess
+import os
+import pathlib
import site
+import subprocess
import sys
-import pathlib
from constants import (
- ArtifactType,
- DOWNLOAD_DEPS_BOOTSTRAP_LOCK,
- PORTABLE_EXE_REQUIREMENTS_LOCK,
- SYSTEM_SANDBOX_REQUIREMENTS_LOCK,
- ROOT_DIR,
- IS_WINDOWS,
BIN_DIRNAME,
- PYTHON_EXE_NAME,
CLI_SCRIPTS,
DISTRIBUTION_SOURCE_SANDBOX,
+ DOWNLOAD_DEPS_BOOTSTRAP_LOCK,
+ IS_WINDOWS,
+ PORTABLE_EXE_REQUIREMENTS_LOCK,
+ PYTHON_EXE_NAME,
+ ROOT_DIR,
+ SYSTEM_SANDBOX_REQUIREMENTS_LOCK,
+ ArtifactType,
)
from utils import Utils
@@ -138,15 +138,19 @@ def _site_packages(self) -> str:
# On windows the getsitepackages can return the root venv dir.
# So instead of just taking the first entry, we need to take the
# first entry that contains the string "site-packages" in the path.
- site_path = [path for path in json.loads(
- subprocess.check_output(
- [
- self.python_exe,
- "-c",
- "import site, json; print(json.dumps(site.getsitepackages()))",
- ]
+ site_path = [
+ path
+ for path in json.loads(
+ subprocess.check_output(
+ [
+ self.python_exe,
+ "-c",
+ "import site, json; print(json.dumps(site.getsitepackages()))",
+ ]
+ )
+ .decode()
+ .strip()
)
- .decode()
- .strip()
- ) if "site-packages" in path][0]
+ if "site-packages" in path
+ ][0]
return site_path
diff --git a/backends/build_system/constants.py b/backends/build_system/constants.py
index 7fb8bb35cc99..382027450073 100644
--- a/backends/build_system/constants.py
+++ b/backends/build_system/constants.py
@@ -14,7 +14,6 @@
from enum import Enum
from pathlib import Path
-
ROOT_DIR = Path(__file__).parents[2]
BUILD_DIR = ROOT_DIR / "build"
@@ -34,10 +33,16 @@
REQUIREMENTS_DIR = ROOT_DIR / "requirements"
BOOTSTRAP_REQUIREMENTS = REQUIREMENTS_DIR / "bootstrap.txt"
DOWNLOAD_DEPS_BOOTSTRAP = REQUIREMENTS_DIR / "download-deps" / "bootstrap.txt"
-DOWNLOAD_DEPS_BOOTSTRAP_LOCK = REQUIREMENTS_DIR / "download-deps" / f"bootstrap-{LOCK_SUFFIX}"
+DOWNLOAD_DEPS_BOOTSTRAP_LOCK = (
+ REQUIREMENTS_DIR / "download-deps" / f"bootstrap-{LOCK_SUFFIX}"
+)
PORTABLE_EXE_REQUIREMENTS = REQUIREMENTS_DIR / "portable-exe-extras.txt"
-PORTABLE_EXE_REQUIREMENTS_LOCK = REQUIREMENTS_DIR / "download-deps" / f"portable-exe-{LOCK_SUFFIX}"
-SYSTEM_SANDBOX_REQUIREMENTS_LOCK = REQUIREMENTS_DIR / "download-deps" / f"system-sandbox-{LOCK_SUFFIX}"
+PORTABLE_EXE_REQUIREMENTS_LOCK = (
+ REQUIREMENTS_DIR / "download-deps" / f"portable-exe-{LOCK_SUFFIX}"
+)
+SYSTEM_SANDBOX_REQUIREMENTS_LOCK = (
+ REQUIREMENTS_DIR / "download-deps" / f"system-sandbox-{LOCK_SUFFIX}"
+)
# Auto-complete index
AC_INDEX = ROOT_DIR / "awscli" / "data" / "ac.index"
diff --git a/backends/build_system/exe.py b/backends/build_system/exe.py
index 60a3d9253789..60bdb4348873 100644
--- a/backends/build_system/exe.py
+++ b/backends/build_system/exe.py
@@ -13,10 +13,16 @@
import os
from dataclasses import dataclass, field
-from constants import EXE_ASSETS_DIR, PYINSTALLER_DIR, DISTRIBUTION_SOURCE_EXE, PYINSTALLER_EXE_NAME
-from utils import Utils
from awscli_venv import AwsCliVenv
+from constants import (
+ DISTRIBUTION_SOURCE_EXE,
+ EXE_ASSETS_DIR,
+ PYINSTALLER_DIR,
+ PYINSTALLER_EXE_NAME,
+)
+from utils import Utils
+
@dataclass
class ExeBuilder:
@@ -52,12 +58,10 @@ def _update_metadata(self):
distribution_source=DISTRIBUTION_SOURCE_EXE,
)
for distinfo in self._utils.glob(
- '**/*.dist-info',
- root=self._final_dist_dir
+ '**/*.dist-info', root=self._final_dist_dir
):
self._utils.rmtree(os.path.join(self._final_dist_dir, distinfo))
-
def _ensure_no_existing_build_dir(self):
if self._utils.isdir(self._dist_dir):
self._utils.rmtree(self._dist_dir)
diff --git a/backends/build_system/install.py b/backends/build_system/install.py
index 189455d30145..ed230e2ca5f0 100644
--- a/backends/build_system/install.py
+++ b/backends/build_system/install.py
@@ -10,21 +10,23 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import os
import functools
+import os
-from constants import CLI_SCRIPTS
-from constants import IS_WINDOWS
-from constants import BIN_DIRNAME
-from constants import PYTHON_EXE_NAME
-from constants import ArtifactType
+from constants import (
+ BIN_DIRNAME,
+ CLI_SCRIPTS,
+ IS_WINDOWS,
+ PYTHON_EXE_NAME,
+ ArtifactType,
+)
from utils import Utils
-
WINDOWS_CMD_TEMPLATE = """@echo off
{path} %*
"""
+
class Uninstaller:
def __init__(self, utils: Utils = None):
if utils is None:
@@ -36,7 +38,9 @@ def uninstall(self, install_dir: str, bin_dir: str):
self._utils.rmtree(install_dir)
for exe in CLI_SCRIPTS:
exe_path = os.path.join(bin_dir, exe)
- if self._utils.islink(exe_path) or self._utils.path_exists(exe_path):
+ if self._utils.islink(exe_path) or self._utils.path_exists(
+ exe_path
+ ):
self._utils.remove(exe_path)
@@ -78,7 +82,9 @@ def _install_executables(self, install_dir, bin_dir):
def _install_executables_on_windows(self, install_dir, bin_dir):
filepath = os.path.join(bin_dir, "aws.cmd")
- content = WINDOWS_CMD_TEMPLATE.format(path=os.path.join(install_dir, "aws.exe"))
+ content = WINDOWS_CMD_TEMPLATE.format(
+ path=os.path.join(install_dir, "aws.exe")
+ )
self._utils.write_file(filepath, content)
def _symlink_executables(self, install_dir, bin_dir):
diff --git a/backends/build_system/utils.py b/backends/build_system/utils.py
index 230307857988..b41846050add 100644
--- a/backends/build_system/utils.py
+++ b/backends/build_system/utils.py
@@ -10,22 +10,19 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import contextlib
+import glob
+import json
import os
import re
-import sys
import shlex
-import glob
-import json
import shutil
import subprocess
+import sys
import venv
-import contextlib
-from typing import List, Dict, Any, Optional, Callable
-
-from constants import ROOT_DIR
-from constants import IS_WINDOWS
-from constants import BOOTSTRAP_REQUIREMENTS
+from typing import Any, Callable, Dict, List, Optional
+from constants import BOOTSTRAP_REQUIREMENTS, IS_WINDOWS, ROOT_DIR
PACKAGE_NAME = re.compile(r"(?P<name>[A-Za-z][A-Za-z0-9_\.\-]+)(?P<constraints>.+)")
CONSTRAINT = re.compile(r"(?P<comparison>[=\<\>]+)(?P<version>.+)")
@@ -47,7 +44,9 @@ def __init__(self, unmet_deps, in_venv, reason=None):
f"{package} (required: {required.constraints}) "
f"(version installed: {actual_version})\n"
)
- pip_install_command_args.append(f'{package}{required.string_constraints()}')
+ pip_install_command_args.append(
+ f'{package}{required.string_constraints()}'
+ )
if reason:
msg += f"\n{reason}\n"
@@ -100,7 +99,9 @@ def _meets_constraint(self, version, constraint) -> bool:
if not match:
raise RuntimeError(f"Unknown version specifier {constraint}")
comparison, constraint_version = match.group('comparison', 'version')
- version, constraint_version = self._normalize(version, constraint_version)
+ version, constraint_version = self._normalize(
+ version, constraint_version
+ )
compare_fn = COMPARISONS.get(comparison)
if not compare_fn:
@@ -120,7 +121,9 @@ def _normalize(self, v1: str, v2: str):
def __eq__(self, other):
if other is None:
return False
- return (self.name == other.name and self.constraints == other.constraints)
+ return (
+ self.name == other.name and self.constraints == other.constraints
+ )
def string_constraints(self):
return ','.join(self.constraints)
@@ -138,7 +141,7 @@ def parse_requirements(lines_list):
if line.startswith('#'):
continue
if ' #' in line:
- line = line[:line.find(' #')]
+ line = line[: line.find(' #')]
if line.endswith('\\'):
line = line[:-2].strip()
try:
@@ -184,17 +187,14 @@ def get_install_requires():
def get_flit_core_unmet_exception():
in_venv = sys.prefix != sys.base_prefix
with open(BOOTSTRAP_REQUIREMENTS, 'r') as f:
- flit_core_req = [
- l for l in f.read().split('\n')
- if 'flit_core' in l
- ]
+ flit_core_req = [l for l in f.read().split('\n') if 'flit_core' in l]
return UnmetDependenciesException(
[('flit_core', None, list(parse_requirements(flit_core_req))[0])],
in_venv,
reason=(
'flit_core is needed ahead of time in order to parse the '
'rest of the requirements.'
- )
+ ),
)
@@ -248,7 +248,9 @@ def copy_directory(self, src: str, dst: str):
def update_metadata(self, dirname, **kwargs):
print("Update metadata values %s" % kwargs)
- metadata_file = os.path.join(dirname, "awscli", "data", "metadata.json")
+ metadata_file = os.path.join(
+ dirname, "awscli", "data", "metadata.json"
+ )
with open(metadata_file) as f:
metadata = json.load(f)
for key, value in kwargs.items():
@@ -261,5 +263,7 @@ def create_venv(self, name: str, with_pip: bool = True):
def get_script_header(self, python_exe_path: str) -> str:
if IS_WINDOWS:
- return f'@echo off & "{python_exe_path}" -x "%~f0" %* & goto :eof\n'
+ return (
+ f'@echo off & "{python_exe_path}" -x "%~f0" %* & goto :eof\n'
+ )
return f"#!{python_exe_path}\n"
diff --git a/backends/build_system/validate_env.py b/backends/build_system/validate_env.py
index 6a7b4110dc77..98e0883d7073 100644
--- a/backends/build_system/validate_env.py
+++ b/backends/build_system/validate_env.py
@@ -10,24 +10,21 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import importlib.metadata
import re
import sys
from pathlib import Path
-import importlib.metadata
-from constants import (
- BOOTSTRAP_REQUIREMENTS,
- PORTABLE_EXE_REQUIREMENTS,
+from constants import BOOTSTRAP_REQUIREMENTS, PORTABLE_EXE_REQUIREMENTS
+from utils import (
+ UnmetDependenciesException,
+ get_install_requires,
+ parse_requirements,
)
-from utils import get_install_requires, parse_requirements
-from utils import UnmetDependenciesException
-
ROOT = Path(__file__).parents[2]
PYPROJECT = ROOT / "pyproject.toml"
-BUILD_REQS_RE = re.compile(
- r"requires = \[([\s\S]+?)\]\s", re.MULTILINE
-)
+BUILD_REQS_RE = re.compile(r"requires = \[([\s\S]+?)\]\s", re.MULTILINE)
EXTRACT_DEPENDENCIES_RE = re.compile(r'"(.+)"')
diff --git a/backends/pep517.py b/backends/pep517.py
index 4e4818007e84..b567673312d1 100644
--- a/backends/pep517.py
+++ b/backends/pep517.py
@@ -23,15 +23,16 @@
is that it builds the auto-complete index and injects it into the wheel
built by flit prior to returning.
"""
-import re
+
+import base64
import contextlib
+import glob
import hashlib
-import base64
import os
-import glob
-import tarfile
+import re
import shutil
import sys
+import tarfile
import zipfile
from pathlib import Path
@@ -150,6 +151,7 @@ def _should_copy(path):
return False
return True
+
def read_sdist_extras():
with open(ROOT_DIR / "pyproject.toml", "r") as f:
data = f.read()
diff --git a/bin/aws b/bin/aws
index b462ced92128..8d28af14f384 100755
--- a/bin/aws
+++ b/bin/aws
@@ -7,12 +7,13 @@
# http://aws.amazon.com/apache2.0/
+import os
+
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
-import os
if os.environ.get('LC_CTYPE', '') == 'UTF-8':
os.environ['LC_CTYPE'] = 'en_US.UTF-8'
diff --git a/bin/aws_completer b/bin/aws_completer
index a7f2b1e2af41..3c480dfdc544 100755
--- a/bin/aws_completer
+++ b/bin/aws_completer
@@ -12,6 +12,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
+
if os.environ.get('LC_CTYPE', '') == 'UTF-8':
os.environ['LC_CTYPE'] = 'en_US.UTF-8'
from awscli.autocomplete.main import autocomplete
diff --git a/doc/source/bootstrapdocs.py b/doc/source/bootstrapdocs.py
index 830071a3cf9e..39b50d8d863b 100644
--- a/doc/source/bootstrapdocs.py
+++ b/doc/source/bootstrapdocs.py
@@ -4,10 +4,9 @@
import sys
RST_GENERATION_SCRIPT = 'htmlgen'
-script_path = os.path.join(os.path.dirname(__file__),
- RST_GENERATION_SCRIPT)
+script_path = os.path.join(os.path.dirname(__file__), RST_GENERATION_SCRIPT)
os.environ['PATH'] += ':.'
-rc = subprocess.call("python "+ script_path, shell=True, env=os.environ)
+rc = subprocess.call("python " + script_path, shell=True, env=os.environ)
if rc != 0:
sys.stderr.write("Failed to generate documentation!\n")
sys.exit(2)
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 16be235bb810..4505f306d6b9 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,7 +11,8 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys, os
+import os
+import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -23,23 +24,24 @@
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['notfound.extension',]
+extensions = [
+ 'notfound.extension',
+]
notfound_context = {
'title': 'Page not found',
'body': '<h1>Page not found</h1>\n\n'
- 'Sorry, the page you requested could not be found.'
+ 'Sorry, the page you requested could not be found.',
}
notfound_pagename = '_404'
# notfound.extension changes all the relative links to links like
# "/en/latest/_static/**" and we use "notfound_default_language" key
# to change “en” to our path prefix
notfound_default_language = os.environ.get(
- 'DOCS_STATIC_PATH',
- 'v2/documentation/api'
+ 'DOCS_STATIC_PATH', 'v2/documentation/api'
)
# For local 404.html testing uncomment lines below and put in local path
@@ -55,14 +57,14 @@
source_suffix = '.rst'
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
-project = u'AWS CLI Command Reference'
-copyright = u'2018, Amazon Web Services'
+project = 'AWS CLI Command Reference'
+copyright = '2018, Amazon Web Services'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -75,45 +77,45 @@
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-#language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['examples']
# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'guzzle_sphinx_theme.GuzzleStyle'
-#pygments_style = 'sphinx'
+# pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-#html_theme = 'pyramid'
+# html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -123,23 +125,23 @@
# }
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = ['.']
+# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
html_title = "AWS CLI %s Command Reference" % release
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -148,50 +150,52 @@
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
- '**': ['sidebarlogo.html',
- 'localtoc.html',
- 'searchbox.html',
- 'feedback.html',
- 'userguide.html']
+ '**': [
+ 'sidebarlogo.html',
+ 'localtoc.html',
+ 'searchbox.html',
+ 'feedback.html',
+ 'userguide.html',
+ ]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aws-clidoc'
@@ -219,46 +223,48 @@
}
-
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'aws-cli.tex', u'AWS CLI Documentation',
- u'Amazon Web Services', 'manual'),
+ (
+ 'index',
+ 'aws-cli.tex',
+ 'AWS CLI Documentation',
+ 'Amazon Web Services',
+ 'manual',
+ ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
@@ -266,63 +272,122 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [('reference/index', 'aws', 'The main command', '', 1),
- ('reference/autoscaling/index', 'aws-autoscaling',
- 'The autoscaling service', '', 1),
- ('reference/cloudformation/index', 'aws-cloudformation',
- 'AWS CloudFormation', '', 1),
- ('reference/cloudwatch/index', 'aws-cloudwatch',
- 'Amazon CloudWatch', '', 1),
- ('reference/datapipeline/index', 'aws-datapipeline',
- 'AWS Data Pipeline', '', 1),
- ('reference/directconnect/index', 'aws-directconnect',
- 'AWS Direct Connect', '', 1),
- ('reference/dynamodb/index', 'aws-dynamodb',
- 'Amazon DynamoDB', '', 1),
- ('reference/ec2/index', 'aws-ec2',
- 'Amazon Elastic Compute Cloud', '', 1),
- ('reference/elasticache/index', 'aws-elasticache',
- 'Amazon ElastiCache', '', 1),
- ('reference/elasticbeanstalk/index', 'aws-elasticbeanstalk',
- 'AWS Elastic Beanstalk', '', 1),
- ('reference/elastictranscoder/index', 'aws-elastictranscoder',
- 'Amazon Elastic Transcoder', '', 1),
- ('reference/elb/index', 'aws-elb',
- 'Elastic Load Balancing', '', 1),
- ('reference/emr/index', 'aws-emr',
- 'Amazon Elastic MapReduce', '', 1),
- ('reference/iam/index', 'aws-iam',
- 'AWS Identity and Access Management', '', 1),
- ('reference/importexport/index', 'aws-importexport',
- 'AWS Import/Export', '', 1),
- ('reference/opsworks/index', 'aws-opsworks',
- 'AWS OpsWorks', '', 1),
- ('reference/rds/index', 'aws-rds',
- 'Amazon Relational Database Service', '', 1),
- ('reference/redshift/index', 'aws-redshift',
- 'Amazon Redshift', '', 1),
- ('reference/route53/index', 'aws-route53',
- 'Amazon Route 53', '', 1),
- ('reference/s3/index', 'aws-s3',
- 'Amazon Simple Storage Service', '', 1),
- ('reference/ses/index', 'aws-ses',
- 'Amazon Simple Email Service', '', 1),
- ('reference/sns/index', 'aws-sns',
- 'Amazon Simple Notification Service', '', 1),
- ('reference/sqs/index', 'aws-sqs',
- 'Amazon Simple Queue Service', '', 1),
- ('reference/storagegateway/index', 'aws-storagegateway',
- 'AWS Storage Gateway', '', 1),
- ('reference/sts/index', 'aws-sts',
- 'AWS Security Token Service', '', 1),
- ('reference/support/index', 'aws-support',
- 'AWS Support', '', 1),
- ('reference/swf/index', 'aws-swf',
- 'Amazon Simple Workflow Service', '', 1),
- ]
+man_pages = [
+ ('reference/index', 'aws', 'The main command', '', 1),
+ (
+ 'reference/autoscaling/index',
+ 'aws-autoscaling',
+ 'The autoscaling service',
+ '',
+ 1,
+ ),
+ (
+ 'reference/cloudformation/index',
+ 'aws-cloudformation',
+ 'AWS CloudFormation',
+ '',
+ 1,
+ ),
+ (
+ 'reference/cloudwatch/index',
+ 'aws-cloudwatch',
+ 'Amazon CloudWatch',
+ '',
+ 1,
+ ),
+ (
+ 'reference/datapipeline/index',
+ 'aws-datapipeline',
+ 'AWS Data Pipeline',
+ '',
+ 1,
+ ),
+ (
+ 'reference/directconnect/index',
+ 'aws-directconnect',
+ 'AWS Direct Connect',
+ '',
+ 1,
+ ),
+ ('reference/dynamodb/index', 'aws-dynamodb', 'Amazon DynamoDB', '', 1),
+ ('reference/ec2/index', 'aws-ec2', 'Amazon Elastic Compute Cloud', '', 1),
+ (
+ 'reference/elasticache/index',
+ 'aws-elasticache',
+ 'Amazon ElastiCache',
+ '',
+ 1,
+ ),
+ (
+ 'reference/elasticbeanstalk/index',
+ 'aws-elasticbeanstalk',
+ 'AWS Elastic Beanstalk',
+ '',
+ 1,
+ ),
+ (
+ 'reference/elastictranscoder/index',
+ 'aws-elastictranscoder',
+ 'Amazon Elastic Transcoder',
+ '',
+ 1,
+ ),
+ ('reference/elb/index', 'aws-elb', 'Elastic Load Balancing', '', 1),
+ ('reference/emr/index', 'aws-emr', 'Amazon Elastic MapReduce', '', 1),
+ (
+ 'reference/iam/index',
+ 'aws-iam',
+ 'AWS Identity and Access Management',
+ '',
+ 1,
+ ),
+ (
+ 'reference/importexport/index',
+ 'aws-importexport',
+ 'AWS Import/Export',
+ '',
+ 1,
+ ),
+ ('reference/opsworks/index', 'aws-opsworks', 'AWS OpsWorks', '', 1),
+ (
+ 'reference/rds/index',
+ 'aws-rds',
+ 'Amazon Relational Database Service',
+ '',
+ 1,
+ ),
+ ('reference/redshift/index', 'aws-redshift', 'Amazon Redshift', '', 1),
+ ('reference/route53/index', 'aws-route53', 'Amazon Route 53', '', 1),
+ ('reference/s3/index', 'aws-s3', 'Amazon Simple Storage Service', '', 1),
+ ('reference/ses/index', 'aws-ses', 'Amazon Simple Email Service', '', 1),
+ (
+ 'reference/sns/index',
+ 'aws-sns',
+ 'Amazon Simple Notification Service',
+ '',
+ 1,
+ ),
+ ('reference/sqs/index', 'aws-sqs', 'Amazon Simple Queue Service', '', 1),
+ (
+ 'reference/storagegateway/index',
+ 'aws-storagegateway',
+ 'AWS Storage Gateway',
+ '',
+ 1,
+ ),
+ ('reference/sts/index', 'aws-sts', 'AWS Security Token Service', '', 1),
+ ('reference/support/index', 'aws-support', 'AWS Support', '', 1),
+ (
+ 'reference/swf/index',
+ 'aws-swf',
+ 'Amazon Simple Workflow Service',
+ '',
+ 1,
+ ),
+]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@@ -334,10 +399,10 @@
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
diff --git a/doc/source/htmlgen b/doc/source/htmlgen
index e2f1a63bf6ef..80a96bc78dea 100755
--- a/doc/source/htmlgen
+++ b/doc/source/htmlgen
@@ -1,8 +1,9 @@
#!/usr/bin/env python
+import argparse
+import json
import os
import sys
-import json
-import argparse
+
import awscli.clidriver
from awscli.help import PagingHelpRenderer
@@ -12,7 +13,6 @@ TOPIC_PATH = 'topic'
class FileRenderer(PagingHelpRenderer):
-
def __init__(self, file_path):
self._file_path = file_path
@@ -23,8 +23,7 @@ class FileRenderer(PagingHelpRenderer):
def do_operation(driver, service_path, operation_name, operation_command):
- file_path = os.path.join(service_path,
- operation_name + '.rst')
+ file_path = os.path.join(service_path, operation_name + '.rst')
help_command = operation_command.create_help_command()
if help_command is None:
# Do not document anything that does not have a help command.
@@ -34,8 +33,9 @@ def do_operation(driver, service_path, operation_name, operation_command):
help_command(None, None)
-def do_service(driver, ref_path, service_name, service_command,
- is_top_level_service=True):
+def do_service(
+ driver, ref_path, service_name, service_command, is_top_level_service=True
+):
if is_top_level_service:
print('...%s' % service_name)
service_path = os.path.join(ref_path, service_name)
@@ -57,21 +57,24 @@ def do_service(driver, ref_path, service_name, service_command,
# If the operation command has a subcommand table with commands
# in it, treat it as a service command as opposed to an operation
# command.
- if (len(subcommand_table) > 0):
- do_service(driver, service_path, operation_name,
- operation_command, False)
+ if len(subcommand_table) > 0:
+ do_service(
+ driver, service_path, operation_name, operation_command, False
+ )
else:
- do_operation(driver, service_path, operation_name,
- operation_command)
+ do_operation(
+ driver, service_path, operation_name, operation_command
+ )
+
def do_topic(driver, topic_path, topic_help_command):
print('...%s' % topic_help_command.name)
- file_path = os.path.join(topic_path,
- topic_help_command.name + '.rst')
+ file_path = os.path.join(topic_path, topic_help_command.name + '.rst')
topic_help_command.doc.target = 'html'
topic_help_command.renderer = FileRenderer(file_path)
topic_help_command(None, None)
+
def do_provider(driver):
help_command = driver.create_help_command()
help_command.doc.target = 'html'
@@ -79,8 +82,9 @@ def do_provider(driver):
help_command(None, None)
topic_help_command = help_command.subcommand_table['topics']
- topic_help_command.renderer = FileRenderer(os.path.join(TOPIC_PATH,
- 'index.rst'))
+ topic_help_command.renderer = FileRenderer(
+ os.path.join(TOPIC_PATH, 'index.rst')
+ )
topic_help_command.doc.target = 'html'
help_command(['topics'], None)
topics = help_command.subcommand_table
@@ -118,20 +122,25 @@ def build_service_list(tut_path, ref_path, driver):
for full_name, service_name in l:
service_ref_path = os.path.join(ref_path, service_name)
service_ref_path = os.path.join(service_ref_path, 'index')
- fp.write('* :doc:`%s <..%s%s>`\n' % (full_name,
- os.path.sep,
- service_ref_path))
+ fp.write(
+ '* :doc:`%s <..%s%s>`\n'
+ % (full_name, os.path.sep, service_ref_path)
+ )
fp.write('\n')
fp.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('-s', '--service',
- help='Name of service, or else all services')
- parser.add_argument('-o', '--operations',
- help='Name of operations, or else all operations',
- nargs='*')
+ parser.add_argument(
+ '-s', '--service', help='Name of service, or else all services'
+ )
+ parser.add_argument(
+ '-o',
+ '--operations',
+ help='Name of operations, or else all operations',
+ nargs='*',
+ )
args = parser.parse_args()
driver = awscli.clidriver.create_clidriver()
if not os.path.isdir(REF_PATH):
diff --git a/exe/pyinstaller/hook-awscli.py b/exe/pyinstaller/hook-awscli.py
index 617b71d927ed..5e53e45b233a 100644
--- a/exe/pyinstaller/hook-awscli.py
+++ b/exe/pyinstaller/hook-awscli.py
@@ -1,6 +1,5 @@
from PyInstaller.utils import hooks
-
hiddenimports = [
'docutils',
'urllib',
@@ -14,19 +13,17 @@
# NOTE: This can be removed once this hidden import issue related to
# setuptools and PyInstaller is resolved:
# https://github.com/pypa/setuptools/issues/1963
- 'pkg_resources.py2_warn'
+ 'pkg_resources.py2_warn',
]
-imports_for_legacy_plugins = (
- hooks.collect_submodules('http') +
- hooks.collect_submodules('logging')
-)
+imports_for_legacy_plugins = hooks.collect_submodules(
+ 'http'
+) + hooks.collect_submodules('logging')
hiddenimports += imports_for_legacy_plugins
-alias_packages_plugins = (
- hooks.collect_submodules('awscli.botocore') +
- hooks.collect_submodules('awscli.s3transfer')
-)
+alias_packages_plugins = hooks.collect_submodules(
+ 'awscli.botocore'
+) + hooks.collect_submodules('awscli.s3transfer')
hiddenimports += alias_packages_plugins
datas = hooks.collect_data_files('awscli')
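
For context on the hook above: `collect_submodules` walks a package and returns its importable dotted names, which PyInstaller then bundles as hidden imports. A minimal sketch of that call, assuming PyInstaller is installed (the printed names are illustrative, not guaranteed output):

    # Rough illustration of what the hook computes; not part of the repository.
    from PyInstaller.utils.hooks import collect_submodules

    imports_for_legacy_plugins = collect_submodules('http') + collect_submodules(
        'logging'
    )
    # e.g. ['http', 'http.client', 'http.cookiejar']
    print(imports_for_legacy_plugins[:3])
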
diff --git a/exe/tests/README.rst b/exe/tests/README.rst
index 6d0f38d5efb0..2c35693f570b 100644
--- a/exe/tests/README.rst
+++ b/exe/tests/README.rst
@@ -33,4 +33,3 @@ test file::
$ shellcheck ../assets/install
$ shellcheck install.bats
-
diff --git a/proposals/assets/contribution-guide/contribution-guide-flowchart.xml b/proposals/assets/contribution-guide/contribution-guide-flowchart.xml
index e5b23af4802f..218ed54a949d 100644
--- a/proposals/assets/contribution-guide/contribution-guide-flowchart.xml
+++ b/proposals/assets/contribution-guide/contribution-guide-flowchart.xml
@@ -1,2 +1,2 @@
-[compressed draw.io diagram payload, unchanged]
\ No newline at end of file
+[compressed draw.io diagram payload, now ending with a newline]
diff --git a/proposals/contribution-guide.md b/proposals/contribution-guide.md
index abc5437300de..ee1a53759fca 100644
--- a/proposals/contribution-guide.md
+++ b/proposals/contribution-guide.md
@@ -48,7 +48,7 @@ GitHub issues are triaged regularly to determine that they are correctly
categorized and express a real and relevant problem or request. An answer is
provided as soon as possible to acknowledge or resolve the issue. Feature
requests are reviewed for general suitability and uniqueness. Users can vote for
-features via "reactions" on the issue.
+features via "reactions" on the issue.
#### Implementation stage
@@ -62,7 +62,7 @@ is provided to the contributor to improve the proposed change.
Pull requests are selected for review opportunistically when the maintainers
have decided that a change or feature should be incorporated. When a feature is
selected for review it is added to an internal queue. This queue manages the
-prioritization of these features but is not visible to the community.
+prioritization of these features but is not visible to the community.
Pull requests are subjected to automated tests and checks which provide
preliminary feedback. Once a pull request passes all automated tests and checks,
@@ -252,7 +252,7 @@ following criteria in descending order of importance:
A maintainer must select the issue from the queue with the current highest
priority. The prioritization of issues in the queue is reviewed on a regular
cadence and is ultimately decided based on the maintainers' discretion. See the
-[rationale](#rationale-reprioritize) for further discussion.
+[rationale](#rationale-reprioritize) for further discussion.
### Review stage
@@ -305,7 +305,7 @@ contribution. The ready for review lane is the prioritization queue, and issues
will be ordered in decreasing priority from top to bottom.
[Figure 2](#figure-2) demonstrates an example project with contributions in
-various stages of completion.
+various stages of completion.
## Managing the existing backlog
@@ -361,7 +361,7 @@ Pull requests are often made for problems that only affect the contributor or a
small portion of the user base. An issue provides a mechanism to gather
quantitative feedback in the form of "upvotes" through GitHub reactions to
estimate the impact of the issue on the community.
-
+
We intend for issues to be a way to improve contributor confidence in both their
contributions and the overall process. Draft pull requests can be used to
demonstrate a potential implementation and get community feedback or interest
@@ -478,7 +478,7 @@ include (but are not limited to):
also affect the behavior in other SDKs and must thus be made in coordination
with internal teams.
-### Preliminary review criteria
+### Preliminary review criteria
The maintainers will use a set of criteria to move a pull request from the
implementation to the ready for review stage, which may include (but are not
diff --git a/pyproject.toml b/pyproject.toml
index 671e41b2af59..bff2f7b73bdf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -134,3 +134,70 @@ markers = [
[tool.black]
line-length = 80
+
+[tool.isort]
+profile = "black"
+line_length = 79
+honor_noqa = true
+src_paths = ["awscli", "tests"]
+
+[tool.ruff]
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".ipynb_checkpoints",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pyenv",
+ ".pytest_cache",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ ".vscode",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "node_modules",
+ "site-packages",
+ "venv",
+]
+
+# Format same as Black.
+line-length = 79
+indent-width = 4
+
+target-version = "py38"
+
+[tool.ruff.lint]
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
+# McCabe complexity (`C901`) by default.
+select = ["E4", "E7", "E9", "F", "UP"]
+ignore = ["F401"]
+
+# Allow fix for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings, spaces for indents
+# and trailing commas.
+quote-style = "preserve"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
+
+docstring-code-format = false
+docstring-code-line-length = "dynamic"
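
With this configuration in `pyproject.toml`, running `ruff check --fix .` and `ruff format .` from the repository root picks the settings up automatically. As a hedged sketch (hypothetical module, not part of the repository) of what the lint rules above accept: `F401` is ignored, so unused imports pass, and names matching `dummy-variable-rgx` are exempt from the unused-variable rule (F841):

    # demo.py -- hypothetical module illustrating the lint settings above.
    import json  # unused, but F401 is ignored project-wide


    def head(items):
        # An unused binding would normally trip F841; the leading underscore
        # matches dummy-variable-rgx, so ruff leaves it alone.
        _unused = len(items)
        return items[0]
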
diff --git a/requirements-build.txt b/requirements-build.txt
index 0ef53f656530..6bf6d5b21336 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -1,4 +1,4 @@
# Requirements we need to run our build jobs for the installers.
# We create the separation for cases where we're doing installation
# from a local dependency directory instead of requirements.txt.
-PyInstaller==5.13.2
\ No newline at end of file
+PyInstaller==5.13.2
diff --git a/requirements.txt b/requirements.txt
index dfd867f42c69..60285a2d7faa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
--r requirements-dev.txt
\ No newline at end of file
+-r requirements-dev.txt
diff --git a/requirements/bootstrap.txt b/requirements/bootstrap.txt
index fcc7ca5345f4..31ab5e26f67c 100644
--- a/requirements/bootstrap.txt
+++ b/requirements/bootstrap.txt
@@ -1,2 +1,2 @@
pip>=22.0.0,<25.0.0
-flit_core>=3.7.1,<3.9.1
\ No newline at end of file
+flit_core>=3.7.1,<3.9.1
diff --git a/scripts/ci/install b/scripts/ci/install
index 0d2e41906fa2..5f1627d7762e 100755
--- a/scripts/ci/install
+++ b/scripts/ci/install
@@ -1,9 +1,9 @@
#!/usr/bin/env python
+import glob
import os
+import shutil
import sys
-import glob
from subprocess import check_call
-import shutil
_dname = os.path.dirname
@@ -14,6 +14,7 @@ os.chdir(REPO_ROOT)
def run(command):
return check_call(command, shell=True)
+
if sys.version_info[:2] >= (3, 12):
# Python 3.12+ no longer includes setuptools by default.
diff --git a/scripts/ci/install-benchmark b/scripts/ci/install-benchmark
index 70c82f26d074..5ec17234fea2 100755
--- a/scripts/ci/install-benchmark
+++ b/scripts/ci/install-benchmark
@@ -10,33 +10,37 @@ perf testing.
* Install dependencies
"""
+
import os
import shutil
from subprocess import check_call
-
GIT_OWNER = os.environ.get('GIT_OWNER', 'boto')
# Using PERF_BRANCH instead of GIT_BRANCH because that value
# is set by jenkins to the branch that's been checked out via
# git.
PERF_BRANCH = os.environ.get('PERF_BRANCH', 'develop')
REPO_ROOT = os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir'))
def clone_s3_transfer_repo():
if os.path.isdir('s3transfer'):
shutil.rmtree('s3transfer')
- check_call('git clone https://github.com/%s/s3transfer.git' % GIT_OWNER,
- shell=True)
+ check_call(
+ 'git clone https://github.com/%s/s3transfer.git' % GIT_OWNER,
+ shell=True,
+ )
check_call('cd s3transfer && git checkout %s' % PERF_BRANCH, shell=True)
def pip_install_s3transfer_and_deps():
check_call('cd s3transfer && pip install -e .', shell=True)
- check_call('cd s3transfer && pip install -r requirements-dev.txt',
- shell=True)
+ check_call(
+ 'cd s3transfer && pip install -r requirements-dev.txt', shell=True
+ )
check_call('pip install "caf>=0.1.0,<1.0.0"', shell=True)
check_call('cd %s && pip install -e .' % REPO_ROOT, shell=True)
diff --git a/scripts/ci/install-build-system b/scripts/ci/install-build-system
index 8aa9cba1dba5..a54fddf47742 100755
--- a/scripts/ci/install-build-system
+++ b/scripts/ci/install-build-system
@@ -1,11 +1,10 @@
#!/usr/bin/env python3
import argparse
-import tarfile
-import tempfile
-import os
import glob
+import os
import shutil
-
+import tarfile
+import tempfile
from pathlib import Path
from subprocess import check_call
diff --git a/scripts/ci/run-benchmark b/scripts/ci/run-benchmark
index bee911dbcff8..fc5148418f3a 100755
--- a/scripts/ci/run-benchmark
+++ b/scripts/ci/run-benchmark
@@ -4,23 +4,24 @@
As of now this benchmarks `cp` and `rm` with test cases for multiple 4kb files
(default 10000 files) and a single large file (default 10gb, `cp` only).
"""
-import os
-import json
-from subprocess import check_call, Popen, PIPE
-from datetime import datetime
-import random
+
import argparse
import inspect
-import shutil
+import json
+import os
import platform
+import random
+import shutil
+from datetime import datetime
+from subprocess import PIPE, Popen, check_call
import awscli
import s3transfer
-
TEST_BUCKET = os.environ.get('PERF_TEST_BUCKET')
REPO_ROOT = os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir'))
MANY_FILES_DIR = 'many'
LARGE_FILE_DIR = 'large'
@@ -44,14 +45,18 @@ def initialize_files(num_files, file_size):
many_files_dir = os.path.join(WORKDIR, MANY_FILES_DIR)
if not os.path.exists(many_files_dir):
os.makedirs(many_files_dir)
- run('caf gen --file-size 4kb --max-files %s --directory %s' %
- (num_files, many_files_dir))
+ run(
+ 'caf gen --file-size 4kb --max-files %s --directory %s'
+ % (num_files, many_files_dir)
+ )
large_file_dir = os.path.join(WORKDIR, LARGE_FILE_DIR)
if not os.path.exists(large_file_dir):
os.makedirs(large_file_dir)
- run('caf gen --file-size %s --max-files 1 --directory %s' %
- (file_size, large_file_dir))
+ run(
+ 'caf gen --file-size %s --max-files 1 --directory %s'
+ % (file_size, large_file_dir)
+ )
def write_metadata_file(filename):
@@ -81,12 +86,21 @@ def _inject_package_info(package, metadata):
def _get_git_version(package):
dname = os.path.dirname(inspect.getfile(package))
- git_sha = Popen(
- 'git rev-parse HEAD',
- cwd=dname, shell=True, stdout=PIPE).communicate()[0].strip()
- git_branch = Popen(
- 'git rev-parse --abbrev-ref HEAD',
- cwd=dname, shell=True, stdout=PIPE).communicate()[0].strip()
+ git_sha = (
+ Popen('git rev-parse HEAD', cwd=dname, shell=True, stdout=PIPE)
+ .communicate()[0]
+ .strip()
+ )
+ git_branch = (
+ Popen(
+ 'git rev-parse --abbrev-ref HEAD',
+ cwd=dname,
+ shell=True,
+ stdout=PIPE,
+ )
+ .communicate()[0]
+ .strip()
+ )
return '%s (%s)' % (git_sha, git_branch)
@@ -115,24 +129,30 @@ def benchmark(bucket, results_dir, num_iterations=1):
results = os.path.join(results_dir, 'upload-10k-small')
os.makedirs(results)
benchmark_cp = os.path.join(perf_dir, 'benchmark-cp')
- run(benchmark_cp + ' --recursive --num-iterations %s '
- '--source %s --dest %s --result-dir %s --no-cleanup' % (
- num_iterations, local_dir, s3_location, results))
+ run(
+ benchmark_cp + ' --recursive --num-iterations %s '
+ '--source %s --dest %s --result-dir %s --no-cleanup'
+ % (num_iterations, local_dir, s3_location, results)
+ )
# 10k download
results = os.path.join(results_dir, 'download-10k-small')
os.makedirs(results)
- run(benchmark_cp + ' --recursive --num-iterations %s '
- '--source %s --dest %s --result-dir %s' % (
- num_iterations, s3_location, local_dir, results))
+ run(
+ benchmark_cp + ' --recursive --num-iterations %s '
+ '--source %s --dest %s --result-dir %s'
+ % (num_iterations, s3_location, local_dir, results)
+ )
# 10k rm
results = os.path.join(results_dir, 'delete-10k-small')
os.makedirs(results)
benchmark_rm = os.path.join(perf_dir, 'benchmark-rm')
- run(benchmark_rm + ' --recursive --num-iterations %s '
- '--target %s --result-dir %s' % (
- num_iterations, s3_location, results))
+ run(
+ benchmark_rm + ' --recursive --num-iterations %s '
+ '--target %s --result-dir %s'
+ % (num_iterations, s3_location, results)
+ )
finally:
# Note that the delete-10k-small benchmark restores
# the files it's deleted once the script is finished.
@@ -146,16 +166,20 @@ def benchmark(bucket, results_dir, num_iterations=1):
# 10gb upload
results = os.path.join(results_dir, 'upload-10gb')
os.makedirs(results)
- run(benchmark_cp + ' --recursive --num-iterations %s '
- '--source %s --dest %s --result-dir %s --no-cleanup' % (
- num_iterations, local_dir, s3_location, results))
+ run(
+ benchmark_cp + ' --recursive --num-iterations %s '
+ '--source %s --dest %s --result-dir %s --no-cleanup'
+ % (num_iterations, local_dir, s3_location, results)
+ )
# 10gb download
results = os.path.join(results_dir, 'download-10gb')
os.makedirs(results)
- run(benchmark_cp + ' --recursive --num-iterations %s '
- '--source %s --dest %s --result-dir %s' % (
- num_iterations, s3_location, local_dir, results))
+ run(
+ benchmark_cp + ' --recursive --num-iterations %s '
+ '--source %s --dest %s --result-dir %s'
+ % (num_iterations, s3_location, local_dir, results)
+ )
finally:
# Not benchmarking a single rm call since it's just a single call
run('aws s3 rm --recursive ' + s3_location)
@@ -170,23 +194,32 @@ def s3_uri(value):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
- '-n', '--num-iterations', type=int, default=10,
- help='The number of times to run each test.'
+ '-n',
+ '--num-iterations',
+ type=int,
+ default=10,
+ help='The number of times to run each test.',
)
parser.add_argument(
- '-b', '--bucket', default=TEST_BUCKET, type=s3_uri,
+ '-b',
+ '--bucket',
+ default=TEST_BUCKET,
+ type=s3_uri,
required=TEST_BUCKET is None,
help='The bucket to use for testing as an s3 uri. This can also be '
- 'set by the environment variable PERF_TEST_BUCKET. If the '
- 'environment variable is not set, then this argument is required.'
+ 'set by the environment variable PERF_TEST_BUCKET. If the '
+ 'environment variable is not set, then this argument is required.',
)
parser.add_argument(
- '--num-files', default=10000, type=int,
- help='The number of files to use for the multiple file case.'
+ '--num-files',
+ default=10000,
+ type=int,
+ help='The number of files to use for the multiple file case.',
)
parser.add_argument(
- '--large-file-size', default='10gb',
+ '--large-file-size',
+ default='10gb',
help='The file size for the large file case. This can be in the form '
- '10gb, 4kb, etc.'
+ '10gb, 4kb, etc.',
)
main(parser.parse_args())
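
One pattern in the hunk above is worth spelling out: `--bucket` defaults to the `PERF_TEST_BUCKET` environment variable and is only required when that variable is unset (the real script additionally validates the value with its `s3_uri` type). A standalone sketch of the argparse pattern, under those assumptions:

    import argparse
    import os

    DEFAULT_BUCKET = os.environ.get('PERF_TEST_BUCKET')  # None when unset

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-b',
        '--bucket',
        default=DEFAULT_BUCKET,
        # The flag is mandatory only when the environment variable is absent.
        required=DEFAULT_BUCKET is None,
    )
    print(parser.parse_args().bucket)
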
diff --git a/scripts/ci/run-build-system-tests b/scripts/ci/run-build-system-tests
index 3fae903657e0..8614df8c9942 100755
--- a/scripts/ci/run-build-system-tests
+++ b/scripts/ci/run-build-system-tests
@@ -2,7 +2,6 @@
import argparse
import os
import sys
-
from contextlib import contextmanager
from pathlib import Path
from subprocess import check_call
diff --git a/scripts/ci/run-tests b/scripts/ci/run-tests
index 663b7f1a506e..670006a1630b 100755
--- a/scripts/ci/run-tests
+++ b/scripts/ci/run-tests
@@ -84,23 +84,19 @@ if __name__ == "__main__":
"running tests. This allows you to run the tests against the "
"current repository without have to install the package as a "
"distribution."
- )
+ ),
)
parser.add_argument(
"--ignore",
nargs='+',
default=[],
- help=(
- "Ignore a test subdirectory. Can be specified multiple times."
- )
+ help=("Ignore a test subdirectory. Can be specified multiple times."),
)
parser.add_argument(
"--tests-path",
default=None,
type=os.path.abspath,
- help=(
- "Optional path to an alternate test directory to use."
- )
+ help=("Optional path to an alternate test directory to use."),
)
raw_args = parser.parse_args()
test_runner, test_args, test_dirs = process_args(raw_args)
diff --git a/scripts/ci/upload-benchmark b/scripts/ci/upload-benchmark
index 33f7496ffa49..bf7d689a488f 100755
--- a/scripts/ci/upload-benchmark
+++ b/scripts/ci/upload-benchmark
@@ -1,13 +1,14 @@
#!/usr/bin/env python
"""Script to upload benchmark results to an s3 location."""
-import os
+
import argparse
+import os
from datetime import datetime
from subprocess import check_call
-
REPO_ROOT = os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
WORKDIR = os.environ.get('PERF_WORKDIR', os.path.join(REPO_ROOT, 'workdir'))
DEFAULT_BUCKET = os.environ.get('PERF_RESULTS_BUCKET')
DATE_FORMAT = "%Y-%m-%d-%H-%M-%S-"
@@ -18,8 +19,8 @@ def main(args):
run_id = source.split(os.sep)[-1]
destination = '%s/%s' % (args.bucket, run_id)
check_call(
- 'aws s3 cp --recursive %s %s' % (source, destination),
- shell=True)
+ 'aws s3 cp --recursive %s %s' % (source, destination), shell=True
+ )
def s3_uri(value):
@@ -54,15 +55,22 @@ def _is_result_dir_format(directory):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
- '-d', '--directory', default=os.path.join(WORKDIR, 'results'),
+ '-d',
+ '--directory',
+ default=os.path.join(WORKDIR, 'results'),
help='A directory containing multiple test runs or a single test '
- 'run directory. If this is a directory with multiple test runs, '
- 'the latest will be uploaded.')
+ 'run directory. If this is a directory with multiple test runs, '
+ 'the latest will be uploaded.',
+ )
parser.add_argument(
- '-b', '--bucket', default=DEFAULT_BUCKET, type=s3_uri,
+ '-b',
+ '--bucket',
+ default=DEFAULT_BUCKET,
+ type=s3_uri,
required=DEFAULT_BUCKET is None,
help='An s3uri to upload the results to. This can also be set with '
- 'the environment variable PERF_RESULTS_BUCKET. If the '
- 'environment variable is not set, then this argument is '
- 'required.')
+ 'the environment variable PERF_RESULTS_BUCKET. If the '
+ 'environment variable is not set, then this argument is '
+ 'required.',
+ )
main(parser.parse_args())
diff --git a/scripts/gen-ac-index b/scripts/gen-ac-index
index a4c16c1bc657..4f4df9174663 100755
--- a/scripts/gen-ac-index
+++ b/scripts/gen-ac-index
@@ -1,23 +1,26 @@
#!/usr/bin/env python
-"""Generate the index used for the new auto-completion.
+"""Generate the index used for the new auto-completion."""
-"""
-import os
import argparse
+import os
-from awscli.autocomplete import db
-from awscli.autocomplete import generator
+from awscli.autocomplete import db, generator
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('--include-builtin-index', action='store_true',
- help=("Also generate builtin index as well as the "
- "INDEX_LOCATION."))
- parser.add_argument('--index-location', default=db.INDEX_FILE,
- help=(
- 'Location to write the index file. '
- 'Defaults to ' + db.INDEX_FILE))
+ parser.add_argument(
+ '--include-builtin-index',
+ action='store_true',
+ help=("Also generate builtin index as well as the " "INDEX_LOCATION."),
+ )
+ parser.add_argument(
+ '--index-location',
+ default=db.INDEX_FILE,
+ help=(
+ 'Location to write the index file. ' 'Defaults to ' + db.INDEX_FILE
+ ),
+ )
args = parser.parse_args()
index_dir = os.path.dirname(os.path.abspath(args.index_location))
if not os.path.isdir(index_dir):
diff --git a/scripts/gen-server-completions b/scripts/gen-server-completions
index 335e685cec75..938e9d130e26 100755
--- a/scripts/gen-server-completions
+++ b/scripts/gen-server-completions
@@ -13,16 +13,18 @@ if you want to see the generated completion data without modifying existing
files.
"""
+
import argparse
import json
-import sys
import os
import re
+import sys
import botocore.session
-
-from awscli.autocomplete.autogen import ServerCompletionHeuristic
-from awscli.autocomplete.autogen import BasicSingularize
+from awscli.autocomplete.autogen import (
+ BasicSingularize,
+ ServerCompletionHeuristic,
+)
# The awscli/__init__.py file sets the AWS_DATA_PATH env var, so as long
# as we import from awscli we're ensured this env var exists.
@@ -37,8 +39,10 @@ def generate_completion_data(args):
model = session.get_service_model(service_name)
completion_data = gen.generate_completion_descriptions(model)
out_filename = os.path.join(
- BOTOCORE_DATA_PATH, service_name,
- model.api_version, 'completions-1.json'
+ BOTOCORE_DATA_PATH,
+ service_name,
+ model.api_version,
+ 'completions-1.json',
)
to_json = _pretty_json_dump(completion_data)
if args.only_print:
diff --git a/scripts/install b/scripts/install
index 8f825eb62ac7..12f23e6344a1 100755
--- a/scripts/install
+++ b/scripts/install
@@ -13,19 +13,18 @@ import subprocess
import sys
import tarfile
import tempfile
-
from contextlib import contextmanager
PACKAGES_DIR = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'packages')
-INSTALL_DIR = os.path.expanduser(os.path.join(
- '~', '.local', 'lib', 'aws'))
+ os.path.dirname(os.path.abspath(__file__)), 'packages'
+)
+INSTALL_DIR = os.path.expanduser(os.path.join('~', '.local', 'lib', 'aws'))
GTE_PY37 = sys.version_info[:2] >= (3, 7)
UNSUPPORTED_PYTHON = (
- (2,6),
- (3,3),
- (3,4),
- (3,5),
+ (2, 6),
+ (3, 3),
+ (3, 4),
+ (3, 5),
)
INSTALL_ARGS = (
'--no-binary :all: --no-build-isolation --no-cache-dir --no-index '
@@ -45,6 +44,7 @@ class PythonDeprecationWarning(Warning):
Python version being used is scheduled to become unsupported
in a future release. See warning for specifics.
"""
+
pass
@@ -52,12 +52,10 @@ def _build_deprecations():
py_27_params = {
'date': 'July 15, 2021',
'blog_link': 'https://aws.amazon.com/blogs/developer/announcing-end-'
- 'of-support-for-python-2-7-in-aws-sdk-for-python-and-'
- 'aws-cli-v1/'
- }
- return {
- (2,7): py_27_params
+ 'of-support-for-python-2-7-in-aws-sdk-for-python-and-'
+ 'aws-cli-v1/',
}
+ return {(2, 7): py_27_params}
DEPRECATED_PYTHON = _build_deprecations()
@@ -75,13 +73,15 @@ def cd(dirname):
def run(cmd):
sys.stdout.write("Running cmd: %s\n" % cmd)
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
stdout, stderr = p.communicate()
if p.returncode != 0:
output = (stdout + stderr).decode("utf-8")
- raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (
- p.returncode, cmd, output))
+ raise BadRCError(
+ "Bad rc (%s) for cmd '%s': %s" % (p.returncode, cmd, output)
+ )
return stdout
@@ -123,15 +123,16 @@ def _create_virtualenv_external(location, working_dir):
# We know that virtualenv is the only dir in this directory
# so we can listdir()[0] it.
with cd(os.listdir('.')[0]):
- run(('%s virtualenv.py --no-download '
- '--python %s %s') % (sys.executable,
- sys.executable,
- location))
+ run(
+ ('%s virtualenv.py --no-download ' '--python %s %s')
+ % (sys.executable, sys.executable, location)
+ )
def _get_package_tarball(package_dir, package_prefix):
- package_filenames = sorted([p for p in os.listdir(package_dir)
- if p.startswith(package_prefix)])
+ package_filenames = sorted(
+ [p for p in os.listdir(package_dir) if p.startswith(package_prefix)]
+ )
return package_filenames[-1]
@@ -145,8 +146,9 @@ def create_working_dir():
def pip_install_packages(install_dir):
- cli_tarball = [p for p in os.listdir(PACKAGES_DIR)
- if p.startswith('awscli')]
+ cli_tarball = [
+ p for p in os.listdir(PACKAGES_DIR) if p.startswith('awscli')
+ ]
if len(cli_tarball) != 1:
message = (
"Multiple versions of the CLI were found in %s. Please clear "
@@ -161,8 +163,10 @@ def pip_install_packages(install_dir):
_install_setup_deps(pip_script, '.')
with cd(PACKAGES_DIR):
- run('%s install %s --find-links file://%s %s' % (
- pip_script, INSTALL_ARGS, PACKAGES_DIR, cli_tarball))
+ run(
+ '%s install %s --find-links file://%s %s'
+ % (pip_script, INSTALL_ARGS, PACKAGES_DIR, cli_tarball)
+ )
def _install_setup_deps(pip_script, setup_package_dir):
@@ -172,15 +176,19 @@ def _install_setup_deps(pip_script, setup_package_dir):
# so for now we're explicitly installing the one setup_requires package
# we need. This comes from python-dateutil.
setuptools_scm_tarball = _get_package_tarball(
- setup_package_dir, 'setuptools_scm')
- run('%s install --no-binary :all: --no-cache-dir --no-index '
- '--find-links file://%s %s' % (
- pip_script, setup_package_dir, setuptools_scm_tarball))
- wheel_tarball = _get_package_tarball(
- setup_package_dir, 'wheel')
- run('%s install --no-binary :all: --no-cache-dir --no-index '
- '--find-links file://%s %s' % (
- pip_script, setup_package_dir, wheel_tarball))
+ setup_package_dir, 'setuptools_scm'
+ )
+ run(
+ '%s install --no-binary :all: --no-cache-dir --no-index '
+ '--find-links file://%s %s'
+ % (pip_script, setup_package_dir, setuptools_scm_tarball)
+ )
+ wheel_tarball = _get_package_tarball(setup_package_dir, 'wheel')
+ run(
+ '%s install --no-binary :all: --no-cache-dir --no-index '
+ '--find-links file://%s %s'
+ % (pip_script, setup_package_dir, wheel_tarball)
+ )
def create_symlink(real_location, symlink_name):
@@ -197,17 +205,25 @@ def create_symlink(real_location, symlink_name):
def main():
parser = optparse.OptionParser()
- parser.add_option('-i', '--install-dir', help="The location to install "
- "the AWS CLI. The default value is ~/.local/lib/aws",
- default=INSTALL_DIR)
- parser.add_option('-b', '--bin-location', help="If this argument is "
- "provided, then a symlink will be created at this "
- "location that points to the aws executable. "
- "This argument is useful if you want to put the aws "
- "executable somewhere already on your path, e.g. "
- "-b /usr/local/bin/aws. This is an optional argument. "
- "If you do not provide this argument you will have to "
- "add INSTALL_DIR/bin to your PATH.")
+ parser.add_option(
+ '-i',
+ '--install-dir',
+ help="The location to install "
+ "the AWS CLI. The default value is ~/.local/lib/aws",
+ default=INSTALL_DIR,
+ )
+ parser.add_option(
+ '-b',
+ '--bin-location',
+ help="If this argument is "
+ "provided, then a symlink will be created at this "
+ "location that points to the aws executable. "
+ "This argument is useful if you want to put the aws "
+ "executable somewhere already on your path, e.g. "
+ "-b /usr/local/bin/aws. This is an optional argument. "
+ "If you do not provide this argument you will have to "
+ "add INSTALL_DIR/bin to your PATH.",
+ )
py_version = sys.version_info[:2]
if py_version in UNSUPPORTED_PYTHON:
unsupported_python_msg = (
@@ -240,8 +256,9 @@ def main():
create_install_structure(working_dir, opts.install_dir)
pip_install_packages(opts.install_dir)
real_location = os.path.join(opts.install_dir, bin_path(), 'aws')
- if opts.bin_location and create_symlink(real_location,
- opts.bin_location):
+ if opts.bin_location and create_symlink(
+ real_location, opts.bin_location
+ ):
print("You can now run: %s --version" % opts.bin_location)
else:
print("You can now run: %s --version" % real_location)
diff --git a/scripts/install_deps.py b/scripts/install_deps.py
index 39db22085a35..2a142d0fe4c6 100644
--- a/scripts/install_deps.py
+++ b/scripts/install_deps.py
@@ -1,8 +1,10 @@
import os
-from utils import cd, bin_path, run, virtualenv_enabled
+from utils import bin_path, cd, run, virtualenv_enabled
-INSTALL_ARGS = "--no-build-isolation --no-cache-dir --no-index --prefer-binary "
+INSTALL_ARGS = (
+ "--no-build-isolation --no-cache-dir --no-index --prefer-binary "
+)
PINNED_PIP_VERSION = '24.0'
SETUP_DEPS = ("setuptools-", "setuptools_scm", "wheel", "hatchling")
@@ -17,7 +19,8 @@ def get_package_tarball(package_dir, package_prefix):
)
if len(package_filenames) == 0:
raise InstallationError(
- "Unable to find local package starting with %s prefix." % package_prefix
+ "Unable to find local package starting with %s prefix."
+ % package_prefix
)
# We only expect a single package from the downloader
return package_filenames[0]
@@ -41,7 +44,9 @@ def pip_install_packages(package_dir):
# Setup pip to support modern setuptools calls
pip_script = os.path.join(os.environ["VIRTUAL_ENV"], bin_path(), "pip")
- local_python = os.path.join(os.environ["VIRTUAL_ENV"], bin_path(), "python")
+ local_python = os.path.join(
+ os.environ["VIRTUAL_ENV"], bin_path(), "python"
+ )
# Windows can't replace a running pip.exe, so we need to work around
run("%s -m pip install pip==%s" % (local_python, PINNED_PIP_VERSION))
diff --git a/scripts/installers/make-docker b/scripts/installers/make-docker
index c9293ba02a8a..12d606da755a 100755
--- a/scripts/installers/make-docker
+++ b/scripts/installers/make-docker
@@ -1,19 +1,27 @@
#!/usr/bin/env python
"""Script to build a Docker image of the AWS CLI"""
+
import argparse
import os
-import sys
import shutil
+import sys
from distutils.dir_util import copy_tree
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from utils import run, tmp_dir, cd, BadRCError, \
- extract_zip, update_metadata, save_to_zip
-
-
-ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+from utils import (
+ BadRCError,
+ cd,
+ extract_zip,
+ run,
+ save_to_zip,
+ tmp_dir,
+ update_metadata,
+)
+
+ROOT = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
DOCKER_DIR = os.path.join(ROOT, 'docker')
DIST_DIR = os.path.join(ROOT, 'dist')
DEFAULT_EXE_ZIP = os.path.join(DIST_DIR, 'awscli-exe.zip')
@@ -52,8 +60,10 @@ def _make_build_context(build_context_dir, exe):
def _update_exe_metadata(exe):
with tmp_dir() as tmp:
extract_zip(exe, tmp)
- update_metadata(os.path.join(tmp, 'aws', 'dist'),
- distribution_source=DISTRIBUTION_SOURCE)
+ update_metadata(
+ os.path.join(tmp, 'aws', 'dist'),
+ distribution_source=DISTRIBUTION_SOURCE,
+ )
save_to_zip(tmp, exe)
@@ -63,15 +73,19 @@ def _copy_docker_dir_to_build_context(build_context_dir):
def _copy_exe_to_build_context(build_context_dir, exe):
build_context_exe_path = os.path.join(
- build_context_dir, os.path.basename(exe))
+ build_context_dir, os.path.basename(exe)
+ )
shutil.copy(exe, build_context_exe_path)
def _docker_build(build_context_dir, tags, exe_filename):
with cd(build_context_dir):
docker_build_cmd = [
- 'docker', 'build', '--build-arg',
- f'EXE_FILENAME={exe_filename}', '.'
+ 'docker',
+ 'build',
+ '--build-arg',
+ f'EXE_FILENAME={exe_filename}',
+ '.',
]
for tag in tags:
docker_build_cmd.extend(['-t', tag])
@@ -94,7 +108,7 @@ def main():
help=(
'The name of the exe zip to build into the Docker image. By '
'default the exe located at: %s' % DEFAULT_EXE_ZIP
- )
+ ),
)
parser.add_argument(
'--output',
@@ -102,7 +116,7 @@ def main():
help=(
'The name of the file to save the Docker image. By default, '
'this will be saved at: %s' % DEFAULT_DOCKER_OUTPUT
- )
+ ),
)
parser.add_argument(
'--tags',
diff --git a/scripts/installers/make-exe b/scripts/installers/make-exe
index 75a3634fcc18..58c872fb37a1 100755
--- a/scripts/installers/make-exe
+++ b/scripts/installers/make-exe
@@ -4,21 +4,23 @@
This exe can then be wrapped in a platform specific installer for each
supported platform.
"""
+
import argparse
import json
import os
-import sys
import shutil
+import sys
from distutils.dir_util import copy_tree
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from utils import run, tmp_dir, update_metadata, save_to_zip, remove_dist_info
from install_deps import install_packages
+from utils import remove_dist_info, run, save_to_zip, tmp_dir, update_metadata
-ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+ROOT = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
EXE_DIR = os.path.join(ROOT, 'exe')
PYINSTALLER_DIR = os.path.join(EXE_DIR, 'pyinstaller')
ASSETS_DIR = os.path.join(EXE_DIR, 'assets')
@@ -42,12 +44,14 @@ def do_make_exe(workdir, exe_zipfile, ac_index):
aws_exe_build = pyinstaller('aws.spec')
if ac_index:
full_internal_ac_index_path = os.path.join(
- aws_exe_build, AC_INDEX_INTERNAL_PATH)
+ aws_exe_build, AC_INDEX_INTERNAL_PATH
+ )
copy_file(ac_index, full_internal_ac_index_path)
copy_directory(aws_exe_build, output_exe_dist_dir)
aws_complete_exe_build = pyinstaller('aws_completer.spec')
- update_metadata(aws_complete_exe_build,
- distribution_source=DISTRIBUTION_SOURCE)
+ update_metadata(
+ aws_complete_exe_build, distribution_source=DISTRIBUTION_SOURCE
+ )
copy_directory_contents_into(aws_complete_exe_build, output_exe_dist_dir)
copy_directory_contents_into(ASSETS_DIR, exe_dir)
remove_dist_info(workdir)
@@ -63,8 +67,7 @@ def delete_existing_exe_build():
def pyinstaller(specfile):
aws_spec_path = os.path.join(PYINSTALLER_DIR, specfile)
print(run('pyinstaller %s' % (aws_spec_path), cwd=PYINSTALLER_DIR))
- return os.path.join(
- PYINSTALLER_DIR, 'dist', os.path.splitext(specfile)[0])
+ return os.path.join(PYINSTALLER_DIR, 'dist', os.path.splitext(specfile)[0])
def copy_directory(src, dst):
@@ -101,7 +104,7 @@ def main():
'The name of the file to save the exe zip. By default, '
'this will be saved in "dist/%s" directory in the root of the '
'awscli.' % DEFAULT_OUTPUT_ZIP
- )
+ ),
)
parser.add_argument(
'--no-cleanup',
@@ -124,16 +127,15 @@ def main():
parser.add_argument(
'--ac-index-path',
default=None,
- help=(
- 'Path to ac.index file to include in the exe.'
- )
+ help=('Path to ac.index file to include in the exe.'),
)
args = parser.parse_args()
output = os.path.abspath(args.output)
if args.src_dir:
print(
- 'Installing dependencies from local directory: %s' % args.src_dir)
+ 'Installing dependencies from local directory: %s' % args.src_dir
+ )
install_packages(args.src_dir)
else:
run('pip install -r requirements-dev-lock.txt')
diff --git a/scripts/installers/make-macpkg b/scripts/installers/make-macpkg
index 5b6f7e1e95f4..51e8b6336676 100755
--- a/scripts/installers/make-macpkg
+++ b/scripts/installers/make-macpkg
@@ -4,18 +4,19 @@
This script assumes that an executable has been produced previously
by the sibling script make-pyinstaller.
"""
+
import argparse
import os
-import sys
import shutil
+import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from utils import run, tmp_dir, extract_zip
-
+from utils import extract_zip, run, tmp_dir
-ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+ROOT = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
PKG_DIR = os.path.join(ROOT, 'macpkg')
SCRIPTS_DIR = os.path.join(PKG_DIR, 'scripts')
RESOURCES_DIR = os.path.join(PKG_DIR, 'resources')
@@ -45,27 +46,30 @@ def stage_files(workdir):
def do_make_pkg(workdir, pkg_name):
version = get_version(workdir)
- print(run(
- (
- 'pkgbuild --identifier com.amazon.aws.cli2 '
- '--root ./stage '
- '--scripts %s '
- '--version %s '
- '%s'
- ) % (SCRIPTS_DIR, version, TEMP_PKG_NAME),
- cwd=workdir,
- ))
- with tmp_dir() as formatted_resource_dir:
- render_resources(
- formatted_resource_dir, RESOURCES_DIR, {'version': version})
- print(run(
+ print(
+ run(
(
- 'productbuild --distribution %s '
- '--resources %s '
+ 'pkgbuild --identifier com.amazon.aws.cli2 '
+ '--root ./stage '
+ '--scripts %s '
+ '--version %s '
'%s'
- ) % (DISTRIBUTION_PATH, formatted_resource_dir, PKG_NAME),
+ )
+ % (SCRIPTS_DIR, version, TEMP_PKG_NAME),
cwd=workdir,
- ))
+ )
+ )
+ with tmp_dir() as formatted_resource_dir:
+ render_resources(
+ formatted_resource_dir, RESOURCES_DIR, {'version': version}
+ )
+ print(
+ run(
+ ('productbuild --distribution %s ' '--resources %s ' '%s')
+ % (DISTRIBUTION_PATH, formatted_resource_dir, PKG_NAME),
+ cwd=workdir,
+ )
+ )
shutil.copyfile(os.path.join(workdir, PKG_NAME), pkg_name)
@@ -104,7 +108,7 @@ def main():
help=(
'The output PKG name. By default, this will be '
'"dist/%s" in the root of the awscli.' % PKG_NAME
- )
+ ),
)
parser.add_argument(
'--src-exe',
@@ -112,7 +116,7 @@ def main():
help=(
'The exe used to build the PKG. By default, this will be the '
'"dist/%s" zipfile in the root of the awscli.' % EXE_ZIP_NAME
- )
+ ),
)
args = parser.parse_args()
output = os.path.abspath(args.output)
diff --git a/scripts/installers/sign-exe b/scripts/installers/sign-exe
index a0f4c14fba3b..01763684a1e1 100755
--- a/scripts/installers/sign-exe
+++ b/scripts/installers/sign-exe
@@ -1,16 +1,17 @@
#!/usr/bin/env python
"""Script to sign exe bundle"""
+
import argparse
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from utils import run, BadRCError
-
+from utils import BadRCError, run
-ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))))
+ROOT = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)
EXE_ZIP_NAME = 'awscli-exe.zip'
SIGNATURE_FILENAME = EXE_ZIP_NAME + '.sig'
@@ -28,9 +29,7 @@ def _verify_gpg_installed():
def _sign_exe_zip(exe_zipfile, signature_filename, key_name):
- options = [
- '--yes', '--output', signature_filename
- ]
+ options = ['--yes', '--output', signature_filename]
if key_name:
options.extend(['--local-user', key_name])
options = ' '.join(options)
@@ -45,7 +44,7 @@ def main():
help=(
'The output signature file. By default, this will be '
'"dist/%s" in the root of the awscli.' % SIGNATURE_FILENAME
- )
+ ),
)
parser.add_argument(
'--exe',
@@ -53,7 +52,7 @@ def main():
help=(
'The exe zip to sign. By default, this will be the '
'"dist/%s" zipfile in the root of the awscli.' % EXE_ZIP_NAME
- )
+ ),
)
parser.add_argument(
'--key-name',
@@ -61,7 +60,7 @@ def main():
'The name of the key to use for signing. This corresponds to the '
'--local-user option when running gpg. By default, the key used '
'is your default private key in gpg.'
- )
+ ),
)
args = parser.parse_args()
sign_exe(args.exe, args.output, args.key_name)
@@ -69,4 +68,3 @@ def main():
if __name__ == "__main__":
main()
-
diff --git a/scripts/installers/test-installer b/scripts/installers/test-installer
index 465a1bc5e25d..5f069b55cb78 100755
--- a/scripts/installers/test-installer
+++ b/scripts/installers/test-installer
@@ -1,23 +1,26 @@
#!/usr/bin/env python
"""Script to run smoke tests on aws cli packaged installers"""
+
import argparse
-import sys
import os
import re
import shutil
+import sys
import tempfile
SCRIPTS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(SCRIPTS_DIR)
-from utils import run, tmp_dir, extract_zip
+from utils import extract_zip, run, tmp_dir
REPO_ROOT = os.path.dirname(SCRIPTS_DIR)
DIST_DIR = os.path.join(REPO_ROOT, 'dist')
SMOKE_TEST_PATH = os.path.join(
- REPO_ROOT, 'tests', 'integration', 'test_smoke.py')
+ REPO_ROOT, 'tests', 'integration', 'test_smoke.py'
+)
UNINSTALL_MAC_PKG_PATH = os.path.join(
- SCRIPTS_DIR, 'installers', 'uninstall-mac-pkg')
+ SCRIPTS_DIR, 'installers', 'uninstall-mac-pkg'
+)
EXE_NAME = 'aws'
@@ -79,9 +82,8 @@ class ExeTester(InstallerTester):
extract_zip(self._installer_location, workdir)
install_script = os.path.join(workdir, 'aws', 'install')
run(
- '%s --install-dir %s --bin-dir %s' % (
- install_script, install_dir, bin_dir
- )
+ '%s --install-dir %s --bin-dir %s'
+ % (install_script, install_dir, bin_dir)
)
def cleanup(self):
@@ -92,7 +94,9 @@ class ExeTester(InstallerTester):
class PkgTester(InstallerTester):
- DEFAULT_INSTALLER_LOCATION = os.path.join(DIST_DIR, 'AWS-CLI-Installer.pkg')
+ DEFAULT_INSTALLER_LOCATION = os.path.join(
+ DIST_DIR, 'AWS-CLI-Installer.pkg'
+ )
_PKG_ID = 'com.amazon.aws.cli2'
def get_aws_cmd(self):
@@ -109,8 +113,9 @@ class PkgTester(InstallerTester):
run('sudo %s %s uninstall' % (sys.executable, UNINSTALL_MAC_PKG_PATH))
def __call__(self):
- assert os.geteuid() == 0, \
- 'Mac PKG installer must be run as root (with sudo).'
+ assert (
+ os.geteuid() == 0
+ ), 'Mac PKG installer must be run as root (with sudo).'
super(PkgTester, self).__call__()
@@ -121,20 +126,20 @@ def main():
}
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
- '--installer-type', required=True,
+ '--installer-type',
+ required=True,
choices=installer_to_tester_cls.keys(),
- help='The type of installer to test'
+ help='The type of installer to test',
)
parser.add_argument(
'--installer-path',
help=(
'The path to the installer to test. By default, installers are '
'used from the dist directory.'
- )
+ ),
)
args = parser.parse_args()
- tester = installer_to_tester_cls[args.installer_type](
- args.installer_path)
+ tester = installer_to_tester_cls[args.installer_type](args.installer_path)
return tester()
diff --git a/scripts/installers/uninstall-mac-pkg b/scripts/installers/uninstall-mac-pkg
index 49bc4f164434..4f2003a3ae53 100755
--- a/scripts/installers/uninstall-mac-pkg
+++ b/scripts/installers/uninstall-mac-pkg
@@ -1,20 +1,17 @@
#!/usr/bin/env python
"""Script to uninstall AWS CLI V2 Mac PKG"""
+
import argparse
-import sys
import os
import re
+import sys
from datetime import datetime
-from subprocess import check_output
-from subprocess import CalledProcessError
-from subprocess import PIPE
+from subprocess import PIPE, CalledProcessError, check_output
SCRIPTS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(SCRIPTS_DIR)
-from utils import run
-from utils import BadRCError
-
+from utils import BadRCError, run
_PKG_ID = 'com.amazon.aws.cli2'
_PKGUTIL_PATTERN = re.compile(
@@ -24,9 +21,10 @@ _PKGUTIL_PATTERN = re.compile(
        r'location:\s*(?P<location>.*?)\n'
        r'install-time:\s*(?P<install_time>.*?)\n'
),
- re.X
+ re.X,
)
+
def uninstall():
assert _is_installed(), 'Could not find AWS CLI installation.'
assert os.geteuid() == 0, 'Script must be run as root (with sudo).'
@@ -58,7 +56,8 @@ def _get_root_dir():
def _get_file_list(root):
lines = run(
- 'pkgutil --only-files --files %s /' % _PKG_ID, echo=False).split('\n')
+ 'pkgutil --only-files --files %s /' % _PKG_ID, echo=False
+ ).split('\n')
pkg_file_list = [os.path.join(root, line) for line in lines if line]
extra_files = _read_install_metadata(root)
return pkg_file_list + extra_files
@@ -79,13 +78,14 @@ def _read_install_metadata(root):
def _get_dir_list(root):
lines = run(
- 'pkgutil --only-dirs --files %s /' % _PKG_ID, echo=False).split('\n')
+ 'pkgutil --only-dirs --files %s /' % _PKG_ID, echo=False
+ ).split('\n')
# Longer directory names are listed first to force them to come before
# their parent directories. This ensures that child directories are
# deleted before their parents.
return sorted(
[os.path.join(root, line) for line in lines if line],
- key=lambda x: -len(x)
+ key=lambda x: -len(x),
)
@@ -114,10 +114,14 @@ def check():
lines = run('pkgutil --pkg-info %s /' % _PKG_ID, echo=False)
output = _PKGUTIL_PATTERN.search(lines)
root = os.path.join(output.group('volume'), output.group('location'))
- print('Found AWS CLI version %s installed at %s' % (
- output.group('version'), root))
- print('Installed on %s' % datetime.fromtimestamp(
- int(output.group('install_time'))))
+ print(
+ 'Found AWS CLI version %s installed at %s'
+ % (output.group('version'), root)
+ )
+ print(
+ 'Installed on %s'
+ % datetime.fromtimestamp(int(output.group('install_time')))
+ )
command = 'sudo %s uninstall' % os.path.abspath(__file__)
print('To uninstall run the command:')
print(command)
@@ -133,11 +137,12 @@ def _is_installed():
def _warn_missing_arg(print_help):
-
# wrap `parser.print_help()` to return 1 so any callers don't receive
# a potentially misleading 0 exit code from a failed call.
def missing_arg_warning():
- print('Missing input: script requires at least one positional argument\n')
+ print(
+ 'Missing input: script requires at least one positional argument\n'
+ )
print_help()
return 1
@@ -152,12 +157,11 @@ def main():
help=(
'Check if the AWS CLI is currently installed from a PKG '
'installer.'
- )
+ ),
)
check_parser.set_defaults(func=check)
uninstall_parser = subparsers.add_parser(
- 'uninstall',
- help='Uninstall the AWS CLI installed from the Mac PKG'
+ 'uninstall', help='Uninstall the AWS CLI installed from the Mac PKG'
)
uninstall_parser.set_defaults(func=uninstall)
diff --git a/scripts/make-bundle b/scripts/make-bundle
index 915e4dea0255..4a9ccc17ad1f 100755
--- a/scripts/make-bundle
+++ b/scripts/make-bundle
@@ -12,15 +12,15 @@ interface for those not familiar with the python
ecosystem.
"""
+
import os
-import sys
-import subprocess
import shutil
+import subprocess
+import sys
import tempfile
import zipfile
from contextlib import contextmanager
-
EXTRA_RUNTIME_DEPS = [
# Use an up to date virtualenv/pip/setuptools on > 2.6.
('virtualenv', '16.7.8'),
@@ -36,7 +36,8 @@ PIP_DOWNLOAD_ARGS = '--no-binary :all:'
# we're distributing a copy that works on all supported platforms.
CONSTRAINTS_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- 'assets', 'constraints-bundled.txt'
+ 'assets',
+ 'constraints-bundled.txt',
)
@@ -56,13 +57,15 @@ def cd(dirname):
def run(cmd):
sys.stdout.write("Running cmd: %s\n" % cmd)
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
stdout, stderr = p.communicate()
rc = p.wait()
if p.returncode != 0:
- raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (
- rc, cmd, stderr + stdout))
+ raise BadRCError(
+ "Bad rc (%s) for cmd '%s': %s" % (rc, cmd, stderr + stdout)
+ )
return stdout
@@ -80,17 +83,19 @@ def create_scratch_dir():
def download_package_tarballs(dirname, packages):
with cd(dirname):
for package, package_version in packages:
- run('%s -m pip download %s==%s %s' % (
- sys.executable, package, package_version, PIP_DOWNLOAD_ARGS
- ))
+ run(
+ '%s -m pip download %s==%s %s'
+ % (sys.executable, package, package_version, PIP_DOWNLOAD_ARGS)
+ )
def download_cli_deps(scratch_dir):
- awscli_dir = os.path.dirname(
- os.path.dirname(os.path.abspath(__file__)))
+ awscli_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with cd(scratch_dir):
- run('pip download -c %s %s %s' % (
- CONSTRAINTS_FILE, PIP_DOWNLOAD_ARGS, awscli_dir))
+ run(
+ 'pip download -c %s %s %s'
+ % (CONSTRAINTS_FILE, PIP_DOWNLOAD_ARGS, awscli_dir)
+ )
def _remove_cli_zip(scratch_dir):
@@ -100,20 +105,21 @@ def _remove_cli_zip(scratch_dir):
def add_cli_sdist(scratch_dir):
- awscli_dir = os.path.dirname(
- os.path.dirname(os.path.abspath(__file__)))
+ awscli_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.exists(os.path.join(awscli_dir, 'dist')):
shutil.rmtree(os.path.join(awscli_dir, 'dist'))
with cd(awscli_dir):
run('%s setup.py sdist' % sys.executable)
filename = os.listdir('dist')[0]
- shutil.move(os.path.join('dist', filename),
- os.path.join(scratch_dir, filename))
+ shutil.move(
+ os.path.join('dist', filename), os.path.join(scratch_dir, filename)
+ )
def create_bootstrap_script(scratch_dir):
install_script = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'install')
+ os.path.dirname(os.path.abspath(__file__)), 'install'
+ )
shutil.copy(install_script, os.path.join(scratch_dir, 'install'))
@@ -135,11 +141,13 @@ def zip_dir(scratch_dir):
def verify_preconditions():
# The pip version looks like:
# 'pip 1.4.1 from ....'
- pip_version = run(
- '%s -m pip --version' % sys.executable).strip().split()[1]
+ pip_version = (
+ run('%s -m pip --version' % sys.executable).strip().split()[1]
+ )
# Virtualenv version just has the version string: '1.14.5\n'
virtualenv_version = run(
- '%s -m virtualenv --version' % sys.executable).strip()
+ '%s -m virtualenv --version' % sys.executable
+ ).strip()
_min_version_required('9.0.1', pip_version, 'pip')
_min_version_required('15.1.0', virtualenv_version, 'virtualenv')
@@ -152,8 +160,10 @@ def _min_version_required(min_version, actual_version, name):
for min_version_part, actual_version_part in zip(min_split, actual_split):
if int(actual_version_part) >= int(min_version_part):
return
- raise ValueError("%s requires at least version %s, but version %s was "
- "found." % (name, min_version, actual_version))
+ raise ValueError(
+ "%s requires at least version %s, but version %s was "
+ "found." % (name, min_version, actual_version)
+ )
def main():
diff --git a/scripts/make-global-opts-documentation b/scripts/make-global-opts-documentation
index 3f8f345df62c..00097504ab5b 100755
--- a/scripts/make-global-opts-documentation
+++ b/scripts/make-global-opts-documentation
@@ -10,11 +10,13 @@ every subcommand's help docs.
import os
-from awscli.clidriver import create_clidriver
from awscli.clidocs import (
- EXAMPLES_DIR, GLOBAL_OPTIONS_FILE,
- GLOBAL_OPTIONS_SYNOPSIS_FILE, GlobalOptionsDocumenter
+ EXAMPLES_DIR,
+ GLOBAL_OPTIONS_FILE,
+ GLOBAL_OPTIONS_SYNOPSIS_FILE,
+ GlobalOptionsDocumenter,
)
+from awscli.clidriver import create_clidriver
def main():
diff --git a/scripts/new-change b/scripts/new-change
index 8b4905147318..43fc571dad37 100755
--- a/scripts/new-change
+++ b/scripts/new-change
@@ -36,21 +36,20 @@ You can then use the ``scripts/render-change`` to generate the
CHANGELOG.rst file.
"""
+
+import argparse
+import json
import os
+import random
import re
-import sys
-import json
import string
-import random
-import tempfile
import subprocess
-import argparse
-
+import sys
+import tempfile
VALID_CHARS = set(string.ascii_letters + string.digits)
CHANGES_DIR = os.path.join(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- '.changes'
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.changes'
)
TEMPLATE = """\
# Type should be one of: feature, bugfix, enhancement, api-change
@@ -90,7 +89,8 @@ def new_changelog_entry(args):
parsed_values = get_values_from_editor(args)
if has_empty_values(parsed_values):
sys.stderr.write(
- "Empty changelog values received, skipping entry creation.\n")
+ "Empty changelog values received, skipping entry creation.\n"
+ )
return 1
replace_issue_references(parsed_values, args.repo)
write_new_change(parsed_values)
@@ -98,9 +98,11 @@ def new_changelog_entry(args):
def has_empty_values(parsed_values):
- return not (parsed_values.get('type') and
- parsed_values.get('category') and
- parsed_values.get('description'))
+ return not (
+ parsed_values.get('type')
+ and parsed_values.get('category')
+ and parsed_values.get('description')
+ )
def all_values_provided(args):
@@ -131,9 +133,11 @@ def replace_issue_references(parsed, repo_name):
def linkify(match):
number = match.group()[1:]
- return (
-        '`%s <https://github.com/%s/issues/%s>`__' % (
- match.group(), repo_name, number))
+    return '`%s <https://github.com/%s/issues/%s>`__' % (
+ match.group(),
+ repo_name,
+ number,
+ )
new_description = re.sub('#\d+', linkify, description)
parsed['description'] = new_description
@@ -151,13 +155,15 @@ def write_new_change(parsed_values):
category = parsed_values['category']
short_summary = ''.join(filter(lambda x: x in VALID_CHARS, category))
filename = '{type_name}-{summary}'.format(
- type_name=parsed_values['type'],
- summary=short_summary)
+ type_name=parsed_values['type'], summary=short_summary
+ )
possible_filename = os.path.join(
- dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))))
+ dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000)))
+ )
while os.path.isfile(possible_filename):
possible_filename = os.path.join(
- dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))))
+ dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000)))
+ )
with open(possible_filename, 'w') as f:
f.write(json.dumps(parsed_values, indent=2) + "\n")
@@ -198,15 +204,21 @@ def parse_filled_in_contents(contents):
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('-t', '--type', dest='change_type',
- default='', choices=('bugfix', 'feature',
- 'enhancement', 'api-change'))
- parser.add_argument('-c', '--category', dest='category',
- default='')
- parser.add_argument('-d', '--description', dest='description',
- default='')
- parser.add_argument('-r', '--repo', default='aws/aws-cli',
- help='Optional repo name, e.g: aws/aws-cli')
+ parser.add_argument(
+ '-t',
+ '--type',
+ dest='change_type',
+ default='',
+ choices=('bugfix', 'feature', 'enhancement', 'api-change'),
+ )
+ parser.add_argument('-c', '--category', dest='category', default='')
+ parser.add_argument('-d', '--description', dest='description', default='')
+ parser.add_argument(
+ '-r',
+ '--repo',
+ default='aws/aws-cli',
+ help='Optional repo name, e.g: aws/aws-cli',
+ )
args = parser.parse_args()
sys.exit(new_changelog_entry(args))
diff --git a/scripts/performance/benchmark-cp b/scripts/performance/benchmark-cp
index e63ae7cd8d56..8a2e58cf1403 100755
--- a/scripts/performance/benchmark-cp
+++ b/scripts/performance/benchmark-cp
@@ -1,7 +1,12 @@
#!/usr/bin/env python
-from benchmark_utils import summarize, clean
-from benchmark_utils import get_default_argparser, get_transfer_command
-from benchmark_utils import create_random_subfolder, benchmark_command
+from benchmark_utils import (
+ benchmark_command,
+ clean,
+ create_random_subfolder,
+ get_default_argparser,
+ get_transfer_command,
+ summarize,
+)
def benchmark_cp(args):
@@ -16,21 +21,26 @@ def benchmark_cp(args):
clean(destination, args.recursive)
benchmark_command(
- command, args.benchmark_script, args.summarize_script,
- args.result_dir, args.num_iterations, args.dry_run,
- cleanup=cleanup
+ command,
+ args.benchmark_script,
+ args.summarize_script,
+ args.result_dir,
+ args.num_iterations,
+ args.dry_run,
+ cleanup=cleanup,
)
if __name__ == "__main__":
parser = get_default_argparser()
parser.add_argument(
- '-s', '--source', required=True,
- help='A local path or s3 path.'
+ '-s', '--source', required=True, help='A local path or s3 path.'
)
parser.add_argument(
- '-d', '--destination', required=True,
+ '-d',
+ '--destination',
+ required=True,
help='A local path or s3 path. A directory will be created in this '
- 'location to copy to in the case of a recursive transfer.'
+ 'location to copy to in the case of a recursive transfer.',
)
benchmark_cp(parser.parse_args())
diff --git a/scripts/performance/benchmark-mv b/scripts/performance/benchmark-mv
index b6e679425edd..f08a61a3dffc 100755
--- a/scripts/performance/benchmark-mv
+++ b/scripts/performance/benchmark-mv
@@ -1,7 +1,13 @@
#!/usr/bin/env python
-from benchmark_utils import backup, copy, clean, get_default_argparser
-from benchmark_utils import create_random_subfolder, benchmark_command
-from benchmark_utils import get_transfer_command
+from benchmark_utils import (
+ backup,
+ benchmark_command,
+ clean,
+ copy,
+ create_random_subfolder,
+ get_default_argparser,
+ get_transfer_command,
+)
def benchmark_mv(args):
@@ -22,22 +28,27 @@ def benchmark_mv(args):
copy(backup_path, args.source, args.recursive)
benchmark_command(
- command, args.benchmark_script, args.summarize_script,
- args.result_dir, args.num_iterations, args.dry_run,
+ command,
+ args.benchmark_script,
+ args.summarize_script,
+ args.result_dir,
+ args.num_iterations,
+ args.dry_run,
upkeep=upkeep,
- cleanup=cleanup
+ cleanup=cleanup,
)
if __name__ == "__main__":
parser = get_default_argparser()
parser.add_argument(
- '-s', '--source', required=True,
- help='A local path or s3 path.'
+ '-s', '--source', required=True, help='A local path or s3 path.'
)
parser.add_argument(
- '-d', '--destination', required=True,
+ '-d',
+ '--destination',
+ required=True,
help='A local path or s3 path. A directory will be created in this '
- 'location to move to in the case of a recursive transfer.'
+ 'location to move to in the case of a recursive transfer.',
)
benchmark_mv(parser.parse_args())
diff --git a/scripts/performance/benchmark-rm b/scripts/performance/benchmark-rm
index 16009c696cda..946830a68de8 100755
--- a/scripts/performance/benchmark-rm
+++ b/scripts/performance/benchmark-rm
@@ -1,18 +1,29 @@
#!/usr/bin/env python
-from benchmark_utils import benchmark_command, get_transfer_command
-from benchmark_utils import backup, copy, clean, get_default_argparser
+from benchmark_utils import (
+ backup,
+ benchmark_command,
+ clean,
+ copy,
+ get_default_argparser,
+ get_transfer_command,
+)
def benchmark_rm(args):
command = get_transfer_command(
- 'rm %s' % args.target, args.recursive, args.quiet)
+ 'rm %s' % args.target, args.recursive, args.quiet
+ )
backup_path = backup(args.target, args.recursive)
benchmark_command(
- command, args.benchmark_script, args.summarize_script,
- args.result_dir, args.num_iterations, args.dry_run,
+ command,
+ args.benchmark_script,
+ args.summarize_script,
+ args.result_dir,
+ args.num_iterations,
+ args.dry_run,
upkeep=lambda: copy(backup_path, args.target, args.recursive),
- cleanup=lambda: clean(backup_path, args.recursive)
+ cleanup=lambda: clean(backup_path, args.recursive),
)
diff --git a/scripts/performance/benchmark_utils.py b/scripts/performance/benchmark_utils.py
index da48ae372d81..9b6ece2114e6 100644
--- a/scripts/performance/benchmark_utils.py
+++ b/scripts/performance/benchmark_utils.py
@@ -1,10 +1,11 @@
-import s3transfer
+import argparse
import os
-import subprocess
-import uuid
import shutil
-import argparse
+import subprocess
import tempfile
+import uuid
+
+import s3transfer
def summarize(script, result_dir, summary_dir):
@@ -145,9 +146,16 @@ def get_transfer_command(command, recursive, quiet):
return cli_command
-def benchmark_command(command, benchmark_script, summarize_script,
- output_dir, num_iterations, dry_run, upkeep=None,
- cleanup=None):
+def benchmark_command(
+ command,
+ benchmark_script,
+ summarize_script,
+ output_dir,
+ num_iterations,
+ dry_run,
+ upkeep=None,
+ cleanup=None,
+):
"""Benchmark several runs of a long-running command.
:type command: str
@@ -192,7 +200,10 @@ def benchmark_command(command, benchmark_script, summarize_script,
out_file = 'performance%s.csv' % i
out_file = os.path.join(performance_dir, out_file)
benchmark_args = [
- benchmark_script, command, '--output-file', out_file
+ benchmark_script,
+ command,
+ '--output-file',
+ out_file,
]
if not dry_run:
subprocess.check_call(benchmark_args)
@@ -210,42 +221,61 @@ def get_default_argparser():
"""Get an ArgumentParser with all the base benchmark arguments added in."""
parser = argparse.ArgumentParser()
parser.add_argument(
- '--no-cleanup', action='store_true', default=False,
- help='Do not remove the destination after the tests complete.'
+ '--no-cleanup',
+ action='store_true',
+ default=False,
+ help='Do not remove the destination after the tests complete.',
)
parser.add_argument(
- '--recursive', action='store_true', default=False,
- help='Indicates that this is a recursive transfer.'
+ '--recursive',
+ action='store_true',
+ default=False,
+ help='Indicates that this is a recursive transfer.',
)
benchmark_script = get_benchmark_script()
parser.add_argument(
- '--benchmark-script', default=benchmark_script,
+ '--benchmark-script',
+ default=benchmark_script,
required=benchmark_script is None,
- help=('The benchmark script to run the commands with. This should be '
- 'from s3transfer.')
+ help=(
+ 'The benchmark script to run the commands with. This should be '
+ 'from s3transfer.'
+ ),
)
summarize_script = get_summarize_script()
parser.add_argument(
- '--summarize-script', default=summarize_script,
+ '--summarize-script',
+ default=summarize_script,
required=summarize_script is None,
- help=('The summarize script to run the commands with. This should be '
- 'from s3transfer.')
+ help=(
+ 'The summarize script to run the commands with. This should be '
+ 'from s3transfer.'
+ ),
)
parser.add_argument(
- '-o', '--result-dir', default='results',
+ '-o',
+ '--result-dir',
+ default='results',
help='The directory to output performance results to. Existing '
- 'results will be deleted.'
+ 'results will be deleted.',
)
parser.add_argument(
- '--dry-run', default=False, action='store_true',
- help='If set, commands will only be printed out, not executed.'
+ '--dry-run',
+ default=False,
+ action='store_true',
+ help='If set, commands will only be printed out, not executed.',
)
parser.add_argument(
- '--quiet', default=False, action='store_true',
- help='If set, output is suppressed.'
+ '--quiet',
+ default=False,
+ action='store_true',
+ help='If set, output is suppressed.',
)
parser.add_argument(
- '-n', '--num-iterations', default=1, type=int,
- help='The number of times to run the test.'
+ '-n',
+ '--num-iterations',
+ default=1,
+ type=int,
+ help='The number of times to run the test.',
)
return parser
diff --git a/scripts/performance/perfcmp b/scripts/performance/perfcmp
index d2b2c8378e87..8e8d93170972 100755
--- a/scripts/performance/perfcmp
+++ b/scripts/performance/perfcmp
@@ -7,16 +7,16 @@ the run information::
$ ./perfcmp /results/2016-01-01-1111/ /results/2016-01-01-2222/
"""
-import os
-import json
+
import argparse
+import json
+import os
from colorama import Fore, Style
from tabulate import tabulate
class RunComparison(object):
-
MEMORY_FIELDS = ['average_memory', 'max_memory']
TIME_FIELDS = ['total_time']
# Fields that aren't memory or time fields, they require
@@ -66,7 +66,7 @@ class RunComparison(object):
def _format(self, field, value):
if field.startswith('std_dev_'):
- field = field[len('std_dev_'):]
+ field = field[len('std_dev_') :]
if field in self.MEMORY_FIELDS:
return self._human_readable_size(value)[0]
elif field in self.TIME_FIELDS:
@@ -85,14 +85,15 @@ class RunComparison(object):
return '%d Bytes' % bytes_int
for i, suffix in enumerate(hummanize_suffixes):
- unit = base ** (i+2)
+ unit = base ** (i + 2)
if round((bytes_int / unit) * base) < base:
return ['%.2f' % (base * bytes_int / unit), suffix]
def diff_percent(self, field):
diff_percent = (
- (self.new_summary[field] - self.old_summary[field]) /
- float(self.old_summary[field])) * 100
+ (self.new_summary[field] - self.old_summary[field])
+ / float(self.old_summary[field])
+ ) * 100
return diff_percent
@@ -105,29 +106,39 @@ def compare_runs(old_dir, new_dir):
old_summary = get_summary(old_run_dir)
new_summary = get_summary(new_run_dir)
comp = RunComparison(old_summary, new_summary)
- header = [Style.BRIGHT + dirname + Style.RESET_ALL,
- Style.BRIGHT + 'old' + Style.RESET_ALL,
- # Numeric suffix (MiB, GiB, sec).
- '',
- 'std_dev',
- Style.BRIGHT + 'new' + Style.RESET_ALL,
- # Numeric suffix (MiB, GiB, sec).
- '',
- 'std_dev',
- Style.BRIGHT + 'delta' + Style.RESET_ALL]
+ header = [
+ Style.BRIGHT + dirname + Style.RESET_ALL,
+ Style.BRIGHT + 'old' + Style.RESET_ALL,
+ # Numeric suffix (MiB, GiB, sec).
+ '',
+ 'std_dev',
+ Style.BRIGHT + 'new' + Style.RESET_ALL,
+ # Numeric suffix (MiB, GiB, sec).
+ '',
+ 'std_dev',
+ Style.BRIGHT + 'delta' + Style.RESET_ALL,
+ ]
rows = []
for field in comp.iter_field_names():
- row = [field, comp.old(field), comp.old_suffix(field),
- comp.old_stddev(field), comp.new(field),
- comp.new_suffix(field), comp.new_stddev(field)]
+ row = [
+ field,
+ comp.old(field),
+ comp.old_suffix(field),
+ comp.old_stddev(field),
+ comp.new(field),
+ comp.new_suffix(field),
+ comp.new_stddev(field),
+ ]
diff_percent = comp.diff_percent(field)
diff_percent_str = '%.2f%%' % diff_percent
if diff_percent < 0:
diff_percent_str = (
- Fore.GREEN + diff_percent_str + Style.RESET_ALL)
+ Fore.GREEN + diff_percent_str + Style.RESET_ALL
+ )
else:
diff_percent_str = (
- Fore.RED + diff_percent_str + Style.RESET_ALL)
+ Fore.RED + diff_percent_str + Style.RESET_ALL
+ )
row.append(diff_percent_str)
rows.append(row)
print(tabulate(rows, headers=header, tablefmt='plain'))
diff --git a/scripts/regenerate-configure/Dockerfile b/scripts/regenerate-configure/Dockerfile
index be4d15e3cc6b..231549955d99 100644
--- a/scripts/regenerate-configure/Dockerfile
+++ b/scripts/regenerate-configure/Dockerfile
@@ -23,4 +23,4 @@ RUN make
RUN make install
WORKDIR /build
-RUN autoreconf
\ No newline at end of file
+RUN autoreconf
diff --git a/scripts/regenerate-configure/regenerate-configure b/scripts/regenerate-configure/regenerate-configure
index d5e8bd9891b4..e9ea173e53f6 100755
--- a/scripts/regenerate-configure/regenerate-configure
+++ b/scripts/regenerate-configure/regenerate-configure
@@ -4,7 +4,6 @@ import re
from pathlib import Path
from subprocess import run
-
ROOT = Path(__file__).parents[2]
DOCKERFILE_PATH = ROOT / "scripts" / "regenerate-configure" / "Dockerfile"
IMAGE_RE = re.compile(r"sha256:(?P.*?)\s")
@@ -84,7 +83,4 @@ if __name__ == "__main__":
help="Do not clean up docker image and container. Useful for debugging.",
)
args = parser.parse_args()
- main(
- not args.no_cleanup,
- args.dockerfile_path
- )
+ main(not args.no_cleanup, args.dockerfile_path)
diff --git a/scripts/regenerate-lock-files b/scripts/regenerate-lock-files
index 56554c1e1542..90921311d4a9 100755
--- a/scripts/regenerate-lock-files
+++ b/scripts/regenerate-lock-files
@@ -14,15 +14,15 @@
"""This script is to programatically regenerate the requirements/*-lock.txt
files. In order to run it you need to have pip-tools installed into the
currently active virtual environment."""
+
import argparse
-import sys
import os
-from typing import List, ClassVar
-from pathlib import Path
+import sys
from dataclasses import dataclass
+from pathlib import Path
+from typing import ClassVar, List
-from utils import run, BadRCError
-
+from utils import BadRCError, run
ROOT = Path(__file__).parents[1]
IS_WINDOWS = sys.platform == "win32"
@@ -124,10 +124,10 @@ def show_file(path: Path):
def main(
- build_directory: Path,
- should_show_files: bool,
- include_sdist: bool,
- include_base: bool
+ build_directory: Path,
+ should_show_files: bool,
+ include_sdist: bool,
+ include_base: bool,
):
builder = LockFileBuilder(
source_directory=ROOT,
@@ -204,13 +204,24 @@ if __name__ == "__main__":
help=("Default base directory where output lock files to be written."),
)
parser.add_argument('--show-files', action='store_true')
- parser.add_argument('--no-show-files', action='store_false', dest='show_files')
+ parser.add_argument(
+ '--no-show-files', action='store_false', dest='show_files'
+ )
parser.set_defaults(show_files=False)
parser.add_argument('--include-sdist', action='store_true')
- parser.add_argument('--no-include-sdist', action='store_false', dest='include_sdist')
+ parser.add_argument(
+ '--no-include-sdist', action='store_false', dest='include_sdist'
+ )
parser.set_defaults(include_sdist=True)
parser.add_argument('--include-base', action='store_true')
- parser.add_argument('--no-include-base', action='store_false', dest='include_base')
+ parser.add_argument(
+ '--no-include-base', action='store_false', dest='include_base'
+ )
parser.set_defaults(include_base=False)
args = parser.parse_args()
- main(args.output_directory, args.show_files, args.include_sdist, args.include_base)
+ main(
+ args.output_directory,
+ args.show_files,
+ args.include_sdist,
+ args.include_base,
+ )
diff --git a/scripts/utils.py b/scripts/utils.py
index 3d5bfaa8c722..065c5d12f2f8 100644
--- a/scripts/utils.py
+++ b/scripts/utils.py
@@ -1,13 +1,14 @@
import contextlib
+import glob
import json
import os
import platform
import shutil
-import sys
import subprocess
+import sys
import tempfile
import zipfile
-import glob
+
class BadRCError(Exception):
pass
@@ -31,8 +32,9 @@ def run(cmd, cwd=None, env=None, echo=True):
stdout, stderr = p.communicate()
output = stdout.decode('utf-8') + stderr.decode('utf-8')
if p.returncode != 0:
- raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (
- p.returncode, cmd, output))
+ raise BadRCError(
+ "Bad rc (%s) for cmd '%s': %s" % (p.returncode, cmd, output)
+ )
return output