Skip to content

Commit

Permalink
Fix bin scripts for Python 3.
Browse files Browse the repository at this point in the history
Fixes S3 and EC2 scripts so that they are valid Python 3. Some things may
still not work, but the scripts are now capable of running under Python 3.
This mostly involved updates to `print` statements.
  • Loading branch information
danielgtaylor committed Aug 4, 2014
1 parent 2ffb00a commit bc126af
Show file tree
Hide file tree
Showing 7 changed files with 56 additions and 50 deletions.
4 changes: 2 additions & 2 deletions bin/fetch_file
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@ The URI can be either an HTTP URL, or "s3://bucket_name/key_name"
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
exit(1)
raise SystemExit(1)
from boto.utils import fetch_file
f = fetch_file(args[0])
if options.outfile:
open(options.outfile, "w").write(f.read())
else:
print f.read()
print(f.read())
14 changes: 7 additions & 7 deletions bin/glacier
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ COMMANDS = ('vaults', 'jobs', 'upload')


def usage():
print """
print("""
glacier <command> [args]
Commands
Expand Down Expand Up @@ -78,7 +78,7 @@ glacier <command> [args]
Examples :
glacier upload pics *.jpg
glacier upload pics a.jpg b.jpg
"""
""")
sys.exit()


Expand All @@ -89,20 +89,20 @@ def connect(region, debug_level=0, access_key=None, secret_key=None):
aws_secret_access_key=secret_key,
debug=debug_level)
if layer2 is None:
print 'Invalid region (%s)' % region
print('Invalid region (%s)' % region)
sys.exit(1)
return layer2


def list_vaults(region, access_key=None, secret_key=None):
    """Print the ARN of every Glacier vault in the given region.

    :param region: AWS region name to connect to.
    :param access_key: Optional AWS access key id; boto config is used if omitted.
    :param secret_key: Optional AWS secret key; boto config is used if omitted.
    """
    # connect() exits the process itself when the region is invalid.
    layer2 = connect(region, access_key=access_key, secret_key=secret_key)
    for vault in layer2.list_vaults():
        print(vault.arn)


def list_jobs(vault_name, region, access_key=None, secret_key=None):
    """Print the raw Layer1 job listing for a Glacier vault.

    :param vault_name: Name of the vault whose jobs are listed.
    :param region: AWS region name to connect to.
    :param access_key: Optional AWS access key id; boto config is used if omitted.
    :param secret_key: Optional AWS secret key; boto config is used if omitted.
    """
    # connect() exits the process itself when the region is invalid.
    layer2 = connect(region, access_key=access_key, secret_key=secret_key)
    print(layer2.layer1.list_jobs(vault_name))


def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
Expand All @@ -111,7 +111,7 @@ def upload_files(vault_name, filenames, region, access_key=None, secret_key=None
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
if isfile(filename):
print 'Uploading %s to %s' % (filename, vault_name)
print('Uploading %s to %s' % (filename, vault_name))
glacier_vault.upload_archive(filename)


Expand All @@ -128,7 +128,7 @@ def main():
long_options = ['access_key=', 'secret_key=', 'region=']
try:
opts, args = getopt(argv, options, long_options)
except GetoptError, e:
except GetoptError as e:
usage()

# Parse argument
Expand Down
4 changes: 2 additions & 2 deletions bin/kill_instance
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def kill_instance(region, ids):
# Connect the region
ec2 = boto.connect_ec2(region=region)
for instance_id in ids:
print "Stopping instance: %s" % instance_id
print("Stopping instance: %s" % instance_id)
ec2.terminate_instances([instance_id])


Expand All @@ -29,7 +29,7 @@ if __name__ == "__main__":
region = r
break
else:
print "Region %s not found." % options.region
print("Region %s not found." % options.region)
sys.exit(1)

kill_instance(region, args)
8 changes: 4 additions & 4 deletions bin/launch_instance
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ if __name__ == "__main__":
region = r
break
else:
print "Region %s not found." % options.region
print("Region %s not found." % options.region)
sys.exit(1)
ec2 = boto.connect_ec2(region=region)
if not options.nocred and not options.role:
Expand Down Expand Up @@ -206,7 +206,7 @@ if __name__ == "__main__":
scr_url = "file://%s" % scr_url
try:
scriptuples.append((scr, add_script(scr_url)))
except Exception, e:
except Exception as e:
pass

user_data = boto.utils.write_mime_multipart(scriptuples, compress=True)
Expand Down Expand Up @@ -248,5 +248,5 @@ if __name__ == "__main__":
time.sleep(3)

if options.dns:
print "Public DNS name: %s" % instance.public_dns_name
print "Private DNS name: %s" % instance.private_dns_name
print("Public DNS name: %s" % instance.public_dns_name)
print("Private DNS name: %s" % instance.private_dns_name)
10 changes: 5 additions & 5 deletions bin/list_instances
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def main():
region = r
break
else:
print "Region %s not found." % options.region
print("Region %s not found." % options.region)
sys.exit(1)
ec2 = boto.connect_ec2(region=region)

Expand Down Expand Up @@ -73,17 +73,17 @@ def main():
# List and print

if not options.tab:
print format_string % headers
print "-" * len(format_string % headers)
print(format_string % headers)
print("-" * len(format_string % headers))

for r in ec2.get_all_reservations(filters=filters):
groups = [g.name for g in r.groups]
for i in r.instances:
i.groups = ','.join(groups)
if options.tab:
print "\t".join(tuple(get_column(h, i) for h in headers))
print("\t".join(tuple(get_column(h, i) for h in headers)))
else:
print format_string % tuple(get_column(h, i) for h in headers)
print(format_string % tuple(get_column(h, i) for h in headers))


if __name__ == "__main__":
Expand Down
20 changes: 10 additions & 10 deletions bin/lss3
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def list_bucket(b, prefix=None, marker=None):
if not prefix.endswith("/"):
prefix = prefix + "/"
query = b.list(prefix=prefix, delimiter="/", marker=marker)
print "%s" % prefix
print("%s" % prefix)
else:
query = b.list(delimiter="/", marker=marker)

Expand All @@ -42,27 +42,27 @@ def list_bucket(b, prefix=None, marker=None):
elif g.permission == "FULL_CONTROL":
mode = "-rwxrwx"
if isinstance(k, Key):
print "%s\t%s\t%010s\t%s" % (mode, k.last_modified,
sizeof_fmt(size), k.name)
print("%s\t%s\t%010s\t%s" % (mode, k.last_modified,
sizeof_fmt(size), k.name))
else:
#If it's not a Key object, it doesn't have a last_modified time, so
#print nothing instead
print "%s\t%s\t%010s\t%s" % (mode, ' ' * 24,
sizeof_fmt(size), k.name)
print("%s\t%s\t%010s\t%s" % (mode, ' ' * 24,
sizeof_fmt(size), k.name))
total += size
print "=" * 80
print "\t\tTOTAL: \t%010s \t%i Files" % (sizeof_fmt(total), num)
print ("=" * 80)
print ("\t\tTOTAL: \t%010s \t%i Files" % (sizeof_fmt(total), num))


def list_buckets(s3, display_tags=False):
    """Print the name of every bucket, optionally with its tags.

    :param s3: An S3 connection object providing ``get_all_buckets()``.
    :param display_tags: When True, also print each bucket's tag key/value
        pairs, indented under the bucket name.
    """
    for b in s3.get_all_buckets():
        print(b.name)
        if display_tags:
            try:
                tags = b.get_tags()
                for tag in tags[0]:
                    print(" %s:%s" % (tag.key, tag.value))
            except S3ResponseError as e:
                # An untagged bucket answers 404; that is not an error here.
                if e.status != 404:
                    raise
Expand All @@ -87,7 +87,7 @@ def main():
sys.exit(0)

if options.tags:
print "-t option only works for the overall bucket list"
print("-t option only works for the overall bucket list")
sys.exit(1)

pairs = []
Expand Down
46 changes: 26 additions & 20 deletions bin/s3put
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ import sys
import os
import boto

from boto.compat import six

try:
# multipart portions copyright Fabian Topfstedt
# https://gist.github.com/924094
Expand All @@ -43,8 +45,12 @@ try:
except ImportError as err:
multipart_capable = False
usage_flag_multipart_capable = ""
if six.PY2:
attribute = 'message'
else:
attribute = 'msg'
usage_string_multipart_capable = '\n\n "' + \
err.message[len('No module named '):] + \
getattr(err, attribute)[len('No module named '):] + \
'" is missing for multipart support '


Expand Down Expand Up @@ -121,12 +127,12 @@ SYNOPSIS


def usage(status=1):
    """Print the module-level usage string and exit the process.

    :param status: Process exit status (default 1, signalling an error).
    """
    print(usage_string)
    sys.exit(status)


def submit_cb(bytes_so_far, total_bytes):
    """Progress callback for S3 transfers: report bytes sent so far.

    :param bytes_so_far: Number of bytes transferred up to this point.
    :param total_bytes: Total number of bytes in the transfer.
    """
    print('%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes))


def get_key_name(fullpath, prefix, key_prefix):
Expand All @@ -145,12 +151,12 @@ def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
Uploads a part with retries.
"""
if debug == 1:
print "_upload_part(%s, %s, %s)" % (source_path, offset, bytes)
print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))

def _upload(retries_left=amount_of_retries):
try:
if debug == 1:
print 'Start uploading part #%d ...' % part_num
print('Start uploading part #%d ...' % part_num)
conn = S3Connection(aws_key, aws_secret)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
Expand All @@ -161,21 +167,21 @@ def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
mp.upload_part_from_file(fp=fp, part_num=part_num,
cb=cb, num_cb=num_cb)
break
except Exception, exc:
except Exception as exc:
if retries_left:
_upload(retries_left=retries_left - 1)
else:
print 'Failed uploading part #%d' % part_num
print('Failed uploading part #%d' % part_num)
raise exc
else:
if debug == 1:
print '... Uploaded part #%d' % part_num
print('... Uploaded part #%d' % part_num)

_upload()

def check_valid_region(conn, region):
    """Exit with an error message if a region lookup produced no connection.

    :param conn: Connection object, or None when *region* was not recognized.
    :param region: Region name; used only in the error message.
    """
    if conn is None:
        print('Invalid region (%s)' % region)
        sys.exit(1)

def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
Expand Down Expand Up @@ -312,7 +318,7 @@ def main():
if multipart_capable:
multipart_requested = True
else:
print "multipart upload requested but not capable"
print("multipart upload requested but not capable")
sys.exit(4)
if o == '--region':
regions = boto.s3.regions()
Expand All @@ -327,7 +333,7 @@ def main():
usage(2)

if not bucket_name:
print "bucket name is required!"
print("bucket name is required!")
usage(3)

connect_args = {
Expand All @@ -352,23 +358,23 @@ def main():

# Classic region will be '', any other will have a name
if location:
print 'Bucket exists in %s but no host or region given!' % location
print('Bucket exists in %s but no host or region given!' % location)

# Override for EU, which is really Ireland according to the docs
if location == 'EU':
location = 'eu-west-1'

print 'Automatically setting region to %s' % location
print('Automatically setting region to %s' % location)

# Here we create a new connection, and then take the existing
# bucket and set it to use the new connection
c = boto.s3.connect_to_region(location, **connect_args)
c.debug = debug
b.connection = c
except Exception, e:
except Exception as e:
if debug > 0:
print e
print 'Could not get bucket region info, skipping...'
print(e)
print('Could not get bucket region info, skipping...')

existing_keys_to_check_against = []
files_to_check_for_upload = []
Expand All @@ -379,7 +385,7 @@ def main():
if os.path.isdir(path):
if no_overwrite:
if not quiet:
print 'Getting list of existing keys to check against'
print('Getting list of existing keys to check against')
for key in b.list(get_key_name(path, prefix, key_prefix)):
existing_keys_to_check_against.append(key.name)
for root, dirs, files in os.walk(path):
Expand All @@ -400,19 +406,19 @@ def main():

# we are trying to upload something unknown
else:
print "I don't know what %s is, so i can't upload it" % path
print("I don't know what %s is, so i can't upload it" % path)

for fullpath in files_to_check_for_upload:
key_name = get_key_name(fullpath, prefix, key_prefix)

if no_overwrite and key_name in existing_keys_to_check_against:
if b.get_key(key_name):
if not quiet:
print 'Skipping %s as it exists in s3' % fullpath
print('Skipping %s as it exists in s3' % fullpath)
continue

if not quiet:
print 'Copying %s to %s/%s' % (fullpath, bucket_name, key_name)
print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))

if not no_op:
# 0-byte files don't work and also don't need multipart upload
Expand Down

0 comments on commit bc126af

Please sign in to comment.