
Commit

rectify flake8 issues
taddes committed Sep 4, 2024
1 parent 9e2c787 commit c20fcb1
Showing 3 changed files with 19 additions and 25 deletions.
.flake8 (2 changes: 1 addition & 1 deletion)
@@ -4,5 +4,5 @@ exclude =
__pycache__,
venv/
# E203: Whitespace before ':'. Addresses a discrepancy with Black slice formatting.
- ignore = E203
+ extend-ignore = E203
max-line-length = 99
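
For context on the one-line change above: flake8's ignore option replaces the tool's default ignore list, whereas extend-ignore appends to it, so the default suppressions stay in effect and only E203 is added. E203 comes up because Black pads the colon in slices whose bounds are expressions, which pycodestyle then reports as whitespace before ':'. A minimal, hypothetical snippet (not from this repository) showing the kind of line that triggers it:

    # Hypothetical example: Black keeps a space before the slice colon when
    # the bounds are expressions, and flake8/pycodestyle reports that as E203.
    rows = list(range(10))
    offset, limit = 2, 5
    chunk = rows[offset + 1 : offset + limit]  # E203 unless the code is ignored
    print(chunk)  # [3, 4, 5, 6]
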
tools/spanner/write_batch.py (12 changes: 4 additions & 8 deletions)
@@ -80,7 +80,7 @@ def load(instance, db, coll_id, name):
spanner_client = spanner.Client()
instance = spanner_client.instance(instance)
db = instance.database(db)
print("{name} Db: {db}".format(name=name, db=db))
print(f"{name} Db: {db}")
start = datetime.now()

def create_user(txn):
@@ -102,21 +102,17 @@ def create_user(txn):
try:
db.run_in_transaction(create_user)
print(
"{name} Created user (fxa_uid: {uid}, fxa_kid: {kid})".format(
name=name, uid=fxa_uid, kid=fxa_kid
)
f"{name} Created user (fxa_uid: {fxa_uid}, fxa_kid: {fxa_kid})"
)
except AlreadyExists:
print(
"{name} Existing user (fxa_uid: {uid}}, fxa_kid: {kid}})".format(
name=name, uid=fxa_uid, kid=fxa_kid
)
f"{name} Existing user (fxa_uid: {fxa_uid}), fxa_kid: {fxa_kid})"
)

# approximately 1892 bytes
rlen = 0

print("{name} Loading..".format(name=name))
print(f"{name} Loading..")
for j in range(BATCHES):
records = []
for i in range(BATCH_SIZE):
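
The write_batch.py changes above replace keyword-style .format() calls with f-strings that interpolate the same values. A standalone sketch of the pattern, with made-up values for name and db (the real ones come from the load() arguments):

    # Sketch only: both forms build the same string.
    name = "worker-1"          # made-up value
    db = "example-database"    # made-up value
    old_style = "{name} Db: {db}".format(name=name, db=db)
    new_style = f"{name} Db: {db}"
    assert old_style == new_style
    print(new_style)  # worker-1 Db: example-database
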
tools/user_migration/old/dump_mysql.py (30 changes: 14 additions & 16 deletions)
@@ -66,7 +66,7 @@ def conf_db(dsn):
dsn = urlparse(dsn)
"""
if dsn.scheme != "mysql":
raise BadDSNException("Invalid MySQL dsn: {}".format(dsn))
raise BadDSNException(f"Invalid MySQL dsn: {dsn}")
"""
connection = connector.connect(
user=dsn.username,
@@ -138,7 +138,7 @@ def dump_user_collections(schema, dsn, args):
db = conf_db(dsn)
cursor = db.cursor()
out_file = args.output.rsplit(".", 1)
out_file_name = "{}_user_collections.{}".format(out_file[0], out_file[1])
out_file_name = f"{out_file[0]}_user_collections.{out_file[1]}"
writer = DataFileWriter(open(out_file_name, "wb"), DatumWriter(), schema)
sql = """
SELECT userid, collection, last_modified from user_collections
@@ -164,7 +164,7 @@ def dump_user_collections(schema, dsn, args):
pdb.set_trace()
print(ex)
row += 1
print("Dumped {} user_collection rows in {} seconds".format(row, time.time() - start))
print(f"Dumped {row} user_collection rows in {time.time() - start} seconds")
finally:
writer.close()
cursor.close()
@@ -181,13 +181,11 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):
# ttl => expiry

ivre = re.compile(r'("IV": ?"[^"]+")')
print("Querying.... bso{} @{}".format(bso_number, chunk_offset))
sql = """
print(f"Querying.... bso{bso_number} @{chunk_offset}")
sql = f"""
SELECT userid, collection, id,
ttl, modified, payload,
- sortindex from bso{} LIMIT {} OFFSET {}""".format(
- bso_number, args.limit, chunk_offset
- )
+ sortindex from bso{bso_number} LIMIT {args.limit} OFFSET {chunk_offset}"""
cursor = db.cursor()
user = None
row_count = 0
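
For clarity, the new f-string above builds the same query text the removed .format() call produced; only the bso table suffix and the paging values are interpolated. A sketch with assumed values (bso_number=0, a limit of 1000, offset 0; in the tool the limit comes from args.limit):

    # Assumed values, for illustration only.
    bso_number, limit, chunk_offset = 0, 1000, 0
    sql = f"""
    SELECT userid, collection, id,
    ttl, modified, payload,
    sortindex from bso{bso_number} LIMIT {limit} OFFSET {chunk_offset}"""
    print(sql)  # ... sortindex from bso0 LIMIT 1000 OFFSET 0
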
@@ -215,11 +213,11 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):
)
row_count += 1
if (chunk_offset + row_count) % 1000 == 0:
print("BSO:{} Row: {}".format(bso_number, chunk_offset + row_count))
print(f"BSO:{bso_number} Row: {chunk_offset + row_count}")
if row_count >= MAX_ROWS:
break
except Exception as e:
print("Deadline hit at: {} ({})".format(chunk_offset + row_count, e))
print(f"Deadline hit at: {chunk_offset + row_count} ({e})")
finally:
cursor.close()
return row_count
@@ -228,7 +226,7 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):
def count_rows(db, bso_num=0):
cursor = db.cursor()
try:
cursor.execute("SELECT Count(*) from bso{}".format(bso_num))
cursor.execute(f"SELECT Count(*) from bso{bso_num}")
return cursor.fetchone()[0]
finally:
cursor.close()
@@ -242,8 +240,8 @@ def dump_data(bso_number, schema, dsn, args):
out_file = args.output.rsplit(".", 1)
row_count = count_rows(db, bso_number)
for chunk in range(max(1, math.trunc(math.ceil(row_count / MAX_ROWS)))):
print("Dumping {} rows from bso#{} into chunk {}".format(row_count, bso_number, chunk))
out_file_name = "{}_{}_{}.{}".format(out_file[0], bso_number, hex(chunk), out_file[1])
print(f"Dumping {row_count} rows from bso#{bso_number} into chunk {chunk}")
out_file_name = f"{out_file[0]}_{bso_number}_{hex(chunk)}.{out_file[1]}"
writer = DataFileWriter(open(out_file_name, "wb"), DatumWriter(), schema)
rows = dump_rows(
bso_number=bso_number, chunk_offset=offset, db=db, writer=writer, args=args
@@ -266,15 +264,15 @@ def main():
read_in_token_file(args.token_file)
start = time.time()
for dsn in dsns:
print("Starting: {}".format(dsn))
print(f"Starting: {dsn}")
try:
if not args.skip_collections:
dump_user_collections(col_schema, dsn, args)
for bso_num in range(args.start_bso, args.end_bso + 1):
rows = dump_data(bso_num, schema, dsn, args)
except Exception as ex:
print("Could not process {}: {}".format(dsn, ex))
print("Dumped: {} rows in {} seconds".format(rows, time.time() - start))
print(f"Could not process {dsn}: {ex}")
print(f"Dumped: {rows} rows in {time.time() - start} seconds")


if __name__ == "__main__":
