From c20fcb1577fd55366fb1c9685448882876956c7e Mon Sep 17 00:00:00 2001
From: Taddes
Date: Wed, 4 Sep 2024 15:34:27 -0400
Subject: [PATCH] rectify flake8 issues

---
 .flake8                                |  2 +-
 tools/spanner/write_batch.py           | 12 ++++-------
 tools/user_migration/old/dump_mysql.py | 30 ++++++++++++--------------
 3 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/.flake8 b/.flake8
index 775aa6715a..8cf3f77841 100644
--- a/.flake8
+++ b/.flake8
@@ -4,5 +4,5 @@ exclude =
     __pycache__,
     venv/
 # E203: Whitespace before ':'. Addresses a discrepancy with Black slice formatting.
-ignore = E203
+extend-ignore = E203
 max-line-length = 99
diff --git a/tools/spanner/write_batch.py b/tools/spanner/write_batch.py
index 87b006bfdd..6fcd5d4d0e 100644
--- a/tools/spanner/write_batch.py
+++ b/tools/spanner/write_batch.py
@@ -80,7 +80,7 @@ def load(instance, db, coll_id, name):
     spanner_client = spanner.Client()
     instance = spanner_client.instance(instance)
     db = instance.database(db)
-    print("{name} Db: {db}".format(name=name, db=db))
+    print(f"{name} Db: {db}")
     start = datetime.now()

     def create_user(txn):
@@ -102,21 +102,17 @@ def create_user(txn):
     try:
         db.run_in_transaction(create_user)
         print(
-            "{name} Created user (fxa_uid: {uid}, fxa_kid: {kid})".format(
-                name=name, uid=fxa_uid, kid=fxa_kid
-            )
+            f"{name} Created user (fxa_uid: {fxa_uid}, fxa_kid: {fxa_kid})"
         )
     except AlreadyExists:
         print(
-            "{name} Existing user (fxa_uid: {uid}}, fxa_kid: {kid}})".format(
-                name=name, uid=fxa_uid, kid=fxa_kid
-            )
+            f"{name} Existing user (fxa_uid: {fxa_uid}, fxa_kid: {fxa_kid})"
         )

     # approximately 1892 bytes
     rlen = 0

-    print("{name} Loading..".format(name=name))
+    print(f"{name} Loading..")
     for j in range(BATCHES):
         records = []
         for i in range(BATCH_SIZE):
diff --git a/tools/user_migration/old/dump_mysql.py b/tools/user_migration/old/dump_mysql.py
index 25b430414d..4532fd152c 100644
--- a/tools/user_migration/old/dump_mysql.py
+++ b/tools/user_migration/old/dump_mysql.py
@@ -66,7 +66,7 @@ def conf_db(dsn):
     dsn = urlparse(dsn)
     """
     if dsn.scheme != "mysql":
-        raise BadDSNException("Invalid MySQL dsn: {}".format(dsn))
+        raise BadDSNException(f"Invalid MySQL dsn: {dsn}")
     """
     connection = connector.connect(
         user=dsn.username,
@@ -138,7 +138,7 @@ def dump_user_collections(schema, dsn, args):
     db = conf_db(dsn)
     cursor = db.cursor()
     out_file = args.output.rsplit(".", 1)
-    out_file_name = "{}_user_collections.{}".format(out_file[0], out_file[1])
+    out_file_name = f"{out_file[0]}_user_collections.{out_file[1]}"
     writer = DataFileWriter(open(out_file_name, "wb"), DatumWriter(), schema)
     sql = """
         SELECT userid, collection, last_modified from user_collections
@@ -164,7 +164,7 @@ def dump_user_collections(schema, dsn, args):
                 pdb.set_trace()
                 print(ex)
             row += 1
-        print("Dumped {} user_collection rows in {} seconds".format(row, time.time() - start))
+        print(f"Dumped {row} user_collection rows in {time.time() - start} seconds")
     finally:
         writer.close()
         cursor.close()
@@ -181,13 +181,11 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):

     # ttl => expiry
     ivre = re.compile(r'("IV": ?"[^"]+")')
-    print("Querying.... bso{} @{}".format(bso_number, chunk_offset))
-    sql = """
+    print(f"Querying.... bso{bso_number} @{chunk_offset}")
+    sql = f"""
         SELECT userid, collection, id, ttl, modified, payload,
-        sortindex from bso{} LIMIT {} OFFSET {}""".format(
-        bso_number, args.limit, chunk_offset
-    )
+        sortindex from bso{bso_number} LIMIT {args.limit} OFFSET {chunk_offset}"""

     cursor = db.cursor()
     user = None
     row_count = 0
@@ -215,11 +213,11 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):
             )
             row_count += 1
             if (chunk_offset + row_count) % 1000 == 0:
-                print("BSO:{} Row: {}".format(bso_number, chunk_offset + row_count))
+                print(f"BSO:{bso_number} Row: {chunk_offset + row_count}")
             if row_count >= MAX_ROWS:
                 break
     except Exception as e:
-        print("Deadline hit at: {} ({})".format(chunk_offset + row_count, e))
+        print(f"Deadline hit at: {chunk_offset + row_count} ({e})")
     finally:
         cursor.close()
     return row_count
@@ -228,7 +226,7 @@ def dump_rows(bso_number, chunk_offset, db, writer, args):
 def count_rows(db, bso_num=0):
     cursor = db.cursor()
     try:
-        cursor.execute("SELECT Count(*) from bso{}".format(bso_num))
+        cursor.execute(f"SELECT Count(*) from bso{bso_num}")
         return cursor.fetchone()[0]
     finally:
         cursor.close()
@@ -242,8 +240,8 @@ def dump_data(bso_number, schema, dsn, args):
     out_file = args.output.rsplit(".", 1)
     row_count = count_rows(db, bso_number)
     for chunk in range(max(1, math.trunc(math.ceil(row_count / MAX_ROWS)))):
-        print("Dumping {} rows from bso#{} into chunk {}".format(row_count, bso_number, chunk))
-        out_file_name = "{}_{}_{}.{}".format(out_file[0], bso_number, hex(chunk), out_file[1])
+        print(f"Dumping {row_count} rows from bso#{bso_number} into chunk {chunk}")
+        out_file_name = f"{out_file[0]}_{bso_number}_{hex(chunk)}.{out_file[1]}"
         writer = DataFileWriter(open(out_file_name, "wb"), DatumWriter(), schema)
         rows = dump_rows(
             bso_number=bso_number, chunk_offset=offset, db=db, writer=writer, args=args
@@ -266,15 +264,15 @@ def main():
         read_in_token_file(args.token_file)
     start = time.time()
     for dsn in dsns:
-        print("Starting: {}".format(dsn))
+        print(f"Starting: {dsn}")
         try:
             if not args.skip_collections:
                 dump_user_collections(col_schema, dsn, args)
             for bso_num in range(args.start_bso, args.end_bso + 1):
                 rows = dump_data(bso_num, schema, dsn, args)
         except Exception as ex:
-            print("Could not process {}: {}".format(dsn, ex))
-        print("Dumped: {} rows in {} seconds".format(rows, time.time() - start))
+            print(f"Could not process {dsn}: {ex}")
+        print(f"Dumped: {rows} rows in {time.time() - start} seconds")


 if __name__ == "__main__":
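
Note on the .flake8 hunk: flake8's `ignore` option replaces the tool's default
ignore list (E121,E123,E126,E226,E24,E704,W503,W504), while `extend-ignore`
appends to it, so `extend-ignore = E203` suppresses E203 without re-enabling
those defaults. E203 itself is the check that disagrees with Black's slice
formatting; a minimal illustration (hypothetical snippet, not from this repo):

    records = list(range(100))
    offset = 10
    # Black inserts a space before ":" when a slice bound is a compound
    # expression; pycodestyle flags it as E203 ("whitespace before ':'").
    chunk = records[offset + 1 :]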
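
Note on the f-string SQL in dump_rows: building the query with an f-string
reproduces the old `.format()` behavior exactly, so nothing changes at runtime.
If the limit/offset values ever came from untrusted input, binding them as
query parameters would be safer; a sketch assuming the mysql-connector cursor
this script already uses (the table name cannot be bound, so bso_number stays
interpolated):

    sql = f"""
        SELECT userid, collection, id, ttl, modified, payload,
        sortindex from bso{bso_number} LIMIT %s OFFSET %s"""
    cursor.execute(sql, (args.limit, chunk_offset))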