From c54dfa49dda6a7b3389dc230726293af3ffc68a3 Mon Sep 17 00:00:00 2001
From: Utkarsh Gupta
Date: Mon, 30 May 2022 20:05:19 +0530
Subject: [PATCH] update black to 22.3.0 (#2171)

---
 benchmarks/cluster_async.py         |   8 +-
 dev_requirements.txt                |   2 +-
 redis/asyncio/cluster.py            |   2 +-
 redis/asyncio/connection.py         |   2 +-
 redis/backoff.py                    |   6 +-
 redis/commands/core.py              | 131 +++++----------------------
 redis/commands/json/__init__.py     |   6 +-
 redis/commands/search/__init__.py   |  12 +--
 redis/commands/search/commands.py   |  21 +----
 redis/commands/search/field.py      |  11 +--
 redis/connection.py                 |  19 ++--
 redis/sentinel.py                   |   9 +-
 tests/test_asyncio/test_commands.py |  19 ++--
 tests/test_asyncio/test_encoding.py |  10 +--
 tests/test_asyncio/test_json.py     |  11 +--
 tests/test_asyncio/test_pipeline.py |   5 +-
 tests/test_asyncio/test_pubsub.py   |   4 +-
 tests/test_asyncio/test_search.py   |  45 +++-------
 tests/test_asyncio/test_sentinel.py |   2 +-
 tests/test_cluster.py               |  64 +++-----------
 tests/test_command_parser.py        |   8 +-
 tests/test_commands.py              | 100 ++++++----------------
 tests/test_connection_pool.py       |  70 ++++-----------
 tests/test_encoding.py              |   6 +-
 tests/test_function.py              |   4 +-
 tests/test_json.py                  |  23 ++---
 tests/test_pipeline.py              |   5 +-
 tests/test_search.py                |  74 ++++-------------
 tests/test_sentinel.py              |   2 +-
 29 files changed, 156 insertions(+), 525 deletions(-)

diff --git a/benchmarks/cluster_async.py b/benchmarks/cluster_async.py
index aec3f1c403..fd2ab4603f 100644
--- a/benchmarks/cluster_async.py
+++ b/benchmarks/cluster_async.py
@@ -206,8 +206,8 @@ async def main(loop, gather=None):
         host=host,
         port=port,
         password=password,
-        max_connections=2 ** 31,
-        max_connections_per_node=2 ** 31,
+        max_connections=2**31,
+        max_connections_per_node=2**31,
         readonly=False,
         reinitialize_steps=count,
         skip_full_coverage_check=True,
@@ -224,7 +224,7 @@ async def main(loop, gather=None):
         password=password,
         state_reload_interval=count,
         idle_connection_timeout=count,
-        pool_maxsize=2 ** 31,
+        pool_maxsize=2**31,
     )
     print(f"{loop} {gather} {await warmup(aiorc)} aioredis-cluster")
     print(await run(aiorc, gather=gather))
@@ -238,7 +238,7 @@ async def main(loop, gather=None):
         reinitialize_steps=count,
         read_from_replicas=False,
         decode_responses=False,
-        max_connections=2 ** 31,
+        max_connections=2**31,
     ) as rca:
         print(f"{loop} {gather} {await warmup(rca)} redispy")
         print(await run(rca, gather=gather))
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 942edd6d0f..31ae26ebea 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,5 +1,5 @@
 click==8.0.4
-black==21.11b1
+black==22.3.0
 flake8==4.0.1
 flynt~=0.69.0
 isort==5.10.1
diff --git a/redis/asyncio/cluster.py b/redis/asyncio/cluster.py
index c4e01d619a..39aa536de1 100644
--- a/redis/asyncio/cluster.py
+++ b/redis/asyncio/cluster.py
@@ -746,7 +746,7 @@ def __init__(
         host: str,
         port: int,
         server_type: Optional[str] = None,
-        max_connections: int = 2 ** 31,
+        max_connections: int = 2**31,
         connection_class: Type[Connection] = Connection,
         response_callbacks: Dict[str, Any] = RedisCluster.RESPONSE_CALLBACKS,
         **connection_kwargs: Any,
diff --git a/redis/asyncio/connection.py b/redis/asyncio/connection.py
index d9b09746ba..38465fc0d7 100644
--- a/redis/asyncio/connection.py
+++ b/redis/asyncio/connection.py
@@ -1390,7 +1390,7 @@ def __init__(
         max_connections: Optional[int] = None,
         **connection_kwargs,
     ):
-        max_connections = max_connections or 2 ** 31
+        max_connections = max_connections or 2**31
         if not isinstance(max_connections, int) or max_connections < 0:
             raise ValueError('"max_connections" must be a positive integer')
diff --git a/redis/backoff.py b/redis/backoff.py
index cbb4e73779..5ccdb919f3 100644
--- a/redis/backoff.py
+++ b/redis/backoff.py
@@ -49,7 +49,7 @@ def __init__(self, cap, base):
         self._base = base

     def compute(self, failures):
-        return min(self._cap, self._base * 2 ** failures)
+        return min(self._cap, self._base * 2**failures)


 class FullJitterBackoff(AbstractBackoff):
@@ -64,7 +64,7 @@ def __init__(self, cap, base):
         self._base = base

     def compute(self, failures):
-        return random.uniform(0, min(self._cap, self._base * 2 ** failures))
+        return random.uniform(0, min(self._cap, self._base * 2**failures))


 class EqualJitterBackoff(AbstractBackoff):
@@ -79,7 +79,7 @@ def __init__(self, cap, base):
         self._base = base

     def compute(self, failures):
-        temp = min(self._cap, self._base * 2 ** failures) / 2
+        temp = min(self._cap, self._base * 2**failures) / 2
         return temp + random.uniform(0, temp)

diff --git a/redis/commands/core.py b/redis/commands/core.py
index ad7d7a6a94..771fed9e0c 100644
--- a/redis/commands/core.py
+++ b/redis/commands/core.py
@@ -502,10 +502,7 @@ def client_info(self, **kwargs) -> ResponseT:
         return self.execute_command("CLIENT INFO", **kwargs)

     def client_list(
-        self,
-        _type: Union[str, None] = None,
-        client_id: List[EncodableT] = [],
-        **kwargs,
+        self, _type: Union[str, None] = None, client_id: List[EncodableT] = [], **kwargs
     ) -> ResponseT:
         """
         Returns a list of currently connected clients.
@@ -548,9 +545,7 @@ def client_getredir(self, **kwargs) -> ResponseT:
         return self.execute_command("CLIENT GETREDIR", **kwargs)

     def client_reply(
-        self,
-        reply: Union[Literal["ON"], Literal["OFF"], Literal["SKIP"]],
-        **kwargs,
+        self, reply: Union[Literal["ON"], Literal["OFF"], Literal["SKIP"]], **kwargs
     ) -> ResponseT:
         """
         Enable and disable redis server replies.
@@ -696,10 +691,7 @@ def client_setname(self, name: str, **kwargs) -> ResponseT:
         return self.execute_command("CLIENT SETNAME", name, **kwargs)

     def client_unblock(
-        self,
-        client_id: int,
-        error: bool = False,
-        **kwargs,
+        self, client_id: int, error: bool = False, **kwargs
     ) -> ResponseT:
         """
         Unblocks a connection by its client id.
@@ -1475,12 +1467,7 @@ def bitfield(
         """
         return BitFieldOperation(self, key, default_overflow=default_overflow)

-    def bitop(
-        self,
-        operation: str,
-        dest: KeyT,
-        *keys: KeyT,
-    ) -> ResponseT:
+    def bitop(self, operation: str, dest: KeyT, *keys: KeyT) -> ResponseT:
         """
         Perform a bitwise operation using ``operation`` between ``keys`` and
         store the result in ``dest``.
@@ -1826,11 +1813,7 @@ def keys(self, pattern: PatternT = "*", **kwargs) -> ResponseT:
         return self.execute_command("KEYS", pattern, **kwargs)

     def lmove(
-        self,
-        first_list: str,
-        second_list: str,
-        src: str = "LEFT",
-        dest: str = "RIGHT",
+        self, first_list: str, second_list: str, src: str = "LEFT", dest: str = "RIGHT"
     ) -> ResponseT:
         """
         Atomically returns and removes the first/last element of a list,
@@ -1996,12 +1979,7 @@ def pexpiretime(self, key: str) -> int:
         """
         return self.execute_command("PEXPIRETIME", key)

-    def psetex(
-        self,
-        name: KeyT,
-        time_ms: ExpiryT,
-        value: EncodableT,
-    ):
+    def psetex(self, name: KeyT, time_ms: ExpiryT, value: EncodableT):
         """
         Set the value of key ``name`` to ``value`` that expires in ``time_ms``
         milliseconds. ``time_ms`` can be represented by an integer or a Python
@@ -2022,10 +2000,7 @@ def pttl(self, name: KeyT) -> ResponseT:
         return self.execute_command("PTTL", name)

     def hrandfield(
-        self,
-        key: str,
-        count: int = None,
-        withvalues: bool = False,
+        self, key: str, count: int = None, withvalues: bool = False
     ) -> ResponseT:
         """
         Return a random field from the hash value stored at key.
@@ -2240,12 +2215,7 @@ def setnx(self, name: KeyT, value: EncodableT) -> ResponseT:
         """
         return self.execute_command("SETNX", name, value)

-    def setrange(
-        self,
-        name: KeyT,
-        offset: int,
-        value: EncodableT,
-    ) -> ResponseT:
+    def setrange(self, name: KeyT, offset: int, value: EncodableT) -> ResponseT:
         """
         Overwrite bytes in the value of ``name`` starting at ``offset`` with
         ``value``. If ``offset`` plus the length of ``value`` exceeds the
@@ -3259,10 +3229,7 @@ def smembers(self, name: str) -> Union[Awaitable[list], list]:
         return self.execute_command("SMEMBERS", name)

     def smismember(
-        self,
-        name: str,
-        values: List,
-        *args: List,
+        self, name: str, values: List, *args: List
     ) -> Union[Awaitable[List[bool]], List[bool]]:
         """
         Return whether each value in ``values`` is a member of the set ``name``
@@ -3291,9 +3258,7 @@ def spop(self, name: str, count: Optional[int] = None) -> Union[str, List, None]
         return self.execute_command("SPOP", name, *args)

     def srandmember(
-        self,
-        name: str,
-        number: Optional[int] = None,
+        self, name: str, number: Optional[int] = None
     ) -> Union[str, List, None]:
         """
         If ``number`` is None, returns a random member of set ``name``.
@@ -3346,12 +3311,7 @@ class StreamCommands(CommandsProtocol):
     see: https://redis.io/topics/streams-intro
     """

-    def xack(
-        self,
-        name: KeyT,
-        groupname: GroupT,
-        *ids: StreamIdT,
-    ) -> ResponseT:
+    def xack(self, name: KeyT, groupname: GroupT, *ids: StreamIdT) -> ResponseT:
         """
         Acknowledges the successful processing of one or more messages.
         name: name of the stream.
@@ -3576,10 +3536,7 @@ def xgroup_create(
         return self.execute_command(*pieces)

     def xgroup_delconsumer(
-        self,
-        name: KeyT,
-        groupname: GroupT,
-        consumername: ConsumerT,
+        self, name: KeyT, groupname: GroupT, consumername: ConsumerT
     ) -> ResponseT:
         """
         Remove a specific consumer from a consumer group.
@@ -3604,10 +3561,7 @@ def xgroup_destroy(self, name: KeyT, groupname: GroupT) -> ResponseT:
         return self.execute_command("XGROUP DESTROY", name, groupname)

     def xgroup_createconsumer(
-        self,
-        name: KeyT,
-        groupname: GroupT,
-        consumername: ConsumerT,
+        self, name: KeyT, groupname: GroupT, consumername: ConsumerT
     ) -> ResponseT:
         """
         Consumers in a consumer group are auto-created every time a new
@@ -4052,12 +4006,7 @@ def zdiffstore(self, dest: KeyT, keys: KeysT) -> ResponseT:
         pieces = [len(keys), *keys]
         return self.execute_command("ZDIFFSTORE", dest, *pieces)

-    def zincrby(
-        self,
-        name: KeyT,
-        amount: float,
-        value: EncodableT,
-    ) -> ResponseT:
+    def zincrby(self, name: KeyT, amount: float, value: EncodableT) -> ResponseT:
         """
         Increment the score of ``value`` in sorted set ``name`` by ``amount``

@@ -4066,10 +4015,7 @@ def zincrby(
         return self.execute_command("ZINCRBY", name, amount, value)

     def zinter(
-        self,
-        keys: KeysT,
-        aggregate: Union[str, None] = None,
-        withscores: bool = False,
+        self, keys: KeysT, aggregate: Union[str, None] = None, withscores: bool = False
     ) -> ResponseT:
         """
         Return the intersect of multiple sorted sets specified by ``keys``.
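
Note on the `2 ** 31` hunks above and in redis/backoff.py: they all come from one documented style change in black 22.x, which removes the spaces around the power operator when both operands are "simple" (names, numeric literals, attribute access). A minimal sketch of the rule, not taken from this patch:

    cap, base, failures = 10.0, 0.5, 3

    # black 21.11b1 formatted this as "base * 2 ** failures";
    # black 22.3.0 hugs the operator because both operands are simple:
    delay = min(cap, base * 2**failures)

    # a parenthesized operand is not "simple", so the spaces stay:
    padded = base ** (failures + 1)

    # the change is purely textual - both spellings evaluate identically:
    assert 2**31 == 2 ** 31 == 2147483648
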
@@ -4127,11 +4073,7 @@ def zlexcount(self, name, min, max):
         """
         return self.execute_command("ZLEXCOUNT", name, min, max)

-    def zpopmax(
-        self,
-        name: KeyT,
-        count: Union[int, None] = None,
-    ) -> ResponseT:
+    def zpopmax(self, name: KeyT, count: Union[int, None] = None) -> ResponseT:
         """
         Remove and return up to ``count`` members with
         the highest scores from the sorted set ``name``.
@@ -4142,11 +4084,7 @@ def zpopmax(
         options = {"withscores": True}
         return self.execute_command("ZPOPMAX", name, *args, **options)

-    def zpopmin(
-        self,
-        name: KeyT,
-        count: Union[int, None] = None,
-    ) -> ResponseT:
+    def zpopmin(self, name: KeyT, count: Union[int, None] = None) -> ResponseT:
         """
         Remove and return up to ``count`` members with
         the lowest scores from the sorted set ``name``.
@@ -4158,10 +4096,7 @@ def zpopmin(
         return self.execute_command("ZPOPMIN", name, *args, **options)

     def zrandmember(
-        self,
-        key: KeyT,
-        count: int = None,
-        withscores: bool = False,
+        self, key: KeyT, count: int = None, withscores: bool = False
     ) -> ResponseT:
         """
         Return a random element from the sorted set value stored at key.
@@ -4675,11 +4610,7 @@ def zunionstore(
         """
         return self._zaggregate("ZUNIONSTORE", dest, keys, aggregate)

-    def zmscore(
-        self,
-        key: KeyT,
-        members: List[str],
-    ) -> ResponseT:
+    def zmscore(self, key: KeyT, members: List[str]) -> ResponseT:
         """
         Returns the scores associated with the specified members
         in the sorted set stored at key.
@@ -5264,11 +5195,7 @@ def geoadd(
         return self.execute_command("GEOADD", *pieces)

     def geodist(
-        self,
-        name: KeyT,
-        place1: FieldT,
-        place2: FieldT,
-        unit: Union[str, None] = None,
+        self, name: KeyT, place1: FieldT, place2: FieldT, unit: Union[str, None] = None
     ) -> ResponseT:
         """
         Return the distance between ``place1`` and ``place2`` members of the
@@ -5407,10 +5334,7 @@ def georadiusbymember(
         )

     def _georadiusgeneric(
-        self,
-        command: str,
-        *args: EncodableT,
-        **kwargs: Union[EncodableT, None],
+        self, command: str, *args: EncodableT, **kwargs: Union[EncodableT, None]
     ) -> ResponseT:
         pieces = list(args)
         if kwargs["unit"] and kwargs["unit"] not in ("m", "km", "mi", "ft"):
@@ -5418,9 +5342,7 @@ def _georadiusgeneric(
         elif kwargs["unit"]:
             pieces.append(kwargs["unit"])
         else:
-            pieces.append(
-                "m",
-            )
+            pieces.append("m")

         if kwargs["any"] and kwargs["count"] is None:
             raise DataError("``any`` can't be provided without ``count``")
@@ -5577,10 +5499,7 @@ def geosearchstore(
         )

     def _geosearchgeneric(
-        self,
-        command: str,
-        *args: EncodableT,
-        **kwargs: Union[EncodableT, None],
+        self, command: str, *args: EncodableT, **kwargs: Union[EncodableT, None]
     ) -> ResponseT:
         pieces = list(args)

@@ -5814,9 +5733,7 @@ class FunctionCommands:
     """

     def function_load(
-        self,
-        code: str,
-        replace: Optional[bool] = False,
+        self, code: str, replace: Optional[bool] = False
     ) -> Union[Awaitable[str], str]:
         """
         Load a library to Redis.
diff --git a/redis/commands/json/__init__.py b/redis/commands/json/__init__.py
index 39983be9cb..7d55023e1e 100644
--- a/redis/commands/json/__init__.py
+++ b/redis/commands/json/__init__.py
@@ -19,11 +19,7 @@ class JSON(JSONCommands):
     """

     def __init__(
-        self,
-        client,
-        version=None,
-        decoder=JSONDecoder(),
-        encoder=JSONEncoder(),
+        self, client, version=None, decoder=JSONDecoder(), encoder=JSONEncoder()
     ):
         """
         Create a client for talking to json.
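
The signature hunks above all follow the second dominant pattern in this patch: a parameter list that was exploded one argument per line, ending in a trailing comma, is joined onto a single line (or as few lines as possible) once it fits within black's default 88-character limit. A hedged before/after sketch with a made-up function (`fetch` is illustrative, not part of redis-py):

    # as formatted under black 21.11b1, with the trailing comma keeping it exploded:
    def fetch(
        client,
        timeout=None,
        retries=3,
    ):
        return client, timeout, retries

    # the same source after black 22.3.0 - it fits in 88 columns,
    # so the signature collapses and the trailing comma is dropped:
    def fetch(client, timeout=None, retries=3):
        return client, timeout, retries
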
diff --git a/redis/commands/search/__init__.py b/redis/commands/search/__init__.py
index b1c0e8be73..923711b8c4 100644
--- a/redis/commands/search/__init__.py
+++ b/redis/commands/search/__init__.py
@@ -59,20 +59,12 @@ def add_document(
         if self.current_chunk >= self.chunk_size:
             self.commit()

-    def add_document_hash(
-        self,
-        doc_id,
-        score=1.0,
-        replace=False,
-    ):
+    def add_document_hash(self, doc_id, score=1.0, replace=False):
         """
         Add a hash to the batch query
         """
         self.client._add_document_hash(
-            doc_id,
-            conn=self._pipeline,
-            score=score,
-            replace=replace,
+            doc_id, conn=self._pipeline, score=score, replace=replace
         )
         self.current_chunk += 1
         self.total += 1
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
index 10b57624cd..bf6614777e 100644
--- a/redis/commands/search/commands.py
+++ b/redis/commands/search/commands.py
@@ -216,12 +216,7 @@ def _add_document(
         return self.execute_command(*args)

     def _add_document_hash(
-        self,
-        doc_id,
-        conn=None,
-        score=1.0,
-        language=None,
-        replace=False,
+        self, doc_id, conn=None, score=1.0, language=None, replace=False
     ):
         """
         Internal add_document_hash used for both batch and single doc indexing
@@ -293,13 +288,7 @@ def add_document(
             **fields,
         )

-    def add_document_hash(
-        self,
-        doc_id,
-        score=1.0,
-        language=None,
-        replace=False,
-    ):
+    def add_document_hash(self, doc_id, score=1.0, language=None, replace=False):
         """
         Add a hash document to the index.

@@ -313,11 +302,7 @@ def add_document_hash(
         - **language**: Specify the language used for document tokenization.
         """  # noqa
         return self._add_document_hash(
-            doc_id,
-            conn=None,
-            score=score,
-            language=language,
-            replace=replace,
+            doc_id, conn=None, score=score, language=language, replace=replace
         )

     def delete_document(self, doc_id, conn=None, delete_actual_document=False):
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
index 14328e9d3b..89ed97357a 100644
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -108,11 +108,7 @@ class TagField(Field):
     CASESENSITIVE = "CASESENSITIVE"

     def __init__(
-        self,
-        name: str,
-        separator: str = ",",
-        case_sensitive: bool = False,
-        **kwargs,
+        self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
     ):
         args = [Field.TAG, self.SEPARATOR, separator]
         if case_sensitive:
@@ -159,8 +155,5 @@ def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
             attr_li.extend([key, value])

         Field.__init__(
-            self,
-            name,
-            args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],
-            **kwargs,
+            self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
         )
diff --git a/redis/connection.py b/redis/connection.py
index e0dcfc6323..1bc2ae1f4e 100755
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -40,9 +40,7 @@
 except ImportError:
     ssl_available = False

-NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
-    BlockingIOError: errno.EWOULDBLOCK,
-}
+NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {BlockingIOError: errno.EWOULDBLOCK}

 if ssl_available:
     if hasattr(ssl, "SSLWantReadError"):
@@ -385,10 +383,7 @@ def __del__(self):
     def on_connect(self, connection, **kwargs):
         self._sock = connection._sock
         self._socket_timeout = connection.socket_timeout
-        kwargs = {
-            "protocolError": InvalidResponse,
-            "replyError": self.parse_error,
-        }
+        kwargs = {"protocolError": InvalidResponse, "replyError": self.parse_error}

         # hiredis < 0.1.3 doesn't support functions that create exceptions
         if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
@@ -1035,8 +1030,7 @@ def _connect(self):
             staple_ctx = self.ssl_ocsp_context

             staple_ctx.set_ocsp_client_callback(
-                ocsp_staple_verifier,
-                self.ssl_ocsp_expected_cert,
+                ocsp_staple_verifier, self.ssl_ocsp_expected_cert
             )

             # need another socket
@@ -1119,10 +1113,7 @@ def __init__(
         self._buffer_cutoff = 6000

     def repr_pieces(self):
-        pieces = [
-            ("path", self.path),
-            ("db", self.db),
-        ]
+        pieces = [("path", self.path), ("db", self.db)]
         if self.client_name:
             pieces.append(("client_name", self.client_name))
         return pieces
@@ -1288,7 +1279,7 @@ class initializer. In the case of conflicting arguments, querystring
     def __init__(
         self, connection_class=Connection, max_connections=None, **connection_kwargs
     ):
-        max_connections = max_connections or 2 ** 31
+        max_connections = max_connections or 2**31
         if not isinstance(max_connections, int) or max_connections < 0:
             raise ValueError('"max_connections" must be a positive integer')
diff --git a/redis/sentinel.py b/redis/sentinel.py
index b3f14907d0..d35abaf514 100644
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -51,10 +51,7 @@ def _connect_retry(self):
         raise SlaveNotFoundError  # Never be here

     def connect(self):
-        return self.retry.call_with_retry(
-            self._connect_retry,
-            lambda error: None,
-        )
+        return self.retry.call_with_retry(self._connect_retry, lambda error: None)

     def read_response(self, disable_decoding=False):
         try:
@@ -213,9 +210,7 @@ def __repr__(self):
         sentinel_addresses = []
         for sentinel in self.sentinels:
             sentinel_addresses.append(
-                "{host}:{port}".format_map(
-                    sentinel.connection_pool.connection_kwargs,
-                )
+                "{host}:{port}".format_map(sentinel.connection_pool.connection_kwargs)
             )
         return f'{type(self).__name__}<sentinels=[{",".join(sentinel_addresses)}]>'
diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py
index 650ce27956..6fa702d51c 100644
--- a/tests/test_asyncio/test_commands.py
+++ b/tests/test_asyncio/test_commands.py
@@ -2534,17 +2534,14 @@ async def test_xclaim(self, r: redis.Redis):

         # reclaim the message as consumer1, but use the justid argument
         # which only returns message ids
-        assert (
-            await r.xclaim(
-                stream,
-                group,
-                consumer1,
-                min_idle_time=0,
-                message_ids=(message_id,),
-                justid=True,
-            )
-            == [message_id]
-        )
+        assert await r.xclaim(
+            stream,
+            group,
+            consumer1,
+            min_idle_time=0,
+            message_ids=(message_id,),
+            justid=True,
+        ) == [message_id]

     @skip_if_server_version_lt("5.0.0")
     async def test_xclaim_trimmed(self, r: redis.Redis):
diff --git a/tests/test_asyncio/test_encoding.py b/tests/test_asyncio/test_encoding.py
index da2983738e..133ea3783c 100644
--- a/tests/test_asyncio/test_encoding.py
+++ b/tests/test_asyncio/test_encoding.py
@@ -68,18 +68,12 @@ async def test_list_encoding(self, r: redis.Redis):
 @pytest.mark.onlynoncluster
 class TestEncodingErrors:
     async def test_ignore(self, create_redis):
-        r = await create_redis(
-            decode_responses=True,
-            encoding_errors="ignore",
-        )
+        r = await create_redis(decode_responses=True, encoding_errors="ignore")
         await r.set("a", b"foo\xff")
         assert await r.get("a") == "foo"

     async def test_replace(self, create_redis):
-        r = await create_redis(
-            decode_responses=True,
-            encoding_errors="replace",
-        )
+        r = await create_redis(decode_responses=True, encoding_errors="replace")
         await r.set("a", b"foo\xff")
         assert await r.get("a") == "foo\ufffd"
diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py
index c203c6d15e..a045dd7c1a 100644
--- a/tests/test_asyncio/test_json.py
+++ b/tests/test_asyncio/test_json.py
@@ -155,16 +155,7 @@ async def test_arrindex(modclient: redis.Redis):
 @pytest.mark.redismod
 async def test_arrinsert(modclient: redis.Redis):
     await modclient.json().set("arr", Path.root_path(), [0, 4])
-    assert 5 == await modclient.json().arrinsert(
-        "arr",
-        Path.root_path(),
-        1,
-        *[
-            1,
-            2,
-            3,
-        ],
-    )
+    assert 5 == await modclient.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
     assert [0, 1, 2, 3, 4] == await modclient.json().get("arr")

     # test prepends
diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py
index 5bb1a8a4e0..50a1051f4f 100644
--- a/tests/test_asyncio/test_pipeline.py
+++ b/tests/test_asyncio/test_pipeline.py
@@ -37,10 +37,7 @@ async def test_pipeline(self, r):
     async def test_pipeline_memoryview(self, r):
         async with r.pipeline() as pipe:
             (pipe.set("a", memoryview(b"a1")).get("a"))
-            assert await pipe.execute() == [
-                True,
-                b"a1",
-            ]
+            assert await pipe.execute() == [True, b"a1"]

     async def test_pipeline_length(self, r):
         async with r.pipeline() as pipe:
diff --git a/tests/test_asyncio/test_pubsub.py b/tests/test_asyncio/test_pubsub.py
index 4037040f7c..6c76bf334e 100644
--- a/tests/test_asyncio/test_pubsub.py
+++ b/tests/test_asyncio/test_pubsub.py
@@ -425,9 +425,7 @@ def message_handler(self, message):

     @pytest_asyncio.fixture()
     async def r(self, create_redis):
-        return await create_redis(
-            decode_responses=True,
-        )
+        return await create_redis(decode_responses=True)

     async def test_channel_subscribe_unsubscribe(self, r: redis.Redis):
         p = r.pubsub()
diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py
index 69ae54d027..5aaa56f159 100644
--- a/tests/test_asyncio/test_search.py
+++ b/tests/test_asyncio/test_search.py
@@ -919,18 +919,14 @@ async def test_aggregations_groupby(modclient: redis.Redis):
         random_num=8,
     )

-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count(),
-    )
+    req = aggregations.AggregateRequest("redis").group_by("@parent", reducers.count())

     res = (await modclient.ft().aggregate(req)).rows[0]
     assert res[1] == "redis"
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count_distinct("@title"),
+        "@parent", reducers.count_distinct("@title")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -938,8 +934,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count_distinctish("@title"),
+        "@parent", reducers.count_distinctish("@title")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -947,8 +942,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.sum("@random_num"),
+        "@parent", reducers.sum("@random_num")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -956,8 +950,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "21"  # 10+8+3

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.min("@random_num"),
+        "@parent", reducers.min("@random_num")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -965,8 +958,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "3"  # min(10,8,3)

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.max("@random_num"),
+        "@parent", reducers.max("@random_num")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -974,8 +966,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "10"  # max(10,8,3)

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.avg("@random_num"),
+        "@parent", reducers.avg("@random_num")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -983,8 +974,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "7"  # (10+3+8)/3

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.stddev("random_num"),
+        "@parent", reducers.stddev("random_num")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -992,8 +982,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "3.60555127546"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.quantile("@random_num", 0.5),
+        "@parent", reducers.quantile("@random_num", 0.5)
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -1001,8 +990,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == "8"  # median of 3,8,10

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.tolist("@title"),
+        "@parent", reducers.tolist("@title")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -1010,16 +998,14 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.first_value("@title").alias("first"),
+        "@parent", reducers.first_value("@title").alias("first")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
     assert res == ["parent", "redis", "first", "RediSearch"]

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.random_sample("@title", 2).alias("random"),
+        "@parent", reducers.random_sample("@title", 2).alias("random")
     )

     res = (await modclient.ft().aggregate(req)).rows[0]
@@ -1031,12 +1017,7 @@ async def test_aggregations_groupby(modclient: redis.Redis):

 @pytest.mark.redismod
 async def test_aggregations_sort_by_and_limit(modclient: redis.Redis):
-    await modclient.ft().create_index(
-        (
-            TextField("t1"),
-            TextField("t2"),
-        )
-    )
+    await modclient.ft().create_index((TextField("t1"), TextField("t2")))

     await modclient.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
     await modclient.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
diff --git a/tests/test_asyncio/test_sentinel.py b/tests/test_asyncio/test_sentinel.py
index cd6810c1b5..4130e67400 100644
--- a/tests/test_asyncio/test_sentinel.py
+++ b/tests/test_asyncio/test_sentinel.py
@@ -203,7 +203,7 @@ async def test_master_for(cluster, sentinel, master_ip):
 @pytest.mark.onlynoncluster
 async def test_slave_for(cluster, sentinel):
     cluster.slaves = [
-        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
+        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False}
     ]
     slave = sentinel.slave_for("mymaster", db=9)
     assert await slave.ping()
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index de41a107dc..c74af202d8 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -44,12 +44,7 @@
 default_host = "127.0.0.1"
 default_port = 7000
 default_cluster_slots = [
-    [
-        0,
-        8191,
-        ["127.0.0.1", 7000, "node_0"],
-        ["127.0.0.1", 7003, "node_3"],
-    ],
+    [0, 8191, ["127.0.0.1", 7000, "node_0"], ["127.0.0.1", 7003, "node_3"]],
     [8192, 16383, ["127.0.0.1", 7001, "node_1"], ["127.0.0.1", 7002, "node_2"]],
 ]

@@ -905,16 +900,8 @@ def test_cluster_count_failure_report(self, r):
     @skip_if_redis_enterprise()
     def test_cluster_delslots(self):
         cluster_slots = [
-            [
-                0,
-                8191,
-                ["127.0.0.1", 7000, "node_0"],
-            ],
-            [
-                8192,
-                16383,
-                ["127.0.0.1", 7001, "node_1"],
-            ],
+            [0, 8191, ["127.0.0.1", 7000, "node_0"]],
+            [8192, 16383, ["127.0.0.1", 7001, "node_1"]],
         ]
         r = get_mocked_redis_client(
             host=default_host, port=default_port, cluster_slots=cluster_slots
@@ -2123,34 +2110,14 @@ def create_mocked_redis_node(host, port, **kwargs):
         """
         if port == 7000:
             result = [
-                [
-                    0,
-                    5460,
-                    ["127.0.0.1", 7000],
-                    ["127.0.0.1", 7003],
-                ],
-                [
-                    5461,
-                    10922,
-                    ["127.0.0.1", 7001],
-                    ["127.0.0.1", 7004],
-                ],
+                [0, 5460, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+                [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
             ]

         elif port == 7001:
             result = [
-                [
-                    0,
-                    5460,
-                    ["127.0.0.1", 7001],
-                    ["127.0.0.1", 7003],
-                ],
-                [
-                    5461,
-                    10922,
-                    ["127.0.0.1", 7000],
-                    ["127.0.0.1", 7004],
-                ],
+                [0, 5460, ["127.0.0.1", 7001], ["127.0.0.1", 7003]],
+                [5461, 10922, ["127.0.0.1", 7000], ["127.0.0.1", 7004]],
             ]
         else:
             result = []
@@ -2217,16 +2184,8 @@ def create_mocked_redis_node(host, port, **kwargs):
         def execute_command(*args, **kwargs):
             if args[0] == "CLUSTER SLOTS":
                 return [
-                    [
-                        0,
-                        8191,
-                        ["127.0.0.1", 7001, "node_1"],
-                    ],
-                    [
-                        8192,
-                        16383,
-                        ["127.0.0.1", 7002, "node_2"],
-                    ],
+                    [0, 8191, ["127.0.0.1", 7001, "node_1"]],
+                    [8192, 16383, ["127.0.0.1", 7002, "node_2"]],
                 ]
             elif args[0] == "INFO":
                 return {"cluster_enabled": True}
@@ -2673,10 +2632,7 @@ def test_pipeline_readonly(self, r):

         with r.pipeline() as readonly_pipe:
             readonly_pipe.get("foo71").zrange("foo88", 0, 5, withscores=True)
-            assert readonly_pipe.execute() == [
-                b"a1",
-                [(b"z1", 1.0), (b"z2", 4)],
-            ]
+            assert readonly_pipe.execute() == [b"a1", [(b"z1", 1.0), (b"z2", 4)]]

     def test_moved_redirection_on_slave_with_default(self, r):
         """
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
index 1457e27531..134909fdcd 100644
--- a/tests/test_command_parser.py
+++ b/tests/test_command_parser.py
@@ -75,13 +75,7 @@ def test_get_moveable_keys(self, r):
     @skip_if_server_version_lt("7.0.0")
     def test_get_eval_keys_with_0_keys(self, r):
         commands_parser = CommandsParser(r)
-        args = [
-            "EVAL",
-            "return {ARGV[1],ARGV[2]}",
-            0,
-            "key1",
-            "key2",
-        ]
+        args = ["EVAL", "return {ARGV[1],ARGV[2]}", 0, "key1", "key2"]
         assert commands_parser.get_keys(r, *args) == []

     def test_get_pubsub_keys(self, r):
diff --git a/tests/test_commands.py b/tests/test_commands.py
index a3972a5f22..d4a564c0ad 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -89,10 +89,7 @@ def teardown():
         request.addfinalizer(teardown)

         assert r.acl_setuser(
-            username,
-            enabled=True,
-            passwords=["+strong_password"],
-            commands=["+acl"],
+            username, enabled=True, passwords=["+strong_password"], commands=["+acl"]
         )

         assert r.auth(username=username, password="strong_password") is True
@@ -128,11 +125,7 @@ def teardown():

         request.addfinalizer(teardown)

-        r.acl_setuser(
-            username,
-            keys=["*"],
-            commands=["+set"],
-        )
+        r.acl_setuser(username, keys=["*"], commands=["+set"])
         assert r.acl_dryrun(username, "set", "key", "value") == b"OK"
         assert r.acl_dryrun(username, "get", "key").startswith(
             b"This user has no permissions to run the"
         )
@@ -3360,18 +3353,15 @@ def test_geosearch_with(self, r):
                 (2.19093829393386841, 41.43379028184083523),
             ]
         ]
-        assert (
-            r.geosearch(
-                "barcelona",
-                longitude=2.191,
-                latitude=41.433,
-                radius=1,
-                unit="km",
-                withdist=True,
-                withcoord=True,
-            )
-            == [[b"place1", 0.0881, (2.19093829393386841, 41.43379028184083523)]]
-        )
+        assert r.geosearch(
+            "barcelona",
+            longitude=2.191,
+            latitude=41.433,
+            radius=1,
+            unit="km",
+            withdist=True,
+            withcoord=True,
+        ) == [[b"place1", 0.0881, (2.19093829393386841, 41.43379028184083523)]]
         assert r.geosearch(
             "barcelona",
             longitude=2.191,
@@ -3701,7 +3691,7 @@ def test_xack(self, r):
     def test_xadd(self, r):
         stream = "stream"
         message_id = r.xadd(stream, {"foo": "bar"})
-        assert re.match(br"[0-9]+\-[0-9]+", message_id)
+        assert re.match(rb"[0-9]+\-[0-9]+", message_id)

         # explicit message id
         message_id = b"9999999999999999999-0"
@@ -3847,17 +3837,14 @@ def test_xclaim(self, r):

         # reclaim the message as consumer1, but use the justid argument
         # which only returns message ids
-        assert (
-            r.xclaim(
-                stream,
-                group,
-                consumer1,
-                min_idle_time=0,
-                message_ids=(message_id,),
-                justid=True,
-            )
-            == [message_id]
-        )
+        assert r.xclaim(
+            stream,
+            group,
+            consumer1,
+            min_idle_time=0,
+            message_ids=(message_id,),
+            justid=True,
+        ) == [message_id]

     @skip_if_server_version_lt("7.0.0")
     def test_xclaim_trimmed(self, r):
@@ -4228,34 +4215,17 @@ def test_xread(self, r):
         expected = [
             [
                 stream.encode(),
-                [
-                    get_stream_message(r, stream, m1),
-                    get_stream_message(r, stream, m2),
-                ],
+                [get_stream_message(r, stream, m1), get_stream_message(r, stream, m2)],
             ]
         ]
         # xread starting at 0 returns both messages
         assert r.xread(streams={stream: 0}) == expected

-        expected = [
-            [
-                stream.encode(),
-                [
-                    get_stream_message(r, stream, m1),
-                ],
-            ]
-        ]
+        expected = [[stream.encode(), [get_stream_message(r, stream, m1)]]]
         # xread starting at 0 and count=1 returns only the first message
         assert r.xread(streams={stream: 0}, count=1) == expected

-        expected = [
-            [
-                stream.encode(),
-                [
-                    get_stream_message(r, stream, m2),
-                ],
-            ]
-        ]
+        expected = [[stream.encode(), [get_stream_message(r, stream, m2)]]]
         # xread starting at m1 returns only the second message
         assert r.xread(streams={stream: m1}) == expected

@@ -4274,10 +4244,7 @@ def test_xreadgroup(self, r):
         expected = [
             [
                 stream.encode(),
-                [
-                    get_stream_message(r, stream, m1),
-                    get_stream_message(r, stream, m2),
-                ],
+                [get_stream_message(r, stream, m1), get_stream_message(r, stream, m2)],
             ]
         ]
         # xread starting at 0 returns both messages
@@ -4286,14 +4253,7 @@ def test_xreadgroup(self, r):
         r.xgroup_destroy(stream, group)
         r.xgroup_create(stream, group, 0)
-        expected = [
-            [
-                stream.encode(),
-                [
-                    get_stream_message(r, stream, m1),
-                ],
-            ]
-        ]
+        expected = [[stream.encode(), [get_stream_message(r, stream, m1)]]]
         # xread with count=1 returns only the first message
         assert r.xreadgroup(group, consumer, streams={stream: ">"}, count=1) == expected

@@ -4320,15 +4280,7 @@ def test_xreadgroup(self, r):
         r.xgroup_destroy(stream, group)
         r.xgroup_create(stream, group, "0")
         # delete all the messages in the stream
-        expected = [
-            [
-                stream.encode(),
-                [
-                    (m1, {}),
-                    (m2, {}),
-                ],
-            ]
-        ]
+        expected = [[stream.encode(), [(m1, {}), (m2, {})]]]
         r.xreadgroup(group, consumer, streams={stream: ">"})
         r.xtrim(stream, 0)
         assert r.xreadgroup(group, consumer, streams={stream: "0"}) == expected
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index 3e1fbaed27..a836f5b2c7 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -202,33 +202,23 @@ class TestConnectionPoolURLParsing:
     def test_hostname(self):
         pool = redis.ConnectionPool.from_url("redis://my.host")
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "my.host",
-        }
+        assert pool.connection_kwargs == {"host": "my.host"}

     def test_quoted_hostname(self):
         pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+")
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "my / host +=+",
-        }
+        assert pool.connection_kwargs == {"host": "my / host +=+"}

     def test_port(self):
         pool = redis.ConnectionPool.from_url("redis://localhost:6380")
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "port": 6380,
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "port": 6380}

     @skip_if_server_version_lt("6.0.0")
     def test_username(self):
         pool = redis.ConnectionPool.from_url("redis://myuser:@localhost")
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "username": "myuser",
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "username": "myuser"}

     @skip_if_server_version_lt("6.0.0")
     def test_quoted_username(self):
@@ -244,10 +234,7 @@ def test_quoted_username(self):
     def test_password(self):
         pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost")
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "password": "mypassword",
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "password": "mypassword"}

     def test_quoted_password(self):
         pool = redis.ConnectionPool.from_url(
@@ -272,26 +259,17 @@ def test_username_and_password(self):
     def test_db_as_argument(self):
         pool = redis.ConnectionPool.from_url("redis://localhost", db=1)
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "db": 1,
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "db": 1}

     def test_db_in_path(self):
         pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1)
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "db": 2,
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "db": 2}

     def test_db_in_querystring(self):
         pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1)
         assert pool.connection_class == redis.Connection
-        assert pool.connection_kwargs == {
-            "host": "localhost",
-            "db": 3,
-        }
+        assert pool.connection_kwargs == {"host": "localhost", "db": 3}

     def test_extra_typed_querystring_options(self):
         pool = redis.ConnectionPool.from_url(
@@ -351,9 +329,7 @@ def test_calling_from_subclass_returns_correct_instance(self):
     def test_client_creates_connection_pool(self):
         r = redis.Redis.from_url("redis://myhost")
         assert r.connection_pool.connection_class == redis.Connection
-        assert r.connection_pool.connection_kwargs == {
-            "host": "myhost",
-        }
+        assert r.connection_pool.connection_kwargs == {"host": "myhost"}

     def test_invalid_scheme_raises_error(self):
         with pytest.raises(ValueError) as cm:
@@ -368,18 +344,13 @@ class TestConnectionPoolUnixSocketURLParsing:
     def test_defaults(self):
         pool = redis.ConnectionPool.from_url("unix:///socket")
         assert pool.connection_class == redis.UnixDomainSocketConnection
-        assert pool.connection_kwargs == {
-            "path": "/socket",
-        }
+        assert pool.connection_kwargs == {"path": "/socket"}

     @skip_if_server_version_lt("6.0.0")
     def test_username(self):
         pool = redis.ConnectionPool.from_url("unix://myuser:@/socket")
         assert pool.connection_class == redis.UnixDomainSocketConnection
-        assert pool.connection_kwargs == {
-            "path": "/socket",
-            "username": "myuser",
-        }
+        assert pool.connection_kwargs == {"path": "/socket", "username": "myuser"}

     @skip_if_server_version_lt("6.0.0")
     def test_quoted_username(self):
@@ -395,10 +366,7 @@ def test_quoted_username(self):
     def test_password(self):
         pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket")
         assert pool.connection_class == redis.UnixDomainSocketConnection
-        assert pool.connection_kwargs == {
-            "path": "/socket",
-            "password": "mypassword",
-        }
+        assert pool.connection_kwargs == {"path": "/socket", "password": "mypassword"}

     def test_quoted_password(self):
         pool = redis.ConnectionPool.from_url(
@@ -423,18 +391,12 @@ def test_quoted_path(self):
     def test_db_as_argument(self):
         pool = redis.ConnectionPool.from_url("unix:///socket", db=1)
         assert pool.connection_class == redis.UnixDomainSocketConnection
-        assert pool.connection_kwargs == {
-            "path": "/socket",
-            "db": 1,
-        }
+        assert pool.connection_kwargs == {"path": "/socket", "db": 1}

     def test_db_in_querystring(self):
         pool = redis.ConnectionPool.from_url("unix:///socket?db=2", db=1)
         assert pool.connection_class == redis.UnixDomainSocketConnection
-        assert pool.connection_kwargs == {
-            "path": "/socket",
-            "db": 2,
-        }
+        assert pool.connection_kwargs == {"path": "/socket", "db": 2}

     def test_client_name_in_querystring(self):
         pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
@@ -460,9 +422,7 @@ class TestSSLConnectionURLParsing:
     def test_host(self):
         pool = redis.ConnectionPool.from_url("rediss://my.host")
         assert pool.connection_class == redis.SSLConnection
-        assert pool.connection_kwargs == {
-            "host": "my.host",
-        }
+        assert pool.connection_kwargs == {"host": "my.host"}

     def test_connection_class_override(self):
         class MyConnection(redis.SSLConnection):
diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index bd0f09fcc0..d7d1fb106d 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -13,11 +13,7 @@ def r(self, request):

     @pytest.fixture()
     def r_no_decode(self, request):
-        return _get_client(
-            redis.Redis,
-            request=request,
-            decode_responses=False,
-        )
+        return _get_client(redis.Redis, request=request, decode_responses=False)

     def test_simple_encoding(self, r_no_decode):
         unicode_string = chr(3456) + "abcd" + chr(3421)
diff --git a/tests/test_function.py b/tests/test_function.py
index e6c4bcd586..70f6b19869 100644
--- a/tests/test_function.py
+++ b/tests/test_function.py
@@ -63,7 +63,7 @@ def test_function_list(self, r):
                 "LUA",
                 "functions",
                 [["name", "myfunc", "description", None, "flags", ["no-writes"]]],
-            ],
+            ]
         ]
         assert r.function_list() == res
         assert r.function_list(library="*lib") == res
@@ -83,7 +83,7 @@ def test_function_list_on_cluster(self, r):
                 "LUA",
                 "functions",
                 [["name", "myfunc", "description", None, "flags", ["no-writes"]]],
-            ],
+            ]
         ]
         primaries = r.get_primaries()
         res = {}
diff --git a/tests/test_json.py b/tests/test_json.py
index 2b754564bc..1cc448c5f9 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -171,16 +171,7 @@ def test_arrindex(client):
 @pytest.mark.redismod
 def test_arrinsert(client):
     client.json().set("arr", Path.root_path(), [0, 4])
-    assert 5 == client.json().arrinsert(
-        "arr",
-        Path.root_path(),
-        1,
-        *[
-            1,
-            2,
-            3,
-        ],
-    )
+    assert 5 == client.json().arrinsert("arr", Path.root_path(), 1, *[1, 2, 3])
     assert [0, 1, 2, 3, 4] == client.json().get("arr")

     # test prepends
@@ -1011,10 +1002,8 @@ def test_resp_dollar(client):
                     ],
                     "A1_B3_C3": [1],
                 },
-                "A1_B4": {
-                    "A1_B4_C1": "foo",
-                },
-            },
+                "A1_B4": {"A1_B4_C1": "foo"},
+            }
         },
         "L2": {
             "a": {
@@ -1032,10 +1021,8 @@ def test_resp_dollar(client):
                     ],
                     "A2_B3_C3": [2],
                 },
-                "A2_B4": {
-                    "A2_B4_C1": "bar",
-                },
-            },
+                "A2_B4": {"A2_B4_C1": "bar"},
+            }
         },
     }
     client.json().set("doc1", "$", data)
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 0518893f07..03377d8350 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -33,10 +33,7 @@ def test_pipeline(self, r):
     def test_pipeline_memoryview(self, r):
         with r.pipeline() as pipe:
             (pipe.set("a", memoryview(b"a1")).get("a"))
-            assert pipe.execute() == [
-                True,
-                b"a1",
-            ]
+            assert pipe.execute() == [True, b"a1"]

     def test_pipeline_length(self, r):
         with r.pipeline() as pipe:
diff --git a/tests/test_search.py b/tests/test_search.py
index aee37cdd6f..dba914aeaa 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -888,18 +888,14 @@ def test_aggregations_groupby(client):
         random_num=8,
     )

-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count(),
-    )
+    req = aggregations.AggregateRequest("redis").group_by("@parent", reducers.count())

     res = client.ft().aggregate(req).rows[0]
     assert res[1] == "redis"
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count_distinct("@title"),
+        "@parent", reducers.count_distinct("@title")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -907,8 +903,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.count_distinctish("@title"),
+        "@parent", reducers.count_distinctish("@title")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -916,8 +911,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "3"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.sum("@random_num"),
+        "@parent", reducers.sum("@random_num")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -925,8 +919,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "21"  # 10+8+3

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.min("@random_num"),
+        "@parent", reducers.min("@random_num")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -934,8 +927,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "3"  # min(10,8,3)

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.max("@random_num"),
+        "@parent", reducers.max("@random_num")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -943,8 +935,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "10"  # max(10,8,3)

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.avg("@random_num"),
+        "@parent", reducers.avg("@random_num")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -953,8 +944,7 @@ def test_aggregations_groupby(client):
     assert res[index + 1] == "7"  # (10+3+8)/3

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.stddev("random_num"),
+        "@parent", reducers.stddev("random_num")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -962,8 +952,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "3.60555127546"

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.quantile("@random_num", 0.5),
+        "@parent", reducers.quantile("@random_num", 0.5)
     )

     res = client.ft().aggregate(req).rows[0]
@@ -971,8 +960,7 @@ def test_aggregations_groupby(client):
     assert res[3] == "8"  # median of 3,8,10

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.tolist("@title"),
+        "@parent", reducers.tolist("@title")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -980,16 +968,14 @@ def test_aggregations_groupby(client):
     assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.first_value("@title").alias("first"),
+        "@parent", reducers.first_value("@title").alias("first")
     )

     res = client.ft().aggregate(req).rows[0]
     assert res == ["parent", "redis", "first", "RediSearch"]

     req = aggregations.AggregateRequest("redis").group_by(
-        "@parent",
-        reducers.random_sample("@title", 2).alias("random"),
+        "@parent", reducers.random_sample("@title", 2).alias("random")
     )

     res = client.ft().aggregate(req).rows[0]
@@ -1001,12 +987,7 @@ def test_aggregations_groupby(client):

 @pytest.mark.redismod
 def test_aggregations_sort_by_and_limit(client):
-    client.ft().create_index(
-        (
-            TextField("t1"),
-            TextField("t2"),
-        )
-    )
+    client.ft().create_index((TextField("t1"), TextField("t2")))

     client.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
     client.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
@@ -1039,12 +1020,7 @@ def test_aggregations_sort_by_and_limit(client):

 @pytest.mark.redismod
 def test_aggregations_load(client):
-    client.ft().create_index(
-        (
-            TextField("t1"),
-            TextField("t2"),
-        )
-    )
+    client.ft().create_index((TextField("t1"), TextField("t2")))

     client.ft().client.hset("doc1", mapping={"t1": "hello", "t2": "world"})
@@ -1093,10 +1069,7 @@ def test_aggregations_apply(client):
 @pytest.mark.redismod
 def test_aggregations_filter(client):
     client.ft().create_index(
-        (
-            TextField("name", sortable=True),
-            NumericField("age", sortable=True),
-        )
+        (TextField("name", sortable=True), NumericField("age", sortable=True))
     )

     client.ft().client.hset("doc1", mapping={"name": "bar", "age": "25"})
@@ -1341,10 +1314,7 @@ def test_search_return_fields(client):

     # create index on
     definition = IndexDefinition(index_type=IndexType.JSON)
-    SCHEMA = (
-        TextField("$.t"),
-        NumericField("$.flt"),
-    )
+    SCHEMA = (TextField("$.t"), NumericField("$.flt"))
     client.ft().create_index(SCHEMA, definition=definition)
     waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
@@ -1363,11 +1333,7 @@ def test_synupdate(client):
     definition = IndexDefinition(index_type=IndexType.HASH)
     client.ft().create_index(
-        (
-            TextField("title"),
-            TextField("body"),
-        ),
-        definition=definition,
+        (TextField("title"), TextField("body")), definition=definition
     )

     client.ft().synupdate("id1", True, "boy", "child", "offspring")
@@ -1386,11 +1352,7 @@ def test_syndump(client):
     definition = IndexDefinition(index_type=IndexType.HASH)
     client.ft().create_index(
-        (
-            TextField("title"),
-            TextField("body"),
-        ),
-        definition=definition,
+        (TextField("title"), TextField("body")), definition=definition
     )

     client.ft().synupdate("id1", False, "boy", "child", "offspring")
diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
index 0357443a14..8542a0bfc3 100644
--- a/tests/test_sentinel.py
+++ b/tests/test_sentinel.py
@@ -188,7 +188,7 @@ def test_master_for(cluster, sentinel, master_ip):
 @pytest.mark.onlynoncluster
 def test_slave_for(cluster, sentinel):
     cluster.slaves = [
-        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
+        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False}
    ]
     slave = sentinel.slave_for("mymaster", db=9)
     assert slave.ping()
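
One last recurring pattern, visible in the `cluster.slaves` hunks of both sentinel test files and in tests/test_function.py: when a multi-line collection holds a single element, black 22.3.0 drops the trailing comma after that element while keeping the collection exploded. A minimal sketch, with the values copied from the sentinel tests above:

    # black 21.11b1 output:
    slaves = [
        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
    ]

    # black 22.3.0 output for the same source - same layout, no trailing comma:
    slaves = [
        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False}
    ]

Because all of these rewrites are mechanical, the patch can in principle be regenerated by pinning black==22.3.0 (as dev_requirements.txt now does) and rerunning black over the repository; running black with its --check flag in CI then keeps the tree in the new style.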