Skip to content

Commit

Permalink
style fix (#13)
Browse files — browse the repository at this point in the history
  • Loading branch information
zach-iee authored Aug 24, 2023
1 parent 1856a34 commit e4408a9
Show file tree
Hide file tree
Showing 5 changed files with 141 additions and 67 deletions.
2 changes: 1 addition & 1 deletion redis/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@
from redis.backoff import default_backoff
from redis.client import CaseInsensitiveDict, PubSub, Redis, parse_scan
from redis.commands import READ_COMMANDS, CommandsParser, RedisClusterCommands
from redis.connection import ConnectionPool, DefaultParser, Encoder, parse_url
from redis.commands.helpers import list_or_args
from redis.connection import ConnectionPool, DefaultParser, Encoder, parse_url
from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
from redis.exceptions import (
AskError,
Expand Down
10 changes: 8 additions & 2 deletions redis/connection.py
Original file line number Diff line number Diff line change
Expand Up @@ -883,8 +883,14 @@ def pack_command(self, *args):
# server. If you need to send `COMMAND GETKEYS` to the server, please reach out
# to Doogie and Zach to discuss the use case.
# ref: https://github.com/redis/redis/pull/12380
if len(args) > 1 and args[0].lower() == b'command' and args[1].lower().startswith(b'getkeys'):
raise Exception(f'Redis command "{args[0].decode()} {args[1].decode()}" is not supported')
if (
len(args) > 1
and args[0].lower() == b"command"
and args[1].lower().startswith(b"getkeys")
):
raise Exception(
f'Redis command "command {args[1].decode()}" is not supported'
)

buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))

Expand Down
53 changes: 36 additions & 17 deletions tests/test_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -1757,7 +1757,9 @@ def test_cluster_zinter(self, r):
assert r.zinter(["{foo}a", "{foo}b", "{foo}c"]) == [b"a3", b"a1"]
# invalid aggregation
with pytest.raises(DataError):
r.zinter(["{foo}a", "{foo}b", "{foo}c"], aggregate="foo", withscores=True)
r.zinter(
["{foo}a", "{foo}b", "{foo}c"], aggregate="foo", withscores=True
)
# aggregate with SUM
assert r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
(b"a3", 8),
Expand All @@ -1772,18 +1774,20 @@ def test_cluster_zinter(self, r):
["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
) == [(b"a1", 1), (b"a3", 1)]
# with weights
assert r.zinter({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
(b"a3", 20),
(b"a1", 23),
]
assert r.zinter(
{"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True
) == [(b"a3", 20), (b"a1", 23)]

def test_cluster_zinterstore_sum(self, r):
with pytest.raises(Exception):
r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
assert r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 2
assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 8), (b"a1", 9)]
assert r.zrange("{foo}d", 0, -1, withscores=True) == [
(b"a3", 8),
(b"a1", 9),
]

def test_cluster_zinterstore_max(self, r):
with pytest.raises(Exception):
Expand All @@ -1794,7 +1798,10 @@ def test_cluster_zinterstore_max(self, r):
r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
== 2
)
assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 5), (b"a1", 6)]
assert r.zrange("{foo}d", 0, -1, withscores=True) == [
(b"a3", 5),
(b"a1", 6),
]

def test_cluster_zinterstore_min(self, r):
with pytest.raises(Exception):
Expand All @@ -1805,15 +1812,21 @@ def test_cluster_zinterstore_min(self, r):
r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
== 2
)
assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a1", 1), (b"a3", 3)]
assert r.zrange("{foo}d", 0, -1, withscores=True) == [
(b"a1", 1),
(b"a3", 3),
]

def test_cluster_zinterstore_with_weight(self, r):
with pytest.raises(Exception):
r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
assert r.zinterstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 2
assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 20), (b"a1", 23)]
assert r.zrange("{foo}d", 0, -1, withscores=True) == [
(b"a3", 20),
(b"a1", 23),
]

@skip_if_server_version_lt("4.9.0")
def test_cluster_bzpopmax(self, r):
Expand Down Expand Up @@ -1868,7 +1881,12 @@ def test_cluster_zunion(self, r):
r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
# sum
assert r.zunion(["{foo}a", "{foo}b", "{foo}c"]) == [b"a2", b"a4", b"a3", b"a1"]
assert r.zunion(["{foo}a", "{foo}b", "{foo}c"]) == [
b"a2",
b"a4",
b"a3",
b"a1",
]
assert r.zunion(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
(b"a2", 3),
(b"a4", 4),
Expand All @@ -1884,12 +1902,9 @@ def test_cluster_zunion(self, r):
["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
) == [(b"a1", 1), (b"a2", 1), (b"a3", 1), (b"a4", 4)]
# with weight
assert r.zunion({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
(b"a2", 5),
(b"a4", 12),
(b"a3", 20),
(b"a1", 23),
]
assert r.zunion(
{"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True
) == [(b"a2", 5), (b"a4", 12), (b"a3", 20), (b"a1", 23)]

def test_cluster_zunionstore_sum(self, r):
assert r.zunionstore("{foo}d", ["{foo}" + str(i) for i in range(0, 256)]) == 0
Expand Down Expand Up @@ -2061,7 +2076,11 @@ def test_cluster_georadius_store_dist(self, r):

r.geoadd("{foo}barcelona", values)
r.georadius(
"{foo}barcelona", 2.191, 41.433, 1000, store_dist="{foo}places_barcelona"
"{foo}barcelona",
2.191,
41.433,
1000,
store_dist="{foo}places_barcelona",
)
# instead of save the geo score, the distance is saved.
assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301
Expand Down
29 changes: 25 additions & 4 deletions tests/test_command_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,17 @@ def test_get_moveable_keys(self, r):
]
args2 = ["XREAD", "COUNT", 2, b"STREAMS", "mystream", "writers", 0, 0]
args3 = ["ZUNIONSTORE", "out", 2, "zset1", "zset2", "WEIGHTS", 2, 3]
args4 = ["GEORADIUS", "Sicily", 15, 37, 200, "km", "WITHCOORD", b"STORE", "out"]
args4 = [
"GEORADIUS",
"Sicily",
15,
37,
200,
"km",
"WITHCOORD",
b"STORE",
"out",
]
args5 = ["MEMORY USAGE", "foo"]
args6 = [
"MIGRATE",
Expand All @@ -53,11 +63,22 @@ def test_get_moveable_keys(self, r):
args7 = ["MIGRATE", "192.168.1.34", 6379, "key1", 0, 5000]

assert sorted(commands_parser.get_keys(r, *args1)) == ["key1", "key2"]
assert sorted(commands_parser.get_keys(r, *args2)) == ["mystream", "writers"]
assert sorted(commands_parser.get_keys(r, *args3)) == ["out", "zset1", "zset2"]
assert sorted(commands_parser.get_keys(r, *args2)) == [
"mystream",
"writers",
]
assert sorted(commands_parser.get_keys(r, *args3)) == [
"out",
"zset1",
"zset2",
]
assert sorted(commands_parser.get_keys(r, *args4)) == ["Sicily", "out"]
assert sorted(commands_parser.get_keys(r, *args5)) == ["foo"]
assert sorted(commands_parser.get_keys(r, *args6)) == ["key1", "key2", "key3"]
assert sorted(commands_parser.get_keys(r, *args6)) == [
"key1",
"key2",
"key3",
]
assert sorted(commands_parser.get_keys(r, *args7)) == ["key1"]

# A bug in redis<7.0 causes this to fail: https://github.com/redis/redis/issues/9493
Expand Down
114 changes: 71 additions & 43 deletions tests/test_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -3026,38 +3026,37 @@ def test_sort_store(self, r):

@pytest.mark.onlynoncluster
def test_sort_all_options(self, r):
with pytest.raises(Exception):
r["user:1:username"] = "zeus"
r["user:2:username"] = "titan"
r["user:3:username"] = "hermes"
r["user:4:username"] = "hercules"
r["user:5:username"] = "apollo"
r["user:6:username"] = "athena"
r["user:7:username"] = "hades"
r["user:8:username"] = "dionysus"

r["user:1:favorite_drink"] = "yuengling"
r["user:2:favorite_drink"] = "rum"
r["user:3:favorite_drink"] = "vodka"
r["user:4:favorite_drink"] = "milk"
r["user:5:favorite_drink"] = "pinot noir"
r["user:6:favorite_drink"] = "water"
r["user:7:favorite_drink"] = "gin"
r["user:8:favorite_drink"] = "apple juice"

r.rpush("gods", "5", "8", "3", "1", "2", "7", "6", "4")
num = r.sort(
"gods",
start=2,
num=4,
by="user:*:username",
get="user:*:favorite_drink",
desc=True,
alpha=True,
store="sorted",
)
assert num == 4
assert r.lrange("sorted", 0, 10) == [b"vodka", b"milk", b"gin", b"apple juice"]
r["user:1:username"] = "zeus"
r["user:2:username"] = "titan"
r["user:3:username"] = "hermes"
r["user:4:username"] = "hercules"
r["user:5:username"] = "apollo"
r["user:6:username"] = "athena"
r["user:7:username"] = "hades"
r["user:8:username"] = "dionysus"

r["user:1:favorite_drink"] = "yuengling"
r["user:2:favorite_drink"] = "rum"
r["user:3:favorite_drink"] = "vodka"
r["user:4:favorite_drink"] = "milk"
r["user:5:favorite_drink"] = "pinot noir"
r["user:6:favorite_drink"] = "water"
r["user:7:favorite_drink"] = "gin"
r["user:8:favorite_drink"] = "apple juice"

r.rpush("gods", "5", "8", "3", "1", "2", "7", "6", "4")
num = r.sort(
"gods",
start=2,
num=4,
by="user:*:username",
get="user:*:favorite_drink",
desc=True,
alpha=True,
store="sorted",
)
assert num == 4
assert r.lrange("sorted", 0, 10) == [b"vodka", b"milk", b"gin", b"apple juice"]

@skip_if_server_version_lt("7.0.0")
@pytest.mark.onlynoncluster
Expand Down Expand Up @@ -3604,7 +3603,11 @@ def test_georadius_with(self, r):
assert r.georadius(
"barcelona", 2.191, 41.433, 1, unit="km", withhash=True, withcoord=True
) == [
[b"place1", 3471609698139488, (2.19093829393386841, 41.43379028184083523)]
[
b"place1",
3471609698139488,
(2.19093829393386841, 41.43379028184083523),
]
]

# test no values.
Expand Down Expand Up @@ -3704,7 +3707,12 @@ def test_georadiusmember(self, r):
assert r.georadiusbymember("barcelona", "place1", 10) == [b"place1"]

assert r.georadiusbymember(
"barcelona", "place1", 4000, withdist=True, withcoord=True, withhash=True
"barcelona",
"place1",
4000,
withdist=True,
withcoord=True,
withhash=True,
) == [
[
b"\x80place2",
Expand All @@ -3729,9 +3737,9 @@ def test_georadiusmember_count(self, r):
b"\x80place2",
)
r.geoadd("barcelona", values)
assert r.georadiusbymember("barcelona", "place1", 4000, count=1, any=True) == [
b"\x80place2"
]
assert r.georadiusbymember(
"barcelona", "place1", 4000, count=1, any=True
) == [b"\x80place2"]

@skip_if_server_version_lt("5.0.0")
def test_xack(self, r):
Expand Down Expand Up @@ -3863,7 +3871,12 @@ def test_xautoclaim(self, r):
stream, group, consumer1, min_idle_time=0, start_id=0, justid=True
) == [message_id1, message_id2]
assert r.xautoclaim(
stream, group, consumer1, min_idle_time=0, start_id=message_id2, justid=True
stream,
group,
consumer1,
min_idle_time=0,
start_id=message_id2,
justid=True,
) == [message_id2]

@skip_if_server_version_lt("6.2.0")
Expand Down Expand Up @@ -4234,7 +4247,9 @@ def test_xpending_range_idle(self, r):

response = r.xpending_range(stream, group, min="-", max="+", count=5)
assert len(response) == 2
response = r.xpending_range(stream, group, min="-", max="+", count=5, idle=1000)
response = r.xpending_range(
stream, group, min="-", max="+", count=5, idle=1000
)
assert len(response) == 0

def test_xpending_range_negative(self, r):
Expand Down Expand Up @@ -4292,7 +4307,10 @@ def test_xread(self, r):
expected = [
[
stream.encode(),
[get_stream_message(r, stream, m1), get_stream_message(r, stream, m2)],
[
get_stream_message(r, stream, m1),
get_stream_message(r, stream, m2),
],
]
]
# xread starting at 0 returns both messages
Expand Down Expand Up @@ -4322,7 +4340,10 @@ def test_xreadgroup(self, r):
expected = [
[
stream.encode(),
[get_stream_message(r, stream, m1), get_stream_message(r, stream, m2)],
[
get_stream_message(r, stream, m1),
get_stream_message(r, stream, m2),
],
]
]
# xread starting at 0 returns both messages
Expand All @@ -4333,7 +4354,10 @@ def test_xreadgroup(self, r):

expected = [[stream.encode(), [get_stream_message(r, stream, m1)]]]
# xread with count=1 returns only the first message
assert r.xreadgroup(group, consumer, streams={stream: ">"}, count=1) == expected
assert (
r.xreadgroup(group, consumer, streams={stream: ">"}, count=1)
== expected
)

r.xgroup_destroy(stream, group)

Expand All @@ -4349,7 +4373,11 @@ def test_xreadgroup(self, r):
r.xgroup_destroy(stream, group)
r.xgroup_create(stream, group, "0")
assert (
len(r.xreadgroup(group, consumer, streams={stream: ">"}, noack=True)[0][1])
len(
r.xreadgroup(group, consumer, streams={stream: ">"}, noack=True)[0][
1
]
)
== 2
)
# now there should be nothing pending
Expand Down

0 comments on commit e4408a9

Please sign in to comment.