author     Drew DeVault <sir@cmpwn.com>          2021-12-29 10:20:54 +0100
committer  dispatch <dispatch@listserv.local>    2021-12-29 09:21:04 +0000
commit     f6cc4a17c4456d2a47d8b62a38fe14425230af7d (patch)
tree       2661c802c394aee4ee4213a121fed9c4176fd184
parent     87e12d179410852004d283b26262f8e8ca2f4c27 (diff)
download   aports-patches/3850.tar.gz
           aports-patches/3850.tar.bz2
           aports-patches/3850.tar.xz
community/py3-redis: upgrade to 4.1.0 (patches/3850)
I'm starting to get tired of fighting upstream on the nonfree integrations.
-rw-r--r--  community/py3-redis/0001-all-remove-support-for-nonfree-Redis-modules.patch  | 10939
-rw-r--r--  community/py3-redis/0002-Drop-tests-test_ssl.py.patch                        |   183
-rw-r--r--  community/py3-redis/APKBUILD                                                 |    10
3 files changed, 10285 insertions, 847 deletions
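
As a rough illustration of the effect of the rebased 0001 patch on the packaged module (a sketch only, assuming the patched py3-redis 4.1.0 is installed; the host and port below are placeholders), the cluster, sentinel, and Redis-module command entry points deleted by the patch are simply absent at runtime:

    # Sketch: the symbols removed by
    # 0001-all-remove-support-for-nonfree-Redis-modules.patch are gone
    # from the patched py3-redis 4.1.0 (host/port are placeholder values).
    import redis

    # Constructing a client does not connect, so this runs without a server.
    r = redis.Redis(host="localhost", port=6379)

    print(hasattr(redis, "RedisCluster"))  # False: redis/cluster.py is deleted
    print(hasattr(redis, "Sentinel"))      # False: redis/sentinel.py is deleted
    print(hasattr(r, "json"))              # False: RedisModuleCommands no longer mixed into Redis
    print(hasattr(r, "ft"))                # False: RediSearch helpers removed with redis/commands/search
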
diff --git a/community/py3-redis/0001-all-remove-support-for-nonfree-Redis-modules.patch b/community/py3-redis/0001-all-remove-support-for-nonfree-Redis-modules.patch
index df625df446..a643465948 100644
--- a/community/py3-redis/0001-all-remove-support-for-nonfree-Redis-modules.patch
+++ b/community/py3-redis/0001-all-remove-support-for-nonfree-Redis-modules.patch
@@ -1,37 +1,75 @@
-From b2d9df7c788793a498f8aa0e0561bd1a6851da55 Mon Sep 17 00:00:00 2001
+From 0bcd65daea3bc8ecda992c2e1b023a2cc5de4d1c Mon Sep 17 00:00:00 2001
From: Drew DeVault <sir@cmpwn.com>
Date: Tue, 16 Nov 2021 08:23:10 +0100
-Subject: [PATCH] all: remove support for nonfree Redis modules
+Subject: [PATCH 1/2] all: remove support for nonfree Redis modules
---
- redis/client.py | 5 +-
- redis/commands/__init__.py | 2 -
- redis/commands/json/__init__.py | 120 --
- redis/commands/json/commands.py | 232 ----
- redis/commands/json/decoders.py | 59 -
+ redis/__init__.py | 12 -
+ redis/client.py | 11 +-
+ redis/cluster.py | 2150 -----------------
+ redis/commands/__init__.py | 8 +-
+ redis/commands/bf/__init__.py | 204 --
+ redis/commands/bf/commands.py | 498 ----
+ redis/commands/bf/info.py | 85 -
+ redis/commands/cluster.py | 412 ----
+ redis/commands/graph/__init__.py | 162 --
+ redis/commands/graph/commands.py | 202 --
+ redis/commands/graph/edge.py | 87 -
+ redis/commands/graph/exceptions.py | 3 -
+ redis/commands/graph/node.py | 84 -
+ redis/commands/graph/path.py | 74 -
+ redis/commands/graph/query_result.py | 362 ---
+ redis/commands/json/__init__.py | 118 -
+ redis/commands/json/commands.py | 329 ---
+ redis/commands/json/decoders.py | 60 -
redis/commands/json/path.py | 16 -
- redis/commands/redismodules.py | 35 -
- redis/commands/search/__init__.py | 96 --
+ redis/commands/redismodules.py | 83 -
+ redis/commands/search/__init__.py | 96 -
redis/commands/search/_util.py | 7 -
- redis/commands/search/aggregation.py | 406 -------
- redis/commands/search/commands.py | 706 -----------
+ redis/commands/search/aggregation.py | 357 ---
+ redis/commands/search/commands.py | 790 -------
redis/commands/search/document.py | 13 -
- redis/commands/search/field.py | 94 --
- redis/commands/search/indexDefinition.py | 80 --
- redis/commands/search/query.py | 325 -----
- redis/commands/search/querystring.py | 321 -----
- redis/commands/search/reducers.py | 178 ---
- redis/commands/search/result.py | 73 --
- redis/commands/search/suggestion.py | 53 -
- redis/commands/timeseries/__init__.py | 85 --
- redis/commands/timeseries/commands.py | 775 ------------
- redis/commands/timeseries/info.py | 82 --
- redis/commands/timeseries/utils.py | 49 -
- tests/test_connection.py | 21 -
- tests/test_json.py | 1416 ----------------------
- tests/test_search.py | 1315 --------------------
- tests/test_timeseries.py | 588 ---------
- 27 files changed, 2 insertions(+), 7150 deletions(-)
+ redis/commands/search/field.py | 92 -
+ redis/commands/search/indexDefinition.py | 79 -
+ redis/commands/search/query.py | 322 ---
+ redis/commands/search/querystring.py | 314 ---
+ redis/commands/search/reducers.py | 178 --
+ redis/commands/search/result.py | 73 -
+ redis/commands/search/suggestion.py | 51 -
+ redis/commands/sentinel.py | 93 -
+ redis/commands/timeseries/__init__.py | 80 -
+ redis/commands/timeseries/commands.py | 768 -------
+ redis/commands/timeseries/info.py | 82 -
+ redis/commands/timeseries/utils.py | 44 -
+ redis/sentinel.py | 337 ---
+ setup.py | 5 -
+ tests/test_bloom.py | 383 ----
+ tests/test_cluster.py | 2664 ----------------------
+ tests/test_commands.py | 12 -
+ tests/test_connection.py | 22 -
+ tests/test_graph.py | 477 ----
+ tests/test_graph_utils/__init__.py | 0
+ tests/test_graph_utils/test_edge.py | 77 -
+ tests/test_graph_utils/test_node.py | 52 -
+ tests/test_graph_utils/test_path.py | 91 -
+ tests/test_json.py | 1432 ------------
+ tests/test_pubsub.py | 11 -
+ tests/test_search.py | 1457 ------------
+ tests/test_sentinel.py | 234 --
+ tests/test_timeseries.py | 514 -----
+ 53 files changed, 5 insertions(+), 16162 deletions(-)
+ delete mode 100644 redis/cluster.py
+ delete mode 100644 redis/commands/bf/__init__.py
+ delete mode 100644 redis/commands/bf/commands.py
+ delete mode 100644 redis/commands/bf/info.py
+ delete mode 100644 redis/commands/cluster.py
+ delete mode 100644 redis/commands/graph/__init__.py
+ delete mode 100644 redis/commands/graph/commands.py
+ delete mode 100644 redis/commands/graph/edge.py
+ delete mode 100644 redis/commands/graph/exceptions.py
+ delete mode 100644 redis/commands/graph/node.py
+ delete mode 100644 redis/commands/graph/path.py
+ delete mode 100644 redis/commands/graph/query_result.py
delete mode 100644 redis/commands/json/__init__.py
delete mode 100644 redis/commands/json/commands.py
delete mode 100644 redis/commands/json/decoders.py
@@ -49,68 +87,4523 @@ Subject: [PATCH] all: remove support for nonfree Redis modules
delete mode 100644 redis/commands/search/reducers.py
delete mode 100644 redis/commands/search/result.py
delete mode 100644 redis/commands/search/suggestion.py
+ delete mode 100644 redis/commands/sentinel.py
delete mode 100644 redis/commands/timeseries/__init__.py
delete mode 100644 redis/commands/timeseries/commands.py
delete mode 100644 redis/commands/timeseries/info.py
delete mode 100644 redis/commands/timeseries/utils.py
+ delete mode 100644 redis/sentinel.py
+ delete mode 100644 tests/test_bloom.py
+ delete mode 100644 tests/test_cluster.py
+ delete mode 100644 tests/test_graph.py
+ delete mode 100644 tests/test_graph_utils/__init__.py
+ delete mode 100644 tests/test_graph_utils/test_edge.py
+ delete mode 100644 tests/test_graph_utils/test_node.py
+ delete mode 100644 tests/test_graph_utils/test_path.py
delete mode 100644 tests/test_json.py
delete mode 100644 tests/test_search.py
+ delete mode 100644 tests/test_sentinel.py
delete mode 100644 tests/test_timeseries.py
+diff --git a/redis/__init__.py b/redis/__init__.py
+index 35044be..f0b8623 100644
+--- a/redis/__init__.py
++++ b/redis/__init__.py
+@@ -6,7 +6,6 @@ else:
+ import importlib_metadata as metadata
+
+ from redis.client import Redis, StrictRedis
+-from redis.cluster import RedisCluster
+ from redis.connection import (
+ BlockingConnectionPool,
+ Connection,
+@@ -29,12 +28,6 @@ from redis.exceptions import (
+ TimeoutError,
+ WatchError,
+ )
+-from redis.sentinel import (
+- Sentinel,
+- SentinelConnectionPool,
+- SentinelManagedConnection,
+- SentinelManagedSSLConnection,
+-)
+ from redis.utils import from_url
+
+
+@@ -68,13 +61,8 @@ __all__ = [
+ "PubSubError",
+ "ReadOnlyError",
+ "Redis",
+- "RedisCluster",
+ "RedisError",
+ "ResponseError",
+- "Sentinel",
+- "SentinelConnectionPool",
+- "SentinelManagedConnection",
+- "SentinelManagedSSLConnection",
+ "SSLConnection",
+ "StrictRedis",
+ "TimeoutError",
diff --git a/redis/client.py b/redis/client.py
-index dc6693d..93fc459 100755
+index 0984a7c..bf7f596 100755
--- a/redis/client.py
+++ b/redis/client.py
-@@ -6,8 +6,7 @@ import re
- import threading
+@@ -5,14 +5,9 @@ import threading
import time
import warnings
--from redis.commands import (CoreCommands, RedisModuleCommands,
-- SentinelCommands, list_or_args)
-+from redis.commands import CoreCommands, SentinelCommands, list_or_args
- from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
- SSLConnection)
- from redis.lock import Lock
-@@ -607,7 +606,7 @@ def parse_set_result(response, **options):
- return response and str_if_bytes(response) == 'OK'
+ from itertools import chain
+-
+-from redis.commands import (
+- CoreCommands,
+- RedisModuleCommands,
+- SentinelCommands,
+- list_or_args,
+-)
++from redis.commands import CoreCommands, list_or_args
+ from redis.connection import ConnectionPool, SSLConnection, UnixDomainSocketConnection
++from redis.lock import Lock
+ from redis.exceptions import (
+ ConnectionError,
+ ExecAbortError,
+@@ -642,7 +637,7 @@ def parse_set_result(response, **options):
+ return response and str_if_bytes(response) == "OK"
--class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
-+class Redis(CoreCommands, SentinelCommands, object):
+-class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
++class Redis(CoreCommands, object):
"""
Implementation of the Redis protocol.
+diff --git a/redis/cluster.py b/redis/cluster.py
+deleted file mode 100644
+index 5707a9d..0000000
+--- a/redis/cluster.py
++++ /dev/null
+@@ -1,2150 +0,0 @@
+-import copy
+-import logging
+-import random
+-import socket
+-import sys
+-import threading
+-import time
+-from collections import OrderedDict
+-
+-from redis.client import CaseInsensitiveDict, PubSub, Redis
+-from redis.commands import CommandsParser, RedisClusterCommands
+-from redis.connection import ConnectionPool, DefaultParser, Encoder, parse_url
+-from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
+-from redis.exceptions import (
+- AskError,
+- BusyLoadingError,
+- ClusterCrossSlotError,
+- ClusterDownError,
+- ClusterError,
+- ConnectionError,
+- DataError,
+- MasterDownError,
+- MovedError,
+- RedisClusterException,
+- RedisError,
+- ResponseError,
+- SlotNotCoveredError,
+- TimeoutError,
+- TryAgainError,
+-)
+-from redis.utils import (
+- dict_merge,
+- list_keys_to_dict,
+- merge_result,
+- safe_str,
+- str_if_bytes,
+-)
+-
+-log = logging.getLogger(__name__)
+-
+-
+-def get_node_name(host, port):
+- return f"{host}:{port}"
+-
+-
+-def get_connection(redis_node, *args, **options):
+- return redis_node.connection or redis_node.connection_pool.get_connection(
+- args[0], **options
+- )
+-
+-
+-def parse_scan_result(command, res, **options):
+- keys_list = []
+- for primary_res in res.values():
+- keys_list += primary_res[1]
+- return 0, keys_list
+-
+-
+-def parse_pubsub_numsub(command, res, **options):
+- numsub_d = OrderedDict()
+- for numsub_tups in res.values():
+- for channel, numsubbed in numsub_tups:
+- try:
+- numsub_d[channel] += numsubbed
+- except KeyError:
+- numsub_d[channel] = numsubbed
+-
+- ret_numsub = [(channel, numsub) for channel, numsub in numsub_d.items()]
+- return ret_numsub
+-
+-
+-def parse_cluster_slots(resp, **options):
+- current_host = options.get("current_host", "")
+-
+- def fix_server(*args):
+- return str_if_bytes(args[0]) or current_host, args[1]
+-
+- slots = {}
+- for slot in resp:
+- start, end, primary = slot[:3]
+- replicas = slot[3:]
+- slots[start, end] = {
+- "primary": fix_server(*primary),
+- "replicas": [fix_server(*replica) for replica in replicas],
+- }
+-
+- return slots
+-
+-
+-PRIMARY = "primary"
+-REPLICA = "replica"
+-SLOT_ID = "slot-id"
+-
+-REDIS_ALLOWED_KEYS = (
+- "charset",
+- "connection_class",
+- "connection_pool",
+- "client_name",
+- "db",
+- "decode_responses",
+- "encoding",
+- "encoding_errors",
+- "errors",
+- "host",
+- "max_connections",
+- "nodes_flag",
+- "redis_connect_func",
+- "password",
+- "port",
+- "retry",
+- "retry_on_timeout",
+- "socket_connect_timeout",
+- "socket_keepalive",
+- "socket_keepalive_options",
+- "socket_timeout",
+- "ssl",
+- "ssl_ca_certs",
+- "ssl_certfile",
+- "ssl_cert_reqs",
+- "ssl_keyfile",
+- "unix_socket_path",
+- "username",
+-)
+-KWARGS_DISABLED_KEYS = (
+- "host",
+- "port",
+-)
+-
+-# Not complete, but covers the major ones
+-# https://redis.io/commands
+-READ_COMMANDS = frozenset(
+- [
+- "BITCOUNT",
+- "BITPOS",
+- "EXISTS",
+- "GEODIST",
+- "GEOHASH",
+- "GEOPOS",
+- "GEORADIUS",
+- "GEORADIUSBYMEMBER",
+- "GET",
+- "GETBIT",
+- "GETRANGE",
+- "HEXISTS",
+- "HGET",
+- "HGETALL",
+- "HKEYS",
+- "HLEN",
+- "HMGET",
+- "HSTRLEN",
+- "HVALS",
+- "KEYS",
+- "LINDEX",
+- "LLEN",
+- "LRANGE",
+- "MGET",
+- "PTTL",
+- "RANDOMKEY",
+- "SCARD",
+- "SDIFF",
+- "SINTER",
+- "SISMEMBER",
+- "SMEMBERS",
+- "SRANDMEMBER",
+- "STRLEN",
+- "SUNION",
+- "TTL",
+- "ZCARD",
+- "ZCOUNT",
+- "ZRANGE",
+- "ZSCORE",
+- ]
+-)
+-
+-
+-def cleanup_kwargs(**kwargs):
+- """
+- Remove unsupported or disabled keys from kwargs
+- """
+- connection_kwargs = {
+- k: v
+- for k, v in kwargs.items()
+- if k in REDIS_ALLOWED_KEYS and k not in KWARGS_DISABLED_KEYS
+- }
+-
+- return connection_kwargs
+-
+-
+-class ClusterParser(DefaultParser):
+- EXCEPTION_CLASSES = dict_merge(
+- DefaultParser.EXCEPTION_CLASSES,
+- {
+- "ASK": AskError,
+- "TRYAGAIN": TryAgainError,
+- "MOVED": MovedError,
+- "CLUSTERDOWN": ClusterDownError,
+- "CROSSSLOT": ClusterCrossSlotError,
+- "MASTERDOWN": MasterDownError,
+- },
+- )
+-
+-
+-class RedisCluster(RedisClusterCommands):
+- RedisClusterRequestTTL = 16
+-
+- PRIMARIES = "primaries"
+- REPLICAS = "replicas"
+- ALL_NODES = "all"
+- RANDOM = "random"
+- DEFAULT_NODE = "default-node"
+-
+- NODE_FLAGS = {PRIMARIES, REPLICAS, ALL_NODES, RANDOM, DEFAULT_NODE}
+-
+- COMMAND_FLAGS = dict_merge(
+- list_keys_to_dict(
+- [
+- "ACL CAT",
+- "ACL DELUSER",
+- "ACL GENPASS",
+- "ACL GETUSER",
+- "ACL HELP",
+- "ACL LIST",
+- "ACL LOG",
+- "ACL LOAD",
+- "ACL SAVE",
+- "ACL SETUSER",
+- "ACL USERS",
+- "ACL WHOAMI",
+- "CLIENT LIST",
+- "CLIENT SETNAME",
+- "CLIENT GETNAME",
+- "CONFIG SET",
+- "CONFIG REWRITE",
+- "CONFIG RESETSTAT",
+- "TIME",
+- "PUBSUB CHANNELS",
+- "PUBSUB NUMPAT",
+- "PUBSUB NUMSUB",
+- "PING",
+- "INFO",
+- "SHUTDOWN",
+- "KEYS",
+- "SCAN",
+- "FLUSHALL",
+- "FLUSHDB",
+- "DBSIZE",
+- "BGSAVE",
+- "SLOWLOG GET",
+- "SLOWLOG LEN",
+- "SLOWLOG RESET",
+- "WAIT",
+- "SAVE",
+- "MEMORY PURGE",
+- "MEMORY MALLOC-STATS",
+- "MEMORY STATS",
+- "LASTSAVE",
+- "CLIENT TRACKINGINFO",
+- "CLIENT PAUSE",
+- "CLIENT UNPAUSE",
+- "CLIENT UNBLOCK",
+- "CLIENT ID",
+- "CLIENT REPLY",
+- "CLIENT GETREDIR",
+- "CLIENT INFO",
+- "CLIENT KILL",
+- "READONLY",
+- "READWRITE",
+- "CLUSTER INFO",
+- "CLUSTER MEET",
+- "CLUSTER NODES",
+- "CLUSTER REPLICAS",
+- "CLUSTER RESET",
+- "CLUSTER SET-CONFIG-EPOCH",
+- "CLUSTER SLOTS",
+- "CLUSTER COUNT-FAILURE-REPORTS",
+- "CLUSTER KEYSLOT",
+- "COMMAND",
+- "COMMAND COUNT",
+- "COMMAND GETKEYS",
+- "CONFIG GET",
+- "DEBUG",
+- "RANDOMKEY",
+- "READONLY",
+- "READWRITE",
+- "TIME",
+- ],
+- DEFAULT_NODE,
+- ),
+- list_keys_to_dict(
+- [
+- "CLUSTER COUNTKEYSINSLOT",
+- "CLUSTER DELSLOTS",
+- "CLUSTER GETKEYSINSLOT",
+- "CLUSTER SETSLOT",
+- ],
+- SLOT_ID,
+- ),
+- )
+-
+- CLUSTER_COMMANDS_RESPONSE_CALLBACKS = {
+- "CLUSTER ADDSLOTS": bool,
+- "CLUSTER COUNT-FAILURE-REPORTS": int,
+- "CLUSTER COUNTKEYSINSLOT": int,
+- "CLUSTER DELSLOTS": bool,
+- "CLUSTER FAILOVER": bool,
+- "CLUSTER FORGET": bool,
+- "CLUSTER GETKEYSINSLOT": list,
+- "CLUSTER KEYSLOT": int,
+- "CLUSTER MEET": bool,
+- "CLUSTER REPLICATE": bool,
+- "CLUSTER RESET": bool,
+- "CLUSTER SAVECONFIG": bool,
+- "CLUSTER SET-CONFIG-EPOCH": bool,
+- "CLUSTER SETSLOT": bool,
+- "CLUSTER SLOTS": parse_cluster_slots,
+- "ASKING": bool,
+- "READONLY": bool,
+- "READWRITE": bool,
+- }
+-
+- RESULT_CALLBACKS = dict_merge(
+- list_keys_to_dict(
+- [
+- "PUBSUB NUMSUB",
+- ],
+- parse_pubsub_numsub,
+- ),
+- list_keys_to_dict(
+- [
+- "PUBSUB NUMPAT",
+- ],
+- lambda command, res: sum(list(res.values())),
+- ),
+- list_keys_to_dict(
+- [
+- "KEYS",
+- "PUBSUB CHANNELS",
+- ],
+- merge_result,
+- ),
+- list_keys_to_dict(
+- [
+- "PING",
+- "CONFIG SET",
+- "CONFIG REWRITE",
+- "CONFIG RESETSTAT",
+- "CLIENT SETNAME",
+- "BGSAVE",
+- "SLOWLOG RESET",
+- "SAVE",
+- "MEMORY PURGE",
+- "CLIENT PAUSE",
+- "CLIENT UNPAUSE",
+- ],
+- lambda command, res: all(res.values()) if isinstance(res, dict) else res,
+- ),
+- list_keys_to_dict(
+- [
+- "DBSIZE",
+- "WAIT",
+- ],
+- lambda command, res: sum(res.values()) if isinstance(res, dict) else res,
+- ),
+- list_keys_to_dict(
+- [
+- "CLIENT UNBLOCK",
+- ],
+- lambda command, res: 1 if sum(res.values()) > 0 else 0,
+- ),
+- list_keys_to_dict(
+- [
+- "SCAN",
+- ],
+- parse_scan_result,
+- ),
+- )
+-
+- ERRORS_ALLOW_RETRY = (
+- ConnectionError,
+- TimeoutError,
+- ClusterDownError,
+- )
+-
+- def __init__(
+- self,
+- host=None,
+- port=6379,
+- startup_nodes=None,
+- cluster_error_retry_attempts=3,
+- require_full_coverage=True,
+- skip_full_coverage_check=False,
+- reinitialize_steps=10,
+- read_from_replicas=False,
+- url=None,
+- **kwargs,
+- ):
+- """
+- Initialize a new RedisCluster client.
+-
+- :startup_nodes: 'list[ClusterNode]'
+- List of nodes from which initial bootstrapping can be done
+- :host: 'str'
+- Can be used to point to a startup node
+- :port: 'int'
+- Can be used to point to a startup node
+- :require_full_coverage: 'bool'
+- If set to True, as it is by default, all slots must be covered.
+- If set to False and not all slots are covered, the instance
+- creation will succeed only if 'cluster-require-full-coverage'
+- configuration is set to 'no' in all of the cluster's nodes.
+- Otherwise, RedisClusterException will be thrown.
+- :skip_full_coverage_check: 'bool'
+- If require_full_coverage is set to False, a check of
+- cluster-require-full-coverage config will be executed against all
+- nodes. Set skip_full_coverage_check to True to skip this check.
+- Useful for clusters without the CONFIG command (like ElastiCache)
+- :read_from_replicas: 'bool'
+- Enable read from replicas in READONLY mode. You can read possibly
+- stale data.
+- When set to true, read commands will be assigned between the
+- primary and its replications in a Round-Robin manner.
+- :cluster_error_retry_attempts: 'int'
+- Retry command execution attempts when encountering ClusterDownError
+- or ConnectionError
+- :reinitialize_steps: 'int'
+- Specifies the number of MOVED errors that need to occur before
+- reinitializing the whole cluster topology. If a MOVED error occurs
+- and the cluster does not need to be reinitialized on this current
+- error handling, only the MOVED slot will be patched with the
+- redirected node.
+- To reinitialize the cluster on every MOVED error, set
+- reinitialize_steps to 1.
+- To avoid reinitializing the cluster on moved errors, set
+- reinitialize_steps to 0.
+-
+- :**kwargs:
+- Extra arguments that will be sent into Redis instance when created
+- (See Official redis-py doc for supported kwargs
+- [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
+- Some kwargs are not supported and will raise a
+- RedisClusterException:
+- - db (Redis do not support database SELECT in cluster mode)
+- """
+- log.info("Creating a new instance of RedisCluster client")
+-
+- if startup_nodes is None:
+- startup_nodes = []
+-
+- if "db" in kwargs:
+- # Argument 'db' is not possible to use in cluster mode
+- raise RedisClusterException(
+- "Argument 'db' is not possible to use in cluster mode"
+- )
+-
+- # Get the startup node/s
+- from_url = False
+- if url is not None:
+- from_url = True
+- url_options = parse_url(url)
+- if "path" in url_options:
+- raise RedisClusterException(
+- "RedisCluster does not currently support Unix Domain "
+- "Socket connections"
+- )
+- if "db" in url_options and url_options["db"] != 0:
+- # Argument 'db' is not possible to use in cluster mode
+- raise RedisClusterException(
+- "A ``db`` querystring option can only be 0 in cluster mode"
+- )
+- kwargs.update(url_options)
+- host = kwargs.get("host")
+- port = kwargs.get("port", port)
+- startup_nodes.append(ClusterNode(host, port))
+- elif host is not None and port is not None:
+- startup_nodes.append(ClusterNode(host, port))
+- elif len(startup_nodes) == 0:
+- # No startup node was provided
+- raise RedisClusterException(
+- "RedisCluster requires at least one node to discover the "
+- "cluster. Please provide one of the followings:\n"
+- "1. host and port, for example:\n"
+- " RedisCluster(host='localhost', port=6379)\n"
+- "2. list of startup nodes, for example:\n"
+- " RedisCluster(startup_nodes=[ClusterNode('localhost', 6379),"
+- " ClusterNode('localhost', 6378)])"
+- )
+- log.debug(f"startup_nodes : {startup_nodes}")
+- # Update the connection arguments
+- # Whenever a new connection is established, RedisCluster's on_connect
+- # method should be run
+- # If the user passed on_connect function we'll save it and run it
+- # inside the RedisCluster.on_connect() function
+- self.user_on_connect_func = kwargs.pop("redis_connect_func", None)
+- kwargs.update({"redis_connect_func": self.on_connect})
+- kwargs = cleanup_kwargs(**kwargs)
+-
+- self.encoder = Encoder(
+- kwargs.get("encoding", "utf-8"),
+- kwargs.get("encoding_errors", "strict"),
+- kwargs.get("decode_responses", False),
+- )
+- self.cluster_error_retry_attempts = cluster_error_retry_attempts
+- self.command_flags = self.__class__.COMMAND_FLAGS.copy()
+- self.node_flags = self.__class__.NODE_FLAGS.copy()
+- self.read_from_replicas = read_from_replicas
+- self.reinitialize_counter = 0
+- self.reinitialize_steps = reinitialize_steps
+- self.nodes_manager = None
+- self.nodes_manager = NodesManager(
+- startup_nodes=startup_nodes,
+- from_url=from_url,
+- require_full_coverage=require_full_coverage,
+- skip_full_coverage_check=skip_full_coverage_check,
+- **kwargs,
+- )
+-
+- self.cluster_response_callbacks = CaseInsensitiveDict(
+- self.__class__.CLUSTER_COMMANDS_RESPONSE_CALLBACKS
+- )
+- self.result_callbacks = CaseInsensitiveDict(self.__class__.RESULT_CALLBACKS)
+- self.commands_parser = CommandsParser(self)
+- self._lock = threading.Lock()
+-
+- def __enter__(self):
+- return self
+-
+- def __exit__(self, exc_type, exc_value, traceback):
+- self.close()
+-
+- def __del__(self):
+- self.close()
+-
+- def disconnect_connection_pools(self):
+- for node in self.get_nodes():
+- if node.redis_connection:
+- try:
+- node.redis_connection.connection_pool.disconnect()
+- except OSError:
+- # Client was already disconnected. do nothing
+- pass
+-
+- @classmethod
+- def from_url(cls, url, **kwargs):
+- """
+- Return a Redis client object configured from the given URL
+-
+- For example::
+-
+- redis://[[username]:[password]]@localhost:6379/0
+- rediss://[[username]:[password]]@localhost:6379/0
+- unix://[[username]:[password]]@/path/to/socket.sock?db=0
+-
+- Three URL schemes are supported:
+-
+- - `redis://` creates a TCP socket connection. See more at:
+- <https://www.iana.org/assignments/uri-schemes/prov/redis>
+- - `rediss://` creates a SSL wrapped TCP socket connection. See more at:
+- <https://www.iana.org/assignments/uri-schemes/prov/rediss>
+- - ``unix://``: creates a Unix Domain Socket connection.
+-
+- The username, password, hostname, path and all querystring values
+- are passed through urllib.parse.unquote in order to replace any
+- percent-encoded values with their corresponding characters.
+-
+- There are several ways to specify a database number. The first value
+- found will be used:
+- 1. A ``db`` querystring option, e.g. redis://localhost?db=0
+- 2. If using the redis:// or rediss:// schemes, the path argument
+- of the url, e.g. redis://localhost/0
+- 3. A ``db`` keyword argument to this function.
+-
+- If none of these options are specified, the default db=0 is used.
+-
+- All querystring options are cast to their appropriate Python types.
+- Boolean arguments can be specified with string values "True"/"False"
+- or "Yes"/"No". Values that cannot be properly cast cause a
+- ``ValueError`` to be raised. Once parsed, the querystring arguments
+- and keyword arguments are passed to the ``ConnectionPool``'s
+- class initializer. In the case of conflicting arguments, querystring
+- arguments always win.
+-
+- """
+- return cls(url=url, **kwargs)
+-
+- def on_connect(self, connection):
+- """
+- Initialize the connection, authenticate and select a database and send
+- READONLY if it is set during object initialization.
+- """
+- connection.set_parser(ClusterParser)
+- connection.on_connect()
+-
+- if self.read_from_replicas:
+- # Sending READONLY command to server to configure connection as
+- # readonly. Since each cluster node may change its server type due
+- # to a failover, we should establish a READONLY connection
+- # regardless of the server type. If this is a primary connection,
+- # READONLY would not affect executing write commands.
+- connection.send_command("READONLY")
+- if str_if_bytes(connection.read_response()) != "OK":
+- raise ConnectionError("READONLY command failed")
+-
+- if self.user_on_connect_func is not None:
+- self.user_on_connect_func(connection)
+-
+- def get_redis_connection(self, node):
+- if not node.redis_connection:
+- with self._lock:
+- if not node.redis_connection:
+- self.nodes_manager.create_redis_connections([node])
+- return node.redis_connection
+-
+- def get_node(self, host=None, port=None, node_name=None):
+- return self.nodes_manager.get_node(host, port, node_name)
+-
+- def get_primaries(self):
+- return self.nodes_manager.get_nodes_by_server_type(PRIMARY)
+-
+- def get_replicas(self):
+- return self.nodes_manager.get_nodes_by_server_type(REPLICA)
+-
+- def get_random_node(self):
+- return random.choice(list(self.nodes_manager.nodes_cache.values()))
+-
+- def get_nodes(self):
+- return list(self.nodes_manager.nodes_cache.values())
+-
+- def get_node_from_key(self, key, replica=False):
+- """
+- Get the node that holds the key's slot.
+- If replica set to True but the slot doesn't have any replicas, None is
+- returned.
+- """
+- slot = self.keyslot(key)
+- slot_cache = self.nodes_manager.slots_cache.get(slot)
+- if slot_cache is None or len(slot_cache) == 0:
+- raise SlotNotCoveredError(f'Slot "{slot}" is not covered by the cluster.')
+- if replica and len(self.nodes_manager.slots_cache[slot]) < 2:
+- return None
+- elif replica:
+- node_idx = 1
+- else:
+- # primary
+- node_idx = 0
+-
+- return slot_cache[node_idx]
+-
+- def get_default_node(self):
+- """
+- Get the cluster's default node
+- """
+- return self.nodes_manager.default_node
+-
+- def set_default_node(self, node):
+- """
+- Set the default node of the cluster.
+- :param node: 'ClusterNode'
+- :return True if the default node was set, else False
+- """
+- if node is None or self.get_node(node_name=node.name) is None:
+- log.info(
+- "The requested node does not exist in the cluster, so "
+- "the default node was not changed."
+- )
+- return False
+- self.nodes_manager.default_node = node
+- log.info(f"Changed the default cluster node to {node}")
+- return True
+-
+- def monitor(self, target_node=None):
+- """
+- Returns a Monitor object for the specified target node.
+- The default cluster node will be selected if no target node was
+- specified.
+- Monitor is useful for handling the MONITOR command to the redis server.
+- next_command() method returns one command from monitor
+- listen() method yields commands from monitor.
+- """
+- if target_node is None:
+- target_node = self.get_default_node()
+- if target_node.redis_connection is None:
+- raise RedisClusterException(
+- f"Cluster Node {target_node.name} has no redis_connection"
+- )
+- return target_node.redis_connection.monitor()
+-
+- def pubsub(self, node=None, host=None, port=None, **kwargs):
+- """
+- Allows passing a ClusterNode, or host&port, to get a pubsub instance
+- connected to the specified node
+- """
+- return ClusterPubSub(self, node=node, host=host, port=port, **kwargs)
+-
+- def pipeline(self, transaction=None, shard_hint=None):
+- """
+- Cluster impl:
+- Pipelines do not work in cluster mode the same way they
+- do in normal mode. Create a clone of this object so
+- that simulating pipelines will work correctly. Each
+- command will be called directly when used and
+- when calling execute() will only return the result stack.
+- """
+- if shard_hint:
+- raise RedisClusterException("shard_hint is deprecated in cluster mode")
+-
+- if transaction:
+- raise RedisClusterException("transaction is deprecated in cluster mode")
+-
+- return ClusterPipeline(
+- nodes_manager=self.nodes_manager,
+- startup_nodes=self.nodes_manager.startup_nodes,
+- result_callbacks=self.result_callbacks,
+- cluster_response_callbacks=self.cluster_response_callbacks,
+- cluster_error_retry_attempts=self.cluster_error_retry_attempts,
+- read_from_replicas=self.read_from_replicas,
+- reinitialize_steps=self.reinitialize_steps,
+- )
+-
+- def _determine_nodes(self, *args, **kwargs):
+- command = args[0]
+- nodes_flag = kwargs.pop("nodes_flag", None)
+- if nodes_flag is not None:
+- # nodes flag passed by the user
+- command_flag = nodes_flag
+- else:
+- # get the nodes group for this command if it was predefined
+- command_flag = self.command_flags.get(command)
+- if command_flag:
+- log.debug(f"Target node/s for {command}: {command_flag}")
+- if command_flag == self.__class__.RANDOM:
+- # return a random node
+- return [self.get_random_node()]
+- elif command_flag == self.__class__.PRIMARIES:
+- # return all primaries
+- return self.get_primaries()
+- elif command_flag == self.__class__.REPLICAS:
+- # return all replicas
+- return self.get_replicas()
+- elif command_flag == self.__class__.ALL_NODES:
+- # return all nodes
+- return self.get_nodes()
+- elif command_flag == self.__class__.DEFAULT_NODE:
+- # return the cluster's default node
+- return [self.nodes_manager.default_node]
+- else:
+- # get the node that holds the key's slot
+- slot = self.determine_slot(*args)
+- node = self.nodes_manager.get_node_from_slot(
+- slot, self.read_from_replicas and command in READ_COMMANDS
+- )
+- log.debug(f"Target for {args}: slot {slot}")
+- return [node]
+-
+- def _should_reinitialized(self):
+- # To reinitialize the cluster on every MOVED error,
+- # set reinitialize_steps to 1.
+- # To avoid reinitializing the cluster on moved errors, set
+- # reinitialize_steps to 0.
+- if self.reinitialize_steps == 0:
+- return False
+- else:
+- return self.reinitialize_counter % self.reinitialize_steps == 0
+-
+- def keyslot(self, key):
+- """
+- Calculate keyslot for a given key.
+- See Keys distribution model in https://redis.io/topics/cluster-spec
+- """
+- k = self.encoder.encode(key)
+- return key_slot(k)
+-
+- def _get_command_keys(self, *args):
+- """
+- Get the keys in the command. If the command has no keys in in, None is
+- returned.
+- """
+- redis_conn = self.get_default_node().redis_connection
+- return self.commands_parser.get_keys(redis_conn, *args)
+-
+- def determine_slot(self, *args):
+- """
+- Figure out what slot based on command and args
+- """
+- if self.command_flags.get(args[0]) == SLOT_ID:
+- # The command contains the slot ID
+- return args[1]
+-
+- # Get the keys in the command
+- keys = self._get_command_keys(*args)
+- if keys is None or len(keys) == 0:
+- raise RedisClusterException(
+- "No way to dispatch this command to Redis Cluster. "
+- "Missing key.\nYou can execute the command by specifying "
+- f"target nodes.\nCommand: {args}"
+- )
+-
+- if len(keys) > 1:
+- # multi-key command, we need to make sure all keys are mapped to
+- # the same slot
+- slots = {self.keyslot(key) for key in keys}
+- if len(slots) != 1:
+- raise RedisClusterException(
+- f"{args[0]} - all keys must map to the same key slot"
+- )
+- return slots.pop()
+- else:
+- # single key command
+- return self.keyslot(keys[0])
+-
+- def reinitialize_caches(self):
+- self.nodes_manager.initialize()
+-
+- def get_encoder(self):
+- """
+- Get the connections' encoder
+- """
+- return self.encoder
+-
+- def get_connection_kwargs(self):
+- """
+- Get the connections' key-word arguments
+- """
+- return self.nodes_manager.connection_kwargs
+-
+- def _is_nodes_flag(self, target_nodes):
+- return isinstance(target_nodes, str) and target_nodes in self.node_flags
+-
+- def _parse_target_nodes(self, target_nodes):
+- if isinstance(target_nodes, list):
+- nodes = target_nodes
+- elif isinstance(target_nodes, ClusterNode):
+- # Supports passing a single ClusterNode as a variable
+- nodes = [target_nodes]
+- elif isinstance(target_nodes, dict):
+- # Supports dictionaries of the format {node_name: node}.
+- # It enables to execute commands with multi nodes as follows:
+- # rc.cluster_save_config(rc.get_primaries())
+- nodes = target_nodes.values()
+- else:
+- raise TypeError(
+- "target_nodes type can be one of the following: "
+- "node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES),"
+- "ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
+- f"The passed type is {type(target_nodes)}"
+- )
+- return nodes
+-
+- def execute_command(self, *args, **kwargs):
+- """
+- Wrapper for ERRORS_ALLOW_RETRY error handling.
+-
+- It will try the number of times specified by the config option
+- "self.cluster_error_retry_attempts" which defaults to 3 unless manually
+- configured.
+-
+- If it reaches the number of times, the command will raise the exception
+-
+- Key argument :target_nodes: can be passed with the following types:
+- nodes_flag: PRIMARIES, REPLICAS, ALL_NODES, RANDOM
+- ClusterNode
+- list<ClusterNode>
+- dict<Any, ClusterNode>
+- """
+- target_nodes_specified = False
+- target_nodes = None
+- passed_targets = kwargs.pop("target_nodes", None)
+- if passed_targets is not None and not self._is_nodes_flag(passed_targets):
+- target_nodes = self._parse_target_nodes(passed_targets)
+- target_nodes_specified = True
+- # If an error that allows retrying was thrown, the nodes and slots
+- # cache were reinitialized. We will retry executing the command with
+- # the updated cluster setup only when the target nodes can be
+- # determined again with the new cache tables. Therefore, when target
+- # nodes were passed to this function, we cannot retry the command
+- # execution since the nodes may not be valid anymore after the tables
+- # were reinitialized. So in case of passed target nodes,
+- # retry_attempts will be set to 1.
+- retry_attempts = (
+- 1 if target_nodes_specified else self.cluster_error_retry_attempts
+- )
+- exception = None
+- for _ in range(0, retry_attempts):
+- try:
+- res = {}
+- if not target_nodes_specified:
+- # Determine the nodes to execute the command on
+- target_nodes = self._determine_nodes(
+- *args, **kwargs, nodes_flag=passed_targets
+- )
+- if not target_nodes:
+- raise RedisClusterException(
+- f"No targets were found to execute {args} command on"
+- )
+- for node in target_nodes:
+- res[node.name] = self._execute_command(node, *args, **kwargs)
+- # Return the processed result
+- return self._process_result(args[0], res, **kwargs)
+- except BaseException as e:
+- if type(e) in RedisCluster.ERRORS_ALLOW_RETRY:
+- # The nodes and slots cache were reinitialized.
+- # Try again with the new cluster setup.
+- exception = e
+- else:
+- # All other errors should be raised.
+- raise e
+-
+- # If it fails the configured number of times then raise exception back
+- # to caller of this method
+- raise exception
+-
+- def _execute_command(self, target_node, *args, **kwargs):
+- """
+- Send a command to a node in the cluster
+- """
+- command = args[0]
+- redis_node = None
+- connection = None
+- redirect_addr = None
+- asking = False
+- moved = False
+- ttl = int(self.RedisClusterRequestTTL)
+- connection_error_retry_counter = 0
+-
+- while ttl > 0:
+- ttl -= 1
+- try:
+- if asking:
+- target_node = self.get_node(node_name=redirect_addr)
+- elif moved:
+- # MOVED occurred and the slots cache was updated,
+- # refresh the target node
+- slot = self.determine_slot(*args)
+- target_node = self.nodes_manager.get_node_from_slot(
+- slot, self.read_from_replicas and command in READ_COMMANDS
+- )
+- moved = False
+-
+- log.debug(
+- f"Executing command {command} on target node: "
+- f"{target_node.server_type} {target_node.name}"
+- )
+- redis_node = self.get_redis_connection(target_node)
+- connection = get_connection(redis_node, *args, **kwargs)
+- if asking:
+- connection.send_command("ASKING")
+- redis_node.parse_response(connection, "ASKING", **kwargs)
+- asking = False
+-
+- connection.send_command(*args)
+- response = redis_node.parse_response(connection, command, **kwargs)
+- if command in self.cluster_response_callbacks:
+- response = self.cluster_response_callbacks[command](
+- response, **kwargs
+- )
+- return response
+-
+- except (RedisClusterException, BusyLoadingError) as e:
+- log.exception(type(e))
+- raise
+- except (ConnectionError, TimeoutError) as e:
+- log.exception(type(e))
+- # ConnectionError can also be raised if we couldn't get a
+- # connection from the pool before timing out, so check that
+- # this is an actual connection before attempting to disconnect.
+- if connection is not None:
+- connection.disconnect()
+- connection_error_retry_counter += 1
+-
+- # Give the node 0.25 seconds to get back up and retry again
+- # with same node and configuration. After 5 attempts then try
+- # to reinitialize the cluster and see if the nodes
+- # configuration has changed or not
+- if connection_error_retry_counter < 5:
+- time.sleep(0.25)
+- else:
+- # Hard force of reinitialize of the node/slots setup
+- # and try again with the new setup
+- self.nodes_manager.initialize()
+- raise
+- except MovedError as e:
+- # First, we will try to patch the slots/nodes cache with the
+- # redirected node output and try again. If MovedError exceeds
+- # 'reinitialize_steps' number of times, we will force
+- # reinitializing the tables, and then try again.
+- # 'reinitialize_steps' counter will increase faster when
+- # the same client object is shared between multiple threads. To
+- # reduce the frequency you can set this variable in the
+- # RedisCluster constructor.
+- log.exception("MovedError")
+- self.reinitialize_counter += 1
+- if self._should_reinitialized():
+- self.nodes_manager.initialize()
+- # Reset the counter
+- self.reinitialize_counter = 0
+- else:
+- self.nodes_manager.update_moved_exception(e)
+- moved = True
+- except TryAgainError:
+- log.exception("TryAgainError")
+-
+- if ttl < self.RedisClusterRequestTTL / 2:
+- time.sleep(0.05)
+- except AskError as e:
+- log.exception("AskError")
+-
+- redirect_addr = get_node_name(host=e.host, port=e.port)
+- asking = True
+- except ClusterDownError as e:
+- log.exception("ClusterDownError")
+- # ClusterDownError can occur during a failover and to get
+- # self-healed, we will try to reinitialize the cluster layout
+- # and retry executing the command
+- time.sleep(0.25)
+- self.nodes_manager.initialize()
+- raise e
+- except ResponseError as e:
+- message = e.__str__()
+- log.exception(f"ResponseError: {message}")
+- raise e
+- except BaseException as e:
+- log.exception("BaseException")
+- if connection:
+- connection.disconnect()
+- raise e
+- finally:
+- if connection is not None:
+- redis_node.connection_pool.release(connection)
+-
+- raise ClusterError("TTL exhausted.")
+-
+- def close(self):
+- try:
+- with self._lock:
+- if self.nodes_manager:
+- self.nodes_manager.close()
+- except AttributeError:
+- # RedisCluster's __init__ can fail before nodes_manager is set
+- pass
+-
+- def _process_result(self, command, res, **kwargs):
+- """
+- Process the result of the executed command.
+- The function would return a dict or a single value.
+-
+- :type command: str
+- :type res: dict
+-
+- `res` should be in the following format:
+- Dict<node_name, command_result>
+- """
+- if command in self.result_callbacks:
+- return self.result_callbacks[command](command, res, **kwargs)
+- elif len(res) == 1:
+- # When we execute the command on a single node, we can
+- # remove the dictionary and return a single response
+- return list(res.values())[0]
+- else:
+- return res
+-
+-
+-class ClusterNode:
+- def __init__(self, host, port, server_type=None, redis_connection=None):
+- if host == "localhost":
+- host = socket.gethostbyname(host)
+-
+- self.host = host
+- self.port = port
+- self.name = get_node_name(host, port)
+- self.server_type = server_type
+- self.redis_connection = redis_connection
+-
+- def __repr__(self):
+- return (
+- f"[host={self.host},"
+- f"port={self.port},"
+- f"name={self.name},"
+- f"server_type={self.server_type},"
+- f"redis_connection={self.redis_connection}]"
+- )
+-
+- def __eq__(self, obj):
+- return isinstance(obj, ClusterNode) and obj.name == self.name
+-
+- def __del__(self):
+- if self.redis_connection is not None:
+- self.redis_connection.close()
+-
+-
+-class LoadBalancer:
+- """
+- Round-Robin Load Balancing
+- """
+-
+- def __init__(self, start_index=0):
+- self.primary_to_idx = {}
+- self.start_index = start_index
+-
+- def get_server_index(self, primary, list_size):
+- server_index = self.primary_to_idx.setdefault(primary, self.start_index)
+- # Update the index
+- self.primary_to_idx[primary] = (server_index + 1) % list_size
+- return server_index
+-
+- def reset(self):
+- self.primary_to_idx.clear()
+-
+-
+-class NodesManager:
+- def __init__(
+- self,
+- startup_nodes,
+- from_url=False,
+- require_full_coverage=True,
+- skip_full_coverage_check=False,
+- lock=None,
+- **kwargs,
+- ):
+- self.nodes_cache = {}
+- self.slots_cache = {}
+- self.startup_nodes = {}
+- self.default_node = None
+- self.populate_startup_nodes(startup_nodes)
+- self.from_url = from_url
+- self._require_full_coverage = require_full_coverage
+- self._skip_full_coverage_check = skip_full_coverage_check
+- self._moved_exception = None
+- self.connection_kwargs = kwargs
+- self.read_load_balancer = LoadBalancer()
+- if lock is None:
+- lock = threading.Lock()
+- self._lock = lock
+- self.initialize()
+-
+- def get_node(self, host=None, port=None, node_name=None):
+- """
+- Get the requested node from the cluster's nodes.
+- nodes.
+- :return: ClusterNode if the node exists, else None
+- """
+- if host and port:
+- # the user passed host and port
+- if host == "localhost":
+- host = socket.gethostbyname(host)
+- return self.nodes_cache.get(get_node_name(host=host, port=port))
+- elif node_name:
+- return self.nodes_cache.get(node_name)
+- else:
+- log.error(
+- "get_node requires one of the following: "
+- "1. node name "
+- "2. host and port"
+- )
+- return None
+-
+- def update_moved_exception(self, exception):
+- self._moved_exception = exception
+-
+- def _update_moved_slots(self):
+- """
+- Update the slot's node with the redirected one
+- """
+- e = self._moved_exception
+- redirected_node = self.get_node(host=e.host, port=e.port)
+- if redirected_node is not None:
+- # The node already exists
+- if redirected_node.server_type is not PRIMARY:
+- # Update the node's server type
+- redirected_node.server_type = PRIMARY
+- else:
+- # This is a new node, we will add it to the nodes cache
+- redirected_node = ClusterNode(e.host, e.port, PRIMARY)
+- self.nodes_cache[redirected_node.name] = redirected_node
+- if redirected_node in self.slots_cache[e.slot_id]:
+- # The MOVED error resulted from a failover, and the new slot owner
+- # had previously been a replica.
+- old_primary = self.slots_cache[e.slot_id][0]
+- # Update the old primary to be a replica and add it to the end of
+- # the slot's node list
+- old_primary.server_type = REPLICA
+- self.slots_cache[e.slot_id].append(old_primary)
+- # Remove the old replica, which is now a primary, from the slot's
+- # node list
+- self.slots_cache[e.slot_id].remove(redirected_node)
+- # Override the old primary with the new one
+- self.slots_cache[e.slot_id][0] = redirected_node
+- if self.default_node == old_primary:
+- # Update the default node with the new primary
+- self.default_node = redirected_node
+- else:
+- # The new slot owner is a new server, or a server from a different
+- # shard. We need to remove all current nodes from the slot's list
+- # (including replications) and add just the new node.
+- self.slots_cache[e.slot_id] = [redirected_node]
+- # Reset moved_exception
+- self._moved_exception = None
+-
+- def get_node_from_slot(self, slot, read_from_replicas=False, server_type=None):
+- """
+- Gets a node that servers this hash slot
+- """
+- if self._moved_exception:
+- with self._lock:
+- if self._moved_exception:
+- self._update_moved_slots()
+-
+- if self.slots_cache.get(slot) is None or len(self.slots_cache[slot]) == 0:
+- raise SlotNotCoveredError(
+- f'Slot "{slot}" not covered by the cluster. '
+- f'"require_full_coverage={self._require_full_coverage}"'
+- )
+-
+- if read_from_replicas is True:
+- # get the server index in a Round-Robin manner
+- primary_name = self.slots_cache[slot][0].name
+- node_idx = self.read_load_balancer.get_server_index(
+- primary_name, len(self.slots_cache[slot])
+- )
+- elif (
+- server_type is None
+- or server_type == PRIMARY
+- or len(self.slots_cache[slot]) == 1
+- ):
+- # return a primary
+- node_idx = 0
+- else:
+- # return a replica
+- # randomly choose one of the replicas
+- node_idx = random.randint(1, len(self.slots_cache[slot]) - 1)
+-
+- return self.slots_cache[slot][node_idx]
+-
+- def get_nodes_by_server_type(self, server_type):
+- """
+- Get all nodes with the specified server type
+- :param server_type: 'primary' or 'replica'
+- :return: list of ClusterNode
+- """
+- return [
+- node
+- for node in self.nodes_cache.values()
+- if node.server_type == server_type
+- ]
+-
+- def populate_startup_nodes(self, nodes):
+- """
+- Populate all startup nodes and filters out any duplicates
+- """
+- for n in nodes:
+- self.startup_nodes[n.name] = n
+-
+- def cluster_require_full_coverage(self, cluster_nodes):
+- """
+- if exists 'cluster-require-full-coverage no' config on redis servers,
+- then even all slots are not covered, cluster still will be able to
+- respond
+- """
+-
+- def node_require_full_coverage(node):
+- try:
+- return (
+- "yes"
+- in node.redis_connection.config_get(
+- "cluster-require-full-coverage"
+- ).values()
+- )
+- except ConnectionError:
+- return False
+- except Exception as e:
+- raise RedisClusterException(
+- 'ERROR sending "config get cluster-require-full-coverage"'
+- f" command to redis server: {node.name}, {e}"
+- )
+-
+- # at least one node should have cluster-require-full-coverage yes
+- return any(node_require_full_coverage(node) for node in cluster_nodes.values())
+-
+- def check_slots_coverage(self, slots_cache):
+- # Validate if all slots are covered or if we should try next
+- # startup node
+- for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
+- if i not in slots_cache:
+- return False
+- return True
+-
+- def create_redis_connections(self, nodes):
+- """
+- This function will create a redis connection to all nodes in :nodes:
+- """
+- for node in nodes:
+- if node.redis_connection is None:
+- node.redis_connection = self.create_redis_node(
+- host=node.host,
+- port=node.port,
+- **self.connection_kwargs,
+- )
+-
+- def create_redis_node(self, host, port, **kwargs):
+- if self.from_url:
+- # Create a redis node with a costumed connection pool
+- kwargs.update({"host": host})
+- kwargs.update({"port": port})
+- r = Redis(connection_pool=ConnectionPool(**kwargs))
+- else:
+- r = Redis(host=host, port=port, **kwargs)
+- return r
+-
+- def initialize(self):
+- """
+- Initializes the nodes cache, slots cache and redis connections.
+- :startup_nodes:
+- Responsible for discovering other nodes in the cluster
+- """
+- log.debug("Initializing the nodes' topology of the cluster")
+- self.reset()
+- tmp_nodes_cache = {}
+- tmp_slots = {}
+- disagreements = []
+- startup_nodes_reachable = False
+- fully_covered = False
+- kwargs = self.connection_kwargs
+- for startup_node in self.startup_nodes.values():
+- try:
+- if startup_node.redis_connection:
+- r = startup_node.redis_connection
+- else:
+- # Create a new Redis connection and let Redis decode the
+- # responses so we won't need to handle that
+- copy_kwargs = copy.deepcopy(kwargs)
+- copy_kwargs.update({"decode_responses": True, "encoding": "utf-8"})
+- r = self.create_redis_node(
+- startup_node.host, startup_node.port, **copy_kwargs
+- )
+- self.startup_nodes[startup_node.name].redis_connection = r
+- # Make sure cluster mode is enabled on this node
+- if bool(r.info().get("cluster_enabled")) is False:
+- raise RedisClusterException(
+- "Cluster mode is not enabled on this node"
+- )
+- cluster_slots = str_if_bytes(r.execute_command("CLUSTER SLOTS"))
+- startup_nodes_reachable = True
+- except (ConnectionError, TimeoutError) as e:
+- msg = e.__str__
+- log.exception(
+- "An exception occurred while trying to"
+- " initialize the cluster using the seed node"
+- f" {startup_node.name}:\n{msg}"
+- )
+- continue
+- except ResponseError as e:
+- log.exception('ReseponseError sending "cluster slots" to redis server')
+-
+- # Isn't a cluster connection, so it won't parse these
+- # exceptions automatically
+- message = e.__str__()
+- if "CLUSTERDOWN" in message or "MASTERDOWN" in message:
+- continue
+- else:
+- raise RedisClusterException(
+- 'ERROR sending "cluster slots" command to redis '
+- f"server: {startup_node}. error: {message}"
+- )
+- except Exception as e:
+- message = e.__str__()
+- raise RedisClusterException(
+- 'ERROR sending "cluster slots" command to redis '
+- f"server {startup_node.name}. error: {message}"
+- )
+-
+- # CLUSTER SLOTS command results in the following output:
+- # [[slot_section[from_slot,to_slot,master,replica1,...,replicaN]]]
+- # where each node contains the following list: [IP, port, node_id]
+- # Therefore, cluster_slots[0][2][0] will be the IP address of the
+- # primary node of the first slot section.
+- # If there's only one server in the cluster, its ``host`` is ''
+- # Fix it to the host in startup_nodes
+- if (
+- len(cluster_slots) == 1
+- and len(cluster_slots[0][2][0]) == 0
+- and len(self.startup_nodes) == 1
+- ):
+- cluster_slots[0][2][0] = startup_node.host
+-
+- for slot in cluster_slots:
+- primary_node = slot[2]
+- host = primary_node[0]
+- if host == "":
+- host = startup_node.host
+- port = int(primary_node[1])
+-
+- target_node = tmp_nodes_cache.get(get_node_name(host, port))
+- if target_node is None:
+- target_node = ClusterNode(host, port, PRIMARY)
+- # add this node to the nodes cache
+- tmp_nodes_cache[target_node.name] = target_node
+-
+- for i in range(int(slot[0]), int(slot[1]) + 1):
+- if i not in tmp_slots:
+- tmp_slots[i] = []
+- tmp_slots[i].append(target_node)
+- replica_nodes = [slot[j] for j in range(3, len(slot))]
+-
+- for replica_node in replica_nodes:
+- host = replica_node[0]
+- port = replica_node[1]
+-
+- target_replica_node = tmp_nodes_cache.get(
+- get_node_name(host, port)
+- )
+- if target_replica_node is None:
+- target_replica_node = ClusterNode(host, port, REPLICA)
+- tmp_slots[i].append(target_replica_node)
+- # add this node to the nodes cache
+- tmp_nodes_cache[
+- target_replica_node.name
+- ] = target_replica_node
+- else:
+- # Validate that 2 nodes want to use the same slot cache
+- # setup
+- tmp_slot = tmp_slots[i][0]
+- if tmp_slot.name != target_node.name:
+- disagreements.append(
+- f"{tmp_slot.name} vs {target_node.name} on slot: {i}"
+- )
+-
+- if len(disagreements) > 5:
+- raise RedisClusterException(
+- f"startup_nodes could not agree on a valid "
+- f'slots cache: {", ".join(disagreements)}'
+- )
+-
+- fully_covered = self.check_slots_coverage(tmp_slots)
+- if fully_covered:
+- # Don't need to continue to the next startup node if all
+- # slots are covered
+- break
+-
+- if not startup_nodes_reachable:
+- raise RedisClusterException(
+- "Redis Cluster cannot be connected. Please provide at least "
+- "one reachable node. "
+- )
+-
+- # Create Redis connections to all nodes
+- self.create_redis_connections(list(tmp_nodes_cache.values()))
+-
+- # Check if the slots are not fully covered
+- if not fully_covered and self._require_full_coverage:
+- # Despite the requirement that the slots be covered, there
+- # isn't a full coverage
+- raise RedisClusterException(
+- f"All slots are not covered after query all startup_nodes. "
+- f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} "
+- f"covered..."
+- )
+- elif not fully_covered and not self._require_full_coverage:
+- # The user set require_full_coverage to False.
+- # In case of full coverage requirement in the cluster's Redis
+- # configurations, we will raise an exception. Otherwise, we may
+- # continue with partial coverage.
+- # see Redis Cluster configuration parameters in
+- # https://redis.io/topics/cluster-tutorial
+- if (
+- not self._skip_full_coverage_check
+- and self.cluster_require_full_coverage(tmp_nodes_cache)
+- ):
+- raise RedisClusterException(
+- "Not all slots are covered but the cluster's "
+- "configuration requires full coverage. Set "
+- "cluster-require-full-coverage configuration to no on "
+- "all of the cluster nodes if you wish the cluster to "
+- "be able to serve without being fully covered."
+- f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} "
+- f"covered..."
+- )
+-
+- # Set the tmp variables to the real variables
+- self.nodes_cache = tmp_nodes_cache
+- self.slots_cache = tmp_slots
+- # Set the default node
+- self.default_node = self.get_nodes_by_server_type(PRIMARY)[0]
+- # Populate the startup nodes with all discovered nodes
+- self.populate_startup_nodes(self.nodes_cache.values())
+- # If initialize was called after a MovedError, clear it
+- self._moved_exception = None
+-
+- def close(self):
+- self.default_node = None
+- for node in self.nodes_cache.values():
+- if node.redis_connection:
+- node.redis_connection.close()
+-
+- def reset(self):
+- try:
+- self.read_load_balancer.reset()
+- except TypeError:
+- # The read_load_balancer is None, do nothing
+- pass
+-
+-
+-class ClusterPubSub(PubSub):
+- """
+- Wrapper for PubSub class.
+-
+- IMPORTANT: before using ClusterPubSub, read about the known limitations
+- with pubsub in Cluster mode and learn how to workaround them:
+- https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
+- """
+-
+- def __init__(self, redis_cluster, node=None, host=None, port=None, **kwargs):
+- """
+- When a pubsub instance is created without specifying a node, a single
+- node will be transparently chosen for the pubsub connection on the
+- first command execution. The node will be determined by:
+- 1. Hashing the channel name in the request to find its keyslot
+- 2. Selecting a node that handles the keyslot: If read_from_replicas is
+- set to true, a replica can be selected.
+-
+- :type redis_cluster: RedisCluster
+- :type node: ClusterNode
+- :type host: str
+- :type port: int
+- """
+- log.info("Creating new instance of ClusterPubSub")
+- self.node = None
+- self.set_pubsub_node(redis_cluster, node, host, port)
+- connection_pool = (
+- None
+- if self.node is None
+- else redis_cluster.get_redis_connection(self.node).connection_pool
+- )
+- self.cluster = redis_cluster
+- super().__init__(
+- **kwargs, connection_pool=connection_pool, encoder=redis_cluster.encoder
+- )
+-
+- def set_pubsub_node(self, cluster, node=None, host=None, port=None):
+- """
+- The pubsub node will be set according to the passed node, host and port
+- When none of the node, host, or port are specified - the node is set
+- to None and will be determined by the keyslot of the channel in the
+- first command to be executed.
+- RedisClusterException will be thrown if the passed node does not exist
+- in the cluster.
+- If host is passed without port, or vice versa, a DataError will be
+- thrown.
+- :type cluster: RedisCluster
+- :type node: ClusterNode
+- :type host: str
+- :type port: int
+- """
+- if node is not None:
+- # node is passed by the user
+- self._raise_on_invalid_node(cluster, node, node.host, node.port)
+- pubsub_node = node
+- elif host is not None and port is not None:
+- # host and port passed by the user
+- node = cluster.get_node(host=host, port=port)
+- self._raise_on_invalid_node(cluster, node, host, port)
+- pubsub_node = node
+- elif any([host, port]) is True:
+- # only 'host' or 'port' passed
+- raise DataError("Passing a host requires passing a port, " "and vice versa")
+- else:
+- # nothing passed by the user. set node to None
+- pubsub_node = None
+-
+- self.node = pubsub_node
+-
+- def get_pubsub_node(self):
+- """
+- Get the node that is being used as the pubsub connection
+- """
+- return self.node
+-
+- def _raise_on_invalid_node(self, redis_cluster, node, host, port):
+- """
+- Raise a RedisClusterException if the node is None or doesn't exist in
+- the cluster.
+- """
+- if node is None or redis_cluster.get_node(node_name=node.name) is None:
+- raise RedisClusterException(
+- f"Node {host}:{port} doesn't exist in the cluster"
+- )
+-
+- def execute_command(self, *args, **kwargs):
+- """
+- Execute a publish/subscribe command.
+-
+- Taken code from redis-py and tweak to make it work within a cluster.
+- """
+- # NOTE: don't parse the response in this function -- it could pull a
+- # legitimate message off the stack if the connection is already
+- # subscribed to one or more channels
+-
+- if self.connection is None:
+- if self.connection_pool is None:
+- if len(args) > 1:
+- # Hash the first channel and get one of the nodes holding
+- # this slot
+- channel = args[1]
+- slot = self.cluster.keyslot(channel)
+- node = self.cluster.nodes_manager.get_node_from_slot(
+- slot, self.cluster.read_from_replicas
+- )
+- else:
+- # Get a random node
+- node = self.cluster.get_random_node()
+- self.node = node
+- redis_connection = self.cluster.get_redis_connection(node)
+- self.connection_pool = redis_connection.connection_pool
+- self.connection = self.connection_pool.get_connection(
+- "pubsub", self.shard_hint
+- )
+- # register a callback that re-subscribes to any channels we
+- # were listening to when we were disconnected
+- self.connection.register_connect_callback(self.on_connect)
+- connection = self.connection
+- self._execute(connection, connection.send_command, *args)
+-
+- def get_redis_connection(self):
+- """
+- Get the Redis connection of the pubsub connected node.
+- """
+- if self.node is not None:
+- return self.node.redis_connection
+-
+-
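# Illustrative sketch (not part of the patch): the node-selection rule that
# ClusterPubSub.execute_command describes above. The first channel is hashed
# with the cluster CRC to find its keyslot, and the nodes manager then picks
# a node serving that slot. Only redis.crc.key_slot is used here; the
# nodes-manager call is left commented out because it needs a live cluster.
from redis.crc import key_slot

channel = b"notifications"
slot = key_slot(channel)  # same CRC16-based hashing RedisCluster.keyslot relies on
# node = cluster.nodes_manager.get_node_from_slot(slot, cluster.read_from_replicas)
print(f"channel {channel!r} maps to slot {slot}")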
+-class ClusterPipeline(RedisCluster):
+- """
+- Support for Redis pipeline
+- in cluster mode
+- """
+-
+- ERRORS_ALLOW_RETRY = (
+- ConnectionError,
+- TimeoutError,
+- MovedError,
+- AskError,
+- TryAgainError,
+- )
+-
+- def __init__(
+- self,
+- nodes_manager,
+- result_callbacks=None,
+- cluster_response_callbacks=None,
+- startup_nodes=None,
+- read_from_replicas=False,
+- cluster_error_retry_attempts=5,
+- reinitialize_steps=10,
+- **kwargs,
+- ):
+- """ """
+- log.info("Creating new instance of ClusterPipeline")
+- self.command_stack = []
+- self.nodes_manager = nodes_manager
+- self.refresh_table_asap = False
+- self.result_callbacks = (
+- result_callbacks or self.__class__.RESULT_CALLBACKS.copy()
+- )
+- self.startup_nodes = startup_nodes if startup_nodes else []
+- self.read_from_replicas = read_from_replicas
+- self.command_flags = self.__class__.COMMAND_FLAGS.copy()
+- self.cluster_response_callbacks = cluster_response_callbacks
+- self.cluster_error_retry_attempts = cluster_error_retry_attempts
+- self.reinitialize_counter = 0
+- self.reinitialize_steps = reinitialize_steps
+- self.encoder = Encoder(
+- kwargs.get("encoding", "utf-8"),
+- kwargs.get("encoding_errors", "strict"),
+- kwargs.get("decode_responses", False),
+- )
+-
+- # The commands parser refers to the parent
+- # so that we don't push the COMMAND command
+- # onto the stack
+- self.commands_parser = CommandsParser(super())
+-
+- def __repr__(self):
+- """ """
+- return f"{type(self).__name__}"
+-
+- def __enter__(self):
+- """ """
+- return self
+-
+- def __exit__(self, exc_type, exc_value, traceback):
+- """ """
+- self.reset()
+-
+- def __del__(self):
+- try:
+- self.reset()
+- except Exception:
+- pass
+-
+- def __len__(self):
+- """ """
+- return len(self.command_stack)
+-
+- def __nonzero__(self):
+- "Pipeline instances should always evaluate to True on Python 2.7"
+- return True
+-
+- def __bool__(self):
+- "Pipeline instances should always evaluate to True on Python 3+"
+- return True
+-
+- def execute_command(self, *args, **kwargs):
+- """
+- Wrapper function for pipeline_execute_command
+- """
+- return self.pipeline_execute_command(*args, **kwargs)
+-
+- def pipeline_execute_command(self, *args, **options):
+- """
+- Appends the executed command to the pipeline's command stack
+- """
+- self.command_stack.append(
+- PipelineCommand(args, options, len(self.command_stack))
+- )
+- return self
+-
+- def raise_first_error(self, stack):
+- """
+- Raise the first exception on the stack
+- """
+- for c in stack:
+- r = c.result
+- if isinstance(r, Exception):
+- self.annotate_exception(r, c.position + 1, c.args)
+- raise r
+-
+- def annotate_exception(self, exception, number, command):
+- """
+- Provides extra context to the exception prior to it being handled
+- """
+- cmd = " ".join(map(safe_str, command))
+- msg = (
+- f"Command # {number} ({cmd}) of pipeline "
+- f"caused error: {exception.args[0]}"
+- )
+- exception.args = (msg,) + exception.args[1:]
+-
+- def execute(self, raise_on_error=True):
+- """
+- Execute all the commands in the current pipeline
+- """
+- stack = self.command_stack
+- try:
+- return self.send_cluster_commands(stack, raise_on_error)
+- finally:
+- self.reset()
+-
+- def reset(self):
+- """
+- Reset back to empty pipeline.
+- """
+- self.command_stack = []
+-
+- self.scripts = set()
+-
+- # TODO: Implement
+- # make sure to reset the connection state in the event that we were
+- # watching something
+- # if self.watching and self.connection:
+- # try:
+- # # call this manually since our unwatch or
+- # # immediate_execute_command methods can call reset()
+- # self.connection.send_command('UNWATCH')
+- # self.connection.read_response()
+- # except ConnectionError:
+- # # disconnect will also remove any previous WATCHes
+- # self.connection.disconnect()
+-
+- # clean up the other instance attributes
+- self.watching = False
+- self.explicit_transaction = False
+-
+- # TODO: Implement
+- # we can safely return the connection to the pool here since we're
+- # sure we're no longer WATCHing anything
+- # if self.connection:
+- # self.connection_pool.release(self.connection)
+- # self.connection = None
+-
+- def send_cluster_commands(
+- self, stack, raise_on_error=True, allow_redirections=True
+- ):
+- """
+- Wrapper for CLUSTERDOWN error handling.
+-
+- If the cluster reports it is down it is assumed that:
+- - connection_pool was disconnected
+- - connection_pool was reset
+- - refresh_table_asap was set to True
+-
+- It will retry the number of times specified by
+- the config option "self.cluster_error_retry_attempts",
+- which defaults to 5 here unless manually configured.
+-
+- If all of those attempts fail, a ClusterDownError is
+- raised back to the caller.
+- """
+- if not stack:
+- return []
+-
+- for _ in range(0, self.cluster_error_retry_attempts):
+- try:
+- return self._send_cluster_commands(
+- stack,
+- raise_on_error=raise_on_error,
+- allow_redirections=allow_redirections,
+- )
+- except ClusterDownError:
+- # Try again with the new cluster setup. All other errors
+- # should be raised.
+- pass
+-
+- # If it fails the configured number of times then raise
+- # exception back to caller of this method
+- raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
+-
+- def _send_cluster_commands(
+- self, stack, raise_on_error=True, allow_redirections=True
+- ):
+- """
+- Send a bunch of cluster commands to the redis cluster.
+-
+- `allow_redirections`: whether the pipeline should follow
+- `ASK` & `MOVED` redirections automatically. If set
+- to False, a RedisClusterException is raised instead.
+- """
+- # the first time sending the commands we send all of
+- # the commands that were queued up.
+- # if we have to run through it again, we only retry
+- # the commands that failed.
+- attempt = sorted(stack, key=lambda x: x.position)
+-
+- # build a mapping of node name -> NodeCommands for the nodes we need to contact
+- nodes = {}
+-
+- # as we move through each command that still needs to be processed,
+- # we figure out the slot number that command maps to, then from
+- # the slot determine the node.
+- for c in attempt:
+- # refer to our internal node -> slot table that
+- # tells us where a given
+- # command should route to.
+- slot = self.determine_slot(*c.args)
+- node = self.nodes_manager.get_node_from_slot(
+- slot, self.read_from_replicas and c.args[0] in READ_COMMANDS
+- )
+-
+- # now that we know the name of the node
+- # ( it's just a string in the form of host:port )
+- # we can build a list of commands for each node.
+- node_name = node.name
+- if node_name not in nodes:
+- redis_node = self.get_redis_connection(node)
+- connection = get_connection(redis_node, c.args)
+- nodes[node_name] = NodeCommands(
+- redis_node.parse_response, redis_node.connection_pool, connection
+- )
+-
+- nodes[node_name].append(c)
+-
+- # send the commands in sequence.
+- # we write to all the open sockets for each node first,
+- # before reading anything
+- # this allows us to flush all the requests out across the
+- # network essentially in parallel
+- # so that we can read them all in parallel as they come back.
+- # we don't multiplex on the sockets as they become available,
+- # but that shouldn't make too much difference.
+- node_commands = nodes.values()
+- for n in node_commands:
+- n.write()
+-
+- for n in node_commands:
+- n.read()
+-
+- # release all of the redis connections we allocated earlier
+- # back into the connection pool.
+- # we used to do this step as part of a try/finally block,
+- # but it is really dangerous to
+- # release connections back into the pool if for some
+- # reason the socket has data still left in it
+- # from a previous operation. The write and
+- # read operations already have try/catch around them for
+- # all known types of errors including connection
+- # and socket level errors.
+- # So if we hit an exception, something really bad
+- # happened and putting any of
+- # these connections back into the pool is a very bad idea.
+- # the socket might have unread buffer still sitting in it,
+- # and then the next time we read from it we pass the
+- # buffered result back from a previous command and
+- # every request sent to that connection after that will get
+- # a mismatched result.
+- for n in nodes.values():
+- n.connection_pool.release(n.connection)
+-
+- # if the response isn't an exception it is a
+- # valid response from the node
+- # we're all done with that command, YAY!
+- # if we have more commands to attempt, we've run into problems.
+- # collect all the commands we are allowed to retry.
+- # (MOVED, ASK, or connection errors or timeout errors)
+- attempt = sorted(
+- (
+- c
+- for c in attempt
+- if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
+- ),
+- key=lambda x: x.position,
+- )
+- if attempt and allow_redirections:
+- # RETRY MAGIC HAPPENS HERE!
+- # send these remaining commands one at a time using `execute_command`
+- # in the main client. This keeps our retry logic
+- # in one place mostly,
+- # and allows us to be more confident in correctness of behavior.
+- # at this point any speed gains from pipelining have been lost
+- # anyway, so we might as well make the best
+- # attempt to get the correct behavior.
+- #
+- # The client command will handle retries for each
+- # individual command sequentially as we pass each
+- # one into `execute_command`. Any exceptions
+- # that bubble out should only appear once all
+- # retries have been exhausted.
+- #
+- # If a lot of commands have failed, we'll be setting the
+- # flag to rebuild the slots table from scratch.
+- # So MOVED errors should correct themselves fairly quickly.
+- log.exception(
+- f"An exception occurred during pipeline execution. "
+- f"args: {attempt[-1].args}, "
+- f"error: {type(attempt[-1].result).__name__} "
+- f"{str(attempt[-1].result)}"
+- )
+- self.reinitialize_counter += 1
+- if self._should_reinitialized():
+- self.nodes_manager.initialize()
+- for c in attempt:
+- try:
+- # send each command individually like we
+- # do in the main client.
+- c.result = super().execute_command(*c.args, **c.options)
+- except RedisError as e:
+- c.result = e
+-
+- # turn the response back into a simple flat array that corresponds
+- # to the sequence of commands issued in the stack in pipeline.execute()
+- response = [c.result for c in sorted(stack, key=lambda x: x.position)]
+-
+- if raise_on_error:
+- self.raise_first_error(stack)
+-
+- return response
+-
+- def _fail_on_redirect(self, allow_redirections):
+- """ """
+- if not allow_redirections:
+- raise RedisClusterException(
+- "ASK & MOVED redirection not allowed in this pipeline"
+- )
+-
+- def eval(self):
+- """ """
+- raise RedisClusterException("method eval() is not implemented")
+-
+- def multi(self):
+- """ """
+- raise RedisClusterException("method multi() is not implemented")
+-
+- def immediate_execute_command(self, *args, **options):
+- """ """
+- raise RedisClusterException(
+- "method immediate_execute_command() is not implemented"
+- )
+-
+- def _execute_transaction(self, *args, **kwargs):
+- """ """
+- raise RedisClusterException("method _execute_transaction() is not implemented")
+-
+- def load_scripts(self):
+- """ """
+- raise RedisClusterException("method load_scripts() is not implemented")
+-
+- def watch(self, *names):
+- """ """
+- raise RedisClusterException("method watch() is not implemented")
+-
+- def unwatch(self):
+- """ """
+- raise RedisClusterException("method unwatch() is not implemented")
+-
+- def script_load_for_pipeline(self, *args, **kwargs):
+- """ """
+- raise RedisClusterException(
+- "method script_load_for_pipeline() is not implemented"
+- )
+-
+- def delete(self, *names):
+- """
+- "Delete a key specified by ``names``"
+- """
+- if len(names) != 1:
+- raise RedisClusterException(
+- "deleting multiple keys is not " "implemented in pipeline command"
+- )
+-
+- return self.execute_command("DEL", names[0])
+-
+-
+-def block_pipeline_command(func):
+- """
+- Raises an error because some pipelined commands should
+- be blocked when running in cluster mode
+- """
+-
+- def inner(*args, **kwargs):
+- raise RedisClusterException(
+- f"ERROR: Calling pipelined function {func.__name__} is blocked "
+- f"when running redis in cluster mode..."
+- )
+-
+- return inner
+-
+-
+-# Blocked pipeline commands
+-ClusterPipeline.bitop = block_pipeline_command(RedisCluster.bitop)
+-ClusterPipeline.brpoplpush = block_pipeline_command(RedisCluster.brpoplpush)
+-ClusterPipeline.client_getname = block_pipeline_command(RedisCluster.client_getname)
+-ClusterPipeline.client_list = block_pipeline_command(RedisCluster.client_list)
+-ClusterPipeline.client_setname = block_pipeline_command(RedisCluster.client_setname)
+-ClusterPipeline.config_set = block_pipeline_command(RedisCluster.config_set)
+-ClusterPipeline.dbsize = block_pipeline_command(RedisCluster.dbsize)
+-ClusterPipeline.flushall = block_pipeline_command(RedisCluster.flushall)
+-ClusterPipeline.flushdb = block_pipeline_command(RedisCluster.flushdb)
+-ClusterPipeline.keys = block_pipeline_command(RedisCluster.keys)
+-ClusterPipeline.mget = block_pipeline_command(RedisCluster.mget)
+-ClusterPipeline.move = block_pipeline_command(RedisCluster.move)
+-ClusterPipeline.mset = block_pipeline_command(RedisCluster.mset)
+-ClusterPipeline.msetnx = block_pipeline_command(RedisCluster.msetnx)
+-ClusterPipeline.pfmerge = block_pipeline_command(RedisCluster.pfmerge)
+-ClusterPipeline.pfcount = block_pipeline_command(RedisCluster.pfcount)
+-ClusterPipeline.ping = block_pipeline_command(RedisCluster.ping)
+-ClusterPipeline.publish = block_pipeline_command(RedisCluster.publish)
+-ClusterPipeline.randomkey = block_pipeline_command(RedisCluster.randomkey)
+-ClusterPipeline.rename = block_pipeline_command(RedisCluster.rename)
+-ClusterPipeline.renamenx = block_pipeline_command(RedisCluster.renamenx)
+-ClusterPipeline.rpoplpush = block_pipeline_command(RedisCluster.rpoplpush)
+-ClusterPipeline.scan = block_pipeline_command(RedisCluster.scan)
+-ClusterPipeline.sdiff = block_pipeline_command(RedisCluster.sdiff)
+-ClusterPipeline.sdiffstore = block_pipeline_command(RedisCluster.sdiffstore)
+-ClusterPipeline.sinter = block_pipeline_command(RedisCluster.sinter)
+-ClusterPipeline.sinterstore = block_pipeline_command(RedisCluster.sinterstore)
+-ClusterPipeline.smove = block_pipeline_command(RedisCluster.smove)
+-ClusterPipeline.sort = block_pipeline_command(RedisCluster.sort)
+-ClusterPipeline.sunion = block_pipeline_command(RedisCluster.sunion)
+-ClusterPipeline.sunionstore = block_pipeline_command(RedisCluster.sunionstore)
+-ClusterPipeline.readwrite = block_pipeline_command(RedisCluster.readwrite)
+-ClusterPipeline.readonly = block_pipeline_command(RedisCluster.readonly)
+-
+-
+-class PipelineCommand:
+- """ """
+-
+- def __init__(self, args, options=None, position=None):
+- self.args = args
+- if options is None:
+- options = {}
+- self.options = options
+- self.position = position
+- self.result = None
+- self.node = None
+- self.asking = False
+-
+-
+-class NodeCommands:
+- """ """
+-
+- def __init__(self, parse_response, connection_pool, connection):
+- """ """
+- self.parse_response = parse_response
+- self.connection_pool = connection_pool
+- self.connection = connection
+- self.commands = []
+-
+- def append(self, c):
+- """ """
+- self.commands.append(c)
+-
+- def write(self):
+- """
+- Code borrowed from redis-py so it can be adapted for cluster mode
+- """
+- connection = self.connection
+- commands = self.commands
+-
+- # We are going to clobber the commands with the write, so go ahead
+- # and ensure that nothing is sitting there from a previous run.
+- for c in commands:
+- c.result = None
+-
+- # build up all commands into a single request to increase network perf
+- # send all the commands and catch connection and timeout errors.
+- try:
+- connection.send_packed_command(
+- connection.pack_commands([c.args for c in commands])
+- )
+- except (ConnectionError, TimeoutError) as e:
+- for c in commands:
+- c.result = e
+-
+- def read(self):
+- """ """
+- connection = self.connection
+- for c in self.commands:
+-
+- # if there is a result on this command,
+- # it means we ran into an exception
+- # like a connection error. Trying to parse
+- # a response on a connection that
+- # is no longer open will result in a
+- # connection error raised by redis-py.
+- # but redis-py doesn't check in parse_response
+- # that the sock object is
+- # still set and if you try to
+- # read from a closed connection, it will
+- # result in an AttributeError because
+- # it will do a readline() call on None.
+- # This can have all kinds of nasty side-effects.
+- # Treating this case as a connection error
+- # is fine because it will dump
+- # the connection object back into the
+- # pool and on the next write, it will
+- # explicitly open the connection and all will be well.
+- if c.result is None:
+- try:
+- c.result = self.parse_response(connection, c.args[0], **c.options)
+- except (ConnectionError, TimeoutError) as e:
+- for c in self.commands:
+- c.result = e
+- return
+- except RedisError:
+- c.result = sys.exc_info()[1]
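# Illustrative sketch (not part of the patch): block_pipeline_command above
# simply swaps a method for one that raises, so commands that cannot be
# pipelined in cluster mode fail loudly instead of misbehaving. A minimal,
# self-contained demo of the same pattern with hypothetical names:
class RedisClusterException(Exception):
    pass


def block_pipeline_command(func):
    def inner(*args, **kwargs):
        raise RedisClusterException(
            f"ERROR: Calling pipelined function {func.__name__} is blocked "
            f"when running redis in cluster mode..."
        )

    return inner


class DemoPipeline:
    def keys(self):
        return []


DemoPipeline.keys = block_pipeline_command(DemoPipeline.keys)

try:
    DemoPipeline().keys()
except RedisClusterException as exc:
    print(exc)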
diff --git a/redis/commands/__init__.py b/redis/commands/__init__.py
-index f1ddaaa..6e4aa0f 100644
+index 07fa7f1..d6fea59 100644
--- a/redis/commands/__init__.py
+++ b/redis/commands/__init__.py
-@@ -1,11 +1,9 @@
+@@ -1,15 +1,11 @@
+-from .cluster import RedisClusterCommands
from .core import CoreCommands
--from .redismodules import RedisModuleCommands
from .helpers import list_or_args
- from .sentinel import SentinelCommands
+ from .parser import CommandsParser
+-from .redismodules import RedisModuleCommands
+-from .sentinel import SentinelCommands
__all__ = [
- 'CoreCommands',
-- 'RedisModuleCommands',
- 'SentinelCommands',
- 'list_or_args'
+- "RedisClusterCommands",
+ "CommandsParser",
+ "CoreCommands",
+ "list_or_args",
+- "RedisModuleCommands",
+- "SentinelCommands",
++ 'CoreCommands',
++ 'list_or_args'
]
+diff --git a/redis/commands/bf/__init__.py b/redis/commands/bf/__init__.py
+deleted file mode 100644
+index f34e11d..0000000
+--- a/redis/commands/bf/__init__.py
++++ /dev/null
+@@ -1,204 +0,0 @@
+-from redis.client import bool_ok
+-
+-from ..helpers import parse_to_list
+-from .commands import * # noqa
+-from .info import BFInfo, CFInfo, CMSInfo, TDigestInfo, TopKInfo
+-
+-
+-class AbstractBloom(object):
+- """
+- The client allows interacting with RedisBloom and using all of
+- its functionality.
+-
+- - BF for Bloom Filter
+- - CF for Cuckoo Filter
+- - CMS for Count-Min Sketch
+- - TOPK for TopK Data Structure
+- - TDIGEST for estimating rank statistics
+- """
+-
+- @staticmethod
+- def appendItems(params, items):
+- """Append ITEMS to params."""
+- params.extend(["ITEMS"])
+- params += items
+-
+- @staticmethod
+- def appendError(params, error):
+- """Append ERROR to params."""
+- if error is not None:
+- params.extend(["ERROR", error])
+-
+- @staticmethod
+- def appendCapacity(params, capacity):
+- """Append CAPACITY to params."""
+- if capacity is not None:
+- params.extend(["CAPACITY", capacity])
+-
+- @staticmethod
+- def appendExpansion(params, expansion):
+- """Append EXPANSION to params."""
+- if expansion is not None:
+- params.extend(["EXPANSION", expansion])
+-
+- @staticmethod
+- def appendNoScale(params, noScale):
+- """Append NONSCALING tag to params."""
+- if noScale is not None:
+- params.extend(["NONSCALING"])
+-
+- @staticmethod
+- def appendWeights(params, weights):
+- """Append WEIGHTS to params."""
+- if len(weights) > 0:
+- params.append("WEIGHTS")
+- params += weights
+-
+- @staticmethod
+- def appendNoCreate(params, noCreate):
+- """Append NOCREATE tag to params."""
+- if noCreate is not None:
+- params.extend(["NOCREATE"])
+-
+- @staticmethod
+- def appendItemsAndIncrements(params, items, increments):
+- """Append pairs of items and increments to params."""
+- for i in range(len(items)):
+- params.append(items[i])
+- params.append(increments[i])
+-
+- @staticmethod
+- def appendValuesAndWeights(params, items, weights):
+- """Append pairs of items and weights to params."""
+- for i in range(len(items)):
+- params.append(items[i])
+- params.append(weights[i])
+-
+- @staticmethod
+- def appendMaxIterations(params, max_iterations):
+- """Append MAXITERATIONS to params."""
+- if max_iterations is not None:
+- params.extend(["MAXITERATIONS", max_iterations])
+-
+- @staticmethod
+- def appendBucketSize(params, bucket_size):
+- """Append BUCKETSIZE to params."""
+- if bucket_size is not None:
+- params.extend(["BUCKETSIZE", bucket_size])
+-
+-
+-class CMSBloom(CMSCommands, AbstractBloom):
+- def __init__(self, client, **kwargs):
+- """Create a new RedisBloom client."""
+- # Set the module commands' callbacks
+- MODULE_CALLBACKS = {
+- CMS_INITBYDIM: bool_ok,
+- CMS_INITBYPROB: bool_ok,
+- # CMS_INCRBY: spaceHolder,
+- # CMS_QUERY: spaceHolder,
+- CMS_MERGE: bool_ok,
+- CMS_INFO: CMSInfo,
+- }
+-
+- self.client = client
+- self.commandmixin = CMSCommands
+- self.execute_command = client.execute_command
+-
+- for k, v in MODULE_CALLBACKS.items():
+- self.client.set_response_callback(k, v)
+-
+-
+-class TOPKBloom(TOPKCommands, AbstractBloom):
+- def __init__(self, client, **kwargs):
+- """Create a new RedisBloom client."""
+- # Set the module commands' callbacks
+- MODULE_CALLBACKS = {
+- TOPK_RESERVE: bool_ok,
+- TOPK_ADD: parse_to_list,
+- TOPK_INCRBY: parse_to_list,
+- # TOPK_QUERY: spaceHolder,
+- # TOPK_COUNT: spaceHolder,
+- TOPK_LIST: parse_to_list,
+- TOPK_INFO: TopKInfo,
+- }
+-
+- self.client = client
+- self.commandmixin = TOPKCommands
+- self.execute_command = client.execute_command
+-
+- for k, v in MODULE_CALLBACKS.items():
+- self.client.set_response_callback(k, v)
+-
+-
+-class CFBloom(CFCommands, AbstractBloom):
+- def __init__(self, client, **kwargs):
+- """Create a new RedisBloom client."""
+- # Set the module commands' callbacks
+- MODULE_CALLBACKS = {
+- CF_RESERVE: bool_ok,
+- # CF_ADD: spaceHolder,
+- # CF_ADDNX: spaceHolder,
+- # CF_INSERT: spaceHolder,
+- # CF_INSERTNX: spaceHolder,
+- # CF_EXISTS: spaceHolder,
+- # CF_DEL: spaceHolder,
+- # CF_COUNT: spaceHolder,
+- # CF_SCANDUMP: spaceHolder,
+- # CF_LOADCHUNK: spaceHolder,
+- CF_INFO: CFInfo,
+- }
+-
+- self.client = client
+- self.commandmixin = CFCommands
+- self.execute_command = client.execute_command
+-
+- for k, v in MODULE_CALLBACKS.items():
+- self.client.set_response_callback(k, v)
+-
+-
+-class TDigestBloom(TDigestCommands, AbstractBloom):
+- def __init__(self, client, **kwargs):
+- """Create a new RedisBloom client."""
+- # Set the module commands' callbacks
+- MODULE_CALLBACKS = {
+- TDIGEST_CREATE: bool_ok,
+- # TDIGEST_RESET: bool_ok,
+- # TDIGEST_ADD: spaceHolder,
+- # TDIGEST_MERGE: spaceHolder,
+- TDIGEST_CDF: float,
+- TDIGEST_QUANTILE: float,
+- TDIGEST_MIN: float,
+- TDIGEST_MAX: float,
+- TDIGEST_INFO: TDigestInfo,
+- }
+-
+- self.client = client
+- self.commandmixin = TDigestCommands
+- self.execute_command = client.execute_command
+-
+- for k, v in MODULE_CALLBACKS.items():
+- self.client.set_response_callback(k, v)
+-
+-
+-class BFBloom(BFCommands, AbstractBloom):
+- def __init__(self, client, **kwargs):
+- """Create a new RedisBloom client."""
+- # Set the module commands' callbacks
+- MODULE_CALLBACKS = {
+- BF_RESERVE: bool_ok,
+- # BF_ADD: spaceHolder,
+- # BF_MADD: spaceHolder,
+- # BF_INSERT: spaceHolder,
+- # BF_EXISTS: spaceHolder,
+- # BF_MEXISTS: spaceHolder,
+- # BF_SCANDUMP: spaceHolder,
+- # BF_LOADCHUNK: spaceHolder,
+- BF_INFO: BFInfo,
+- }
+-
+- self.client = client
+- self.commandmixin = BFCommands
+- self.execute_command = client.execute_command
+-
+- for k, v in MODULE_CALLBACKS.items():
+- self.client.set_response_callback(k, v)
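# Rough usage sketch (not part of the patch) of the wrapper deleted above,
# i.e. the pre-patch upstream API. It assumes a reachable Redis server with
# the nonfree RedisBloom module loaded (exactly the dependency this patch
# drops), so it no longer works once the patch is applied.
import redis
from redis.commands.bf import BFBloom  # import path removed by this patch

r = redis.Redis(host="localhost", port=6379)
bf = BFBloom(r)                     # registers the BF.* response callbacks on r
bf.create("bf:demo", 0.01, 1000)    # BF.RESERVE with a 1% error rate
bf.add("bf:demo", "foo")            # BF.ADD
print(bf.exists("bf:demo", "foo"))  # BF.EXISTS -> 1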
+diff --git a/redis/commands/bf/commands.py b/redis/commands/bf/commands.py
+deleted file mode 100644
+index 7fc507d..0000000
+--- a/redis/commands/bf/commands.py
++++ /dev/null
+@@ -1,498 +0,0 @@
+-from redis.client import NEVER_DECODE
+-from redis.exceptions import ModuleError
+-from redis.utils import HIREDIS_AVAILABLE
+-
+-BF_RESERVE = "BF.RESERVE"
+-BF_ADD = "BF.ADD"
+-BF_MADD = "BF.MADD"
+-BF_INSERT = "BF.INSERT"
+-BF_EXISTS = "BF.EXISTS"
+-BF_MEXISTS = "BF.MEXISTS"
+-BF_SCANDUMP = "BF.SCANDUMP"
+-BF_LOADCHUNK = "BF.LOADCHUNK"
+-BF_INFO = "BF.INFO"
+-
+-CF_RESERVE = "CF.RESERVE"
+-CF_ADD = "CF.ADD"
+-CF_ADDNX = "CF.ADDNX"
+-CF_INSERT = "CF.INSERT"
+-CF_INSERTNX = "CF.INSERTNX"
+-CF_EXISTS = "CF.EXISTS"
+-CF_DEL = "CF.DEL"
+-CF_COUNT = "CF.COUNT"
+-CF_SCANDUMP = "CF.SCANDUMP"
+-CF_LOADCHUNK = "CF.LOADCHUNK"
+-CF_INFO = "CF.INFO"
+-
+-CMS_INITBYDIM = "CMS.INITBYDIM"
+-CMS_INITBYPROB = "CMS.INITBYPROB"
+-CMS_INCRBY = "CMS.INCRBY"
+-CMS_QUERY = "CMS.QUERY"
+-CMS_MERGE = "CMS.MERGE"
+-CMS_INFO = "CMS.INFO"
+-
+-TOPK_RESERVE = "TOPK.RESERVE"
+-TOPK_ADD = "TOPK.ADD"
+-TOPK_INCRBY = "TOPK.INCRBY"
+-TOPK_QUERY = "TOPK.QUERY"
+-TOPK_COUNT = "TOPK.COUNT"
+-TOPK_LIST = "TOPK.LIST"
+-TOPK_INFO = "TOPK.INFO"
+-
+-TDIGEST_CREATE = "TDIGEST.CREATE"
+-TDIGEST_RESET = "TDIGEST.RESET"
+-TDIGEST_ADD = "TDIGEST.ADD"
+-TDIGEST_MERGE = "TDIGEST.MERGE"
+-TDIGEST_CDF = "TDIGEST.CDF"
+-TDIGEST_QUANTILE = "TDIGEST.QUANTILE"
+-TDIGEST_MIN = "TDIGEST.MIN"
+-TDIGEST_MAX = "TDIGEST.MAX"
+-TDIGEST_INFO = "TDIGEST.INFO"
+-
+-
+-class BFCommands:
+- """Bloom Filter commands."""
+-
+- # region Bloom Filter Functions
+- def create(self, key, errorRate, capacity, expansion=None, noScale=None):
+- """
+- Create a new Bloom Filter `key` with the desired probability of false
+- positives `errorRate` and the expected number of entries `capacity`.
+- The default expansion value is 2. By default, the filter is auto-scaling.
+- For more information see `BF.RESERVE <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfreserve>`_.
+- """ # noqa
+- params = [key, errorRate, capacity]
+- self.appendExpansion(params, expansion)
+- self.appendNoScale(params, noScale)
+- return self.execute_command(BF_RESERVE, *params)
+-
+- def add(self, key, item):
+- """
+- Add an `item` to a Bloom Filter `key`.
+- For more information see `BF.ADD <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfadd>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(BF_ADD, *params)
+-
+- def madd(self, key, *items):
+- """
+- Add multiple `items` to a Bloom Filter `key`.
+- For more information see `BF.MADD <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfmadd>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(BF_MADD, *params)
+-
+- def insert(
+- self,
+- key,
+- items,
+- capacity=None,
+- error=None,
+- noCreate=None,
+- expansion=None,
+- noScale=None,
+- ):
+- """
+- Add multiple `items` to a Bloom Filter `key`.
+-
+- If `noCreate` remains `None` and `key` does not exist, a new Bloom Filter
+- `key` will be created with the desired probability of false positives `error`
+- and the expected number of entries `capacity`.
+- For more information see `BF.INSERT <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfinsert>`_.
+- """ # noqa
+- params = [key]
+- self.appendCapacity(params, capacity)
+- self.appendError(params, error)
+- self.appendExpansion(params, expansion)
+- self.appendNoCreate(params, noCreate)
+- self.appendNoScale(params, noScale)
+- self.appendItems(params, items)
+-
+- return self.execute_command(BF_INSERT, *params)
+-
+- def exists(self, key, item):
+- """
+- Check whether an `item` exists in Bloom Filter `key`.
+- For more information see `BF.EXISTS <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfexists>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(BF_EXISTS, *params)
+-
+- def mexists(self, key, *items):
+- """
+- Check whether `items` exist in Bloom Filter `key`.
+- For more information see `BF.MEXISTS <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfmexists>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(BF_MEXISTS, *params)
+-
+- def scandump(self, key, iter):
+- """
+- Begin an incremental save of the bloom filter `key`.
+-
+- This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model.
+- The first time this command is called, the value of `iter` should be 0.
+- This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
+- For more information see `BF.SCANDUMP <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfscandump>`_.
+- """ # noqa
+- if HIREDIS_AVAILABLE:
+- raise ModuleError("This command cannot be used when hiredis is available.")
+-
+- params = [key, iter]
+- options = {}
+- options[NEVER_DECODE] = []
+- return self.execute_command(BF_SCANDUMP, *params, **options)
+-
+- def loadchunk(self, key, iter, data):
+- """
+- Restore a filter previously saved using SCANDUMP.
+-
+- See the SCANDUMP command for example usage.
+- This command will overwrite any bloom filter stored under key.
+- Ensure that the bloom filter will not be modified between invocations.
+- For more information see `BF.LOADCHUNK <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfloadchunk>`_.
+- """ # noqa
+- params = [key, iter, data]
+- return self.execute_command(BF_LOADCHUNK, *params)
+-
+- def info(self, key):
+- """
+- Return capacity, size, number of filters, number of items inserted, and expansion rate.
+- For more information see `BF.INFO <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfinfo>`_.
+- """ # noqa
+- return self.execute_command(BF_INFO, key)
+-
+-
+-class CFCommands:
+- """Cuckoo Filter commands."""
+-
+- # region Cuckoo Filter Functions
+- def create(
+- self, key, capacity, expansion=None, bucket_size=None, max_iterations=None
+- ):
+- """
+- Create a new Cuckoo Filter `key` with an initial `capacity` of items.
+- For more information see `CF.RESERVE <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfreserve>`_.
+- """ # noqa
+- params = [key, capacity]
+- self.appendExpansion(params, expansion)
+- self.appendBucketSize(params, bucket_size)
+- self.appendMaxIterations(params, max_iterations)
+- return self.execute_command(CF_RESERVE, *params)
+-
+- def add(self, key, item):
+- """
+- Add an `item` to a Cuckoo Filter `key`.
+- For more information see `CF.ADD <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfadd>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(CF_ADD, *params)
+-
+- def addnx(self, key, item):
+- """
+- Add an `item` to a Cuckoo Filter `key` only if the item does not yet exist.
+- This command might be slower than `add`.
+- For more information see `CF.ADDNX <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfaddnx>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(CF_ADDNX, *params)
+-
+- def insert(self, key, items, capacity=None, nocreate=None):
+- """
+- Add multiple `items` to a Cuckoo Filter `key`, allowing the filter
+- to be created with a custom `capacity` if it does not yet exist.
+- `items` must be provided as a list.
+- For more information see `CF.INSERT <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinsert>`_.
+- """ # noqa
+- params = [key]
+- self.appendCapacity(params, capacity)
+- self.appendNoCreate(params, nocreate)
+- self.appendItems(params, items)
+- return self.execute_command(CF_INSERT, *params)
+-
+- def insertnx(self, key, items, capacity=None, nocreate=None):
+- """
+- Add multiple `items` to a Cuckoo Filter `key` only if they do not exist yet,
+- allowing the filter to be created with a custom `capacity` if it does not yet exist.
+- `items` must be provided as a list.
+- For more information see `CF.INSERTNX <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinsertnx>`_.
+- """ # noqa
+- params = [key]
+- self.appendCapacity(params, capacity)
+- self.appendNoCreate(params, nocreate)
+- self.appendItems(params, items)
+- return self.execute_command(CF_INSERTNX, *params)
+-
+- def exists(self, key, item):
+- """
+- Check whether an `item` exists in Cuckoo Filter `key`.
+- For more information see `CF.EXISTS <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfexists>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(CF_EXISTS, *params)
+-
+- def delete(self, key, item):
+- """
+- Delete `item` from `key`.
+- For more information see `CF.DEL <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfdel>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(CF_DEL, *params)
+-
+- def count(self, key, item):
+- """
+- Return the number of times an `item` may be in the `key`.
+- For more information see `CF.COUNT <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfcount>`_.
+- """ # noqa
+- params = [key, item]
+- return self.execute_command(CF_COUNT, *params)
+-
+- def scandump(self, key, iter):
+- """
+- Begin an incremental save of the Cuckoo filter `key`.
+-
+- This is useful for large Cuckoo filters which cannot fit into the normal
+- SAVE and RESTORE model.
+- The first time this command is called, the value of `iter` should be 0.
+- This command will return successive (iter, data) pairs until
+- (0, NULL) to indicate completion.
+- For more information see `CF.SCANDUMP <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfscandump>`_.
+- """ # noqa
+- params = [key, iter]
+- return self.execute_command(CF_SCANDUMP, *params)
+-
+- def loadchunk(self, key, iter, data):
+- """
+- Restore a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage.
+-
+- This command will overwrite any Cuckoo filter stored under key.
+- Ensure that the Cuckoo filter will not be modified between invocations.
+- For more information see `CF.LOADCHUNK <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfloadchunk>`_.
+- """ # noqa
+- params = [key, iter, data]
+- return self.execute_command(CF_LOADCHUNK, *params)
+-
+- def info(self, key):
+- """
+- Return size, number of buckets, number of filters, number of items inserted,
+- number of items deleted, bucket size, expansion rate, and max iterations.
+- For more information see `CF.INFO <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinfo>`_.
+- """ # noqa
+- return self.execute_command(CF_INFO, key)
+-
+-
+-class TOPKCommands:
+- """TOP-k Filter commands."""
+-
+- def reserve(self, key, k, width, depth, decay):
+- """
+- Create a new Top-K Filter `key` with the given `k`, `width`, `depth`
+- and `decay` parameters.
+- For more information see `TOPK.RESERVE <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkreserve>`_.
+- """ # noqa
+- params = [key, k, width, depth, decay]
+- return self.execute_command(TOPK_RESERVE, *params)
+-
+- def add(self, key, *items):
+- """
+- Add one or more `items` to a Top-K Filter `key`.
+- For more information see `TOPK.ADD <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkadd>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(TOPK_ADD, *params)
+-
+- def incrby(self, key, items, increments):
+- """
+- Add/increase `items` in a Top-K Sketch `key` by `increments`.
+- Both `items` and `increments` are lists.
+- For more information see `TOPK.INCRBY <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkincrby>`_.
+-
+- Example:
+-
+- >>> topkincrby('A', ['foo'], [1])
+- """ # noqa
+- params = [key]
+- self.appendItemsAndIncrements(params, items, increments)
+- return self.execute_command(TOPK_INCRBY, *params)
+-
+- def query(self, key, *items):
+- """
+- Check whether one or more `items` are Top-K items at `key`.
+- For more information see `TOPK.QUERY <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkquery>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(TOPK_QUERY, *params)
+-
+- def count(self, key, *items):
+- """
+- Return the count for one or more `items` from `key`.
+- For more information see `TOPK.COUNT <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkcount>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(TOPK_COUNT, *params)
+-
+- def list(self, key, withcount=False):
+- """
+- Return full list of items in Top-K list of `key`.
+- If `withcount` set to True, return full list of items
+- with probabilistic count in Top-K list of `key`.
+- For more information see `TOPK.LIST <https://oss.redis.com/redisbloom/master/TopK_Commands/#topklist>`_.
+- """ # noqa
+- params = [key]
+- if withcount:
+- params.append("WITHCOUNT")
+- return self.execute_command(TOPK_LIST, *params)
+-
+- def info(self, key):
+- """
+- Return k, width, depth and decay values of `key`.
+- For more information see `TOPK.INFO <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkinfo>`_.
+- """ # noqa
+- return self.execute_command(TOPK_INFO, key)
+-
+-
+-class TDigestCommands:
+- def create(self, key, compression):
+- """
+- Allocate the memory and initialize the t-digest.
+- For more information see `TDIGEST.CREATE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestcreate>`_.
+- """ # noqa
+- params = [key, compression]
+- return self.execute_command(TDIGEST_CREATE, *params)
+-
+- def reset(self, key):
+- """
+- Reset the sketch `key` to zero - empty out the sketch and re-initialize it.
+- For more information see `TDIGEST.RESET <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestreset>`_.
+- """ # noqa
+- return self.execute_command(TDIGEST_RESET, key)
+-
+- def add(self, key, values, weights):
+- """
+- Add one or more samples (value with weight) to a sketch `key`.
+- Both `values` and `weights` are lists.
+- For more information see `TDIGEST.ADD <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestadd>`_.
+-
+- Example:
+-
+- >>> tdigestadd('A', [1500.0], [1.0])
+- """ # noqa
+- params = [key]
+- self.appendValuesAndWeights(params, values, weights)
+- return self.execute_command(TDIGEST_ADD, *params)
+-
+- def merge(self, toKey, fromKey):
+- """
+- Merge all of the values from 'fromKey' to 'toKey' sketch.
+- For more information see `TDIGEST.MERGE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmerge>`_.
+- """ # noqa
+- params = [toKey, fromKey]
+- return self.execute_command(TDIGEST_MERGE, *params)
+-
+- def min(self, key):
+- """
+- Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty.
+- For more information see `TDIGEST.MIN <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmin>`_.
+- """ # noqa
+- return self.execute_command(TDIGEST_MIN, key)
+-
+- def max(self, key):
+- """
+- Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty.
+- For more information see `TDIGEST.MAX <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmax>`_.
+- """ # noqa
+- return self.execute_command(TDIGEST_MAX, key)
+-
+- def quantile(self, key, quantile):
+- """
+- Return double value estimate of the cutoff such that a specified fraction of the data
+- added to this TDigest would be less than or equal to the cutoff.
+- For more information see `TDIGEST.QUANTILE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestquantile>`_.
+- """ # noqa
+- params = [key, quantile]
+- return self.execute_command(TDIGEST_QUANTILE, *params)
+-
+- def cdf(self, key, value):
+- """
+- Return double fraction of all points added which are <= value.
+- For more information see `TDIGEST.CDF <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestcdf>`_.
+- """ # noqa
+- params = [key, value]
+- return self.execute_command(TDIGEST_CDF, *params)
+-
+- def info(self, key):
+- """
+- Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight
+- and Total Compressions.
+- For more information see `TDIGEST.INFO <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestinfo>`_.
+- """ # noqa
+- return self.execute_command(TDIGEST_INFO, key)
+-
+-
+-class CMSCommands:
+- """Count-Min Sketch Commands"""
+-
+- # region Count-Min Sketch Functions
+- def initbydim(self, key, width, depth):
+- """
+- Initialize a Count-Min Sketch `key` to dimensions (`width`, `depth`) specified by user.
+- For more information see `CMS.INITBYDIM <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinitbydim>`_.
+- """ # noqa
+- params = [key, width, depth]
+- return self.execute_command(CMS_INITBYDIM, *params)
+-
+- def initbyprob(self, key, error, probability):
+- """
+- Initialize a Count-Min Sketch `key` to characteristics (`error`, `probability`) specified by user.
+- For more information see `CMS.INITBYPROB <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinitbyprob>`_.
+- """ # noqa
+- params = [key, error, probability]
+- return self.execute_command(CMS_INITBYPROB, *params)
+-
+- def incrby(self, key, items, increments):
+- """
+- Add/increase `items` in a Count-Min Sketch `key` by `increments`.
+- Both `items` and `increments` are lists.
+- For more information see `CMS.INCRBY <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsincrby>`_.
+-
+- Example:
+-
+- >>> cmsincrby('A', ['foo'], [1])
+- """ # noqa
+- params = [key]
+- self.appendItemsAndIncrements(params, items, increments)
+- return self.execute_command(CMS_INCRBY, *params)
+-
+- def query(self, key, *items):
+- """
+- Return count for an `item` from `key`. Multiple items can be queried with one call.
+- For more information see `CMS.QUERY <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsquery>`_.
+- """ # noqa
+- params = [key]
+- params += items
+- return self.execute_command(CMS_QUERY, *params)
+-
+- def merge(self, destKey, numKeys, srcKeys, weights=[]):
+- """
+- Merge `numKeys` of sketches into `destKey`. Sketches specified in `srcKeys`.
+- All sketches must have identical width and depth.
+- `Weights` can be used to multiply certain sketches. Default weight is 1.
+- Both `srcKeys` and `weights` are lists.
+- For more information see `CMS.MERGE <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsmerge>`_.
+- """ # noqa
+- params = [destKey, numKeys]
+- params += srcKeys
+- self.appendWeights(params, weights)
+- return self.execute_command(CMS_MERGE, *params)
+-
+- def info(self, key):
+- """
+- Return width, depth and total count of the sketch.
+- For more information see `CMS.INFO <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinfo>`_.
+- """ # noqa
+- return self.execute_command(CMS_INFO, key)
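# Rough sketch (not part of the patch) of the Top-K helpers deleted above,
# again the pre-patch upstream API that needs the nonfree RedisBloom module
# loaded on the server side.
import redis
from redis.commands.bf import TOPKBloom  # wrapper removed by this patch

topk = TOPKBloom(redis.Redis())
topk.reserve("topk:demo", 3, 8, 7, 0.9)        # TOPK.RESERVE key k width depth decay
topk.add("topk:demo", "a", "b", "a")           # TOPK.ADD
topk.incrby("topk:demo", ["a"], [5])           # TOPK.INCRBY; items and increments are lists
print(topk.list("topk:demo", withcount=True))  # TOPK.LIST WITHCOUNT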
+diff --git a/redis/commands/bf/info.py b/redis/commands/bf/info.py
+deleted file mode 100644
+index 24c5419..0000000
+--- a/redis/commands/bf/info.py
++++ /dev/null
+@@ -1,85 +0,0 @@
+-from ..helpers import nativestr
+-
+-
+-class BFInfo(object):
+- capacity = None
+- size = None
+- filterNum = None
+- insertedNum = None
+- expansionRate = None
+-
+- def __init__(self, args):
+- response = dict(zip(map(nativestr, args[::2]), args[1::2]))
+- self.capacity = response["Capacity"]
+- self.size = response["Size"]
+- self.filterNum = response["Number of filters"]
+- self.insertedNum = response["Number of items inserted"]
+- self.expansionRate = response["Expansion rate"]
+-
+-
+-class CFInfo(object):
+- size = None
+- bucketNum = None
+- filterNum = None
+- insertedNum = None
+- deletedNum = None
+- bucketSize = None
+- expansionRate = None
+- maxIteration = None
+-
+- def __init__(self, args):
+- response = dict(zip(map(nativestr, args[::2]), args[1::2]))
+- self.size = response["Size"]
+- self.bucketNum = response["Number of buckets"]
+- self.filterNum = response["Number of filters"]
+- self.insertedNum = response["Number of items inserted"]
+- self.deletedNum = response["Number of items deleted"]
+- self.bucketSize = response["Bucket size"]
+- self.expansionRate = response["Expansion rate"]
+- self.maxIteration = response["Max iterations"]
+-
+-
+-class CMSInfo(object):
+- width = None
+- depth = None
+- count = None
+-
+- def __init__(self, args):
+- response = dict(zip(map(nativestr, args[::2]), args[1::2]))
+- self.width = response["width"]
+- self.depth = response["depth"]
+- self.count = response["count"]
+-
+-
+-class TopKInfo(object):
+- k = None
+- width = None
+- depth = None
+- decay = None
+-
+- def __init__(self, args):
+- response = dict(zip(map(nativestr, args[::2]), args[1::2]))
+- self.k = response["k"]
+- self.width = response["width"]
+- self.depth = response["depth"]
+- self.decay = response["decay"]
+-
+-
+-class TDigestInfo(object):
+- compression = None
+- capacity = None
+- mergedNodes = None
+- unmergedNodes = None
+- mergedWeight = None
+- unmergedWeight = None
+- totalCompressions = None
+-
+- def __init__(self, args):
+- response = dict(zip(map(nativestr, args[::2]), args[1::2]))
+- self.compression = response["Compression"]
+- self.capacity = response["Capacity"]
+- self.mergedNodes = response["Merged nodes"]
+- self.unmergedNodes = response["Unmerged nodes"]
+- self.mergedWeight = response["Merged weight"]
+- self.unmergedWeight = response["Unmerged weight"]
+- self.totalCompressions = response["Total compressions"]
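# Illustrative sketch (not part of the patch) of the pairwise reply parsing
# that the *Info classes above perform: module INFO replies alternate field
# names and values, and zipping every other element turns them into a dict.
raw_reply = [b"width", 2000, b"depth", 5, b"count", 42]


def nativestr(x):  # stand-in for the helper imported from ..helpers
    return x.decode() if isinstance(x, bytes) else x


info = dict(zip(map(nativestr, raw_reply[::2]), raw_reply[1::2]))
print(info["width"], info["depth"], info["count"])  # 2000 5 42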
+diff --git a/redis/commands/cluster.py b/redis/commands/cluster.py
+deleted file mode 100644
+index 5d0e804..0000000
+--- a/redis/commands/cluster.py
++++ /dev/null
+@@ -1,412 +0,0 @@
+-from redis.crc import key_slot
+-from redis.exceptions import RedisClusterException, RedisError
+-
+-from .core import ACLCommands, DataAccessCommands, ManagementCommands, PubSubCommands
+-from .helpers import list_or_args
+-
+-
+-class ClusterMultiKeyCommands:
+- """
+- A class containing commands that handle more than one key
+- """
+-
+- def _partition_keys_by_slot(self, keys):
+- """
+- Split keys into a dictionary that maps a slot to
+- a list of keys.
+- """
+- slots_to_keys = {}
+- for key in keys:
+- k = self.encoder.encode(key)
+- slot = key_slot(k)
+- slots_to_keys.setdefault(slot, []).append(key)
+-
+- return slots_to_keys
+-
+- def mget_nonatomic(self, keys, *args):
+- """
+- Splits the keys into different slots and then calls MGET
+- for the keys of every slot. This operation will not be atomic
+- if keys belong to more than one slot.
+-
+- Returns a list of values ordered identically to ``keys``
+- """
+-
+- from redis.client import EMPTY_RESPONSE
+-
+- options = {}
+- if not args:
+- options[EMPTY_RESPONSE] = []
+-
+- # Concatenate all keys into a list
+- keys = list_or_args(keys, args)
+- # Split keys into slots
+- slots_to_keys = self._partition_keys_by_slot(keys)
+-
+- # Call MGET for every slot and concatenate
+- # the results
+- # We must make sure that the keys are returned in order
+- all_results = {}
+- for slot_keys in slots_to_keys.values():
+- slot_values = self.execute_command("MGET", *slot_keys, **options)
+-
+- slot_results = dict(zip(slot_keys, slot_values))
+- all_results.update(slot_results)
+-
+- # Sort the results
+- vals_in_order = [all_results[key] for key in keys]
+- return vals_in_order
+-
+- def mset_nonatomic(self, mapping):
+- """
+- Sets key/values based on a mapping. Mapping is a dictionary of
+- key/value pairs. Both keys and values should be strings or types that
+- can be cast to a string via str().
+-
+- Splits the keys into different slots and then calls MSET
+- for the keys of every slot. This operation will not be atomic
+- if keys belong to more than one slot.
+- """
+-
+- # Partition the keys by slot
+- slots_to_pairs = {}
+- for pair in mapping.items():
+- # encode the key
+- k = self.encoder.encode(pair[0])
+- slot = key_slot(k)
+- slots_to_pairs.setdefault(slot, []).extend(pair)
+-
+- # Call MSET for every slot and concatenate
+- # the results (one result per slot)
+- res = []
+- for pairs in slots_to_pairs.values():
+- res.append(self.execute_command("MSET", *pairs))
+-
+- return res
+-
+- def _split_command_across_slots(self, command, *keys):
+- """
+- Runs the given command once for the keys
+- of each slot. Returns the sum of the return values.
+- """
+- # Partition the keys by slot
+- slots_to_keys = self._partition_keys_by_slot(keys)
+-
+- # Sum up the reply from each command
+- total = 0
+- for slot_keys in slots_to_keys.values():
+- total += self.execute_command(command, *slot_keys)
+-
+- return total
+-
+- def exists(self, *keys):
+- """
+- Returns the number of ``keys`` that exist in the
+- whole cluster. The keys are first split up into slots
+- and then an EXISTS command is sent for every slot.
+- """
+- return self._split_command_across_slots("EXISTS", *keys)
+-
+- def delete(self, *keys):
+- """
+- Deletes the given keys in the cluster.
+- The keys are first split up into slots
+- and then a DEL command is sent for every slot.
+-
+- Non-existent keys are ignored.
+- Returns the number of keys that were deleted.
+- """
+- return self._split_command_across_slots("DEL", *keys)
+-
+- def touch(self, *keys):
+- """
+- Updates the last access time of given keys across the
+- cluster.
+-
+- The keys are first split up into slots
+- and then a TOUCH command is sent for every slot.
+-
+- Non-existent keys are ignored.
+- Returns the number of keys that were touched.
+- """
+- return self._split_command_across_slots("TOUCH", *keys)
+-
+- def unlink(self, *keys):
+- """
+- Remove the specified keys in a different thread.
+-
+- The keys are first split up into slots
+- and then an UNLINK command is sent for every slot.
+-
+- Non-existent keys are ignored.
+- Returns the number of keys that were unlinked.
+- """
+- return self._split_command_across_slots("UNLINK", *keys)
+-
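# Illustrative sketch (not part of the patch) of the slot-partitioning idea
# used by mget_nonatomic/delete/touch/unlink above; it only relies on
# redis.crc.key_slot, which the deleted module imports.
from redis.crc import key_slot


def partition_keys_by_slot(keys):
    slots_to_keys = {}
    for key in keys:
        slots_to_keys.setdefault(key_slot(key.encode()), []).append(key)
    return slots_to_keys


# Keys sharing a hash tag ({tag}) hash to the same slot, so they can be
# covered by a single MGET/DEL; everything else is grouped per slot.
print(partition_keys_by_slot(["user:1", "user:2", "{tag}a", "{tag}b"]))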
+-
+-class ClusterManagementCommands(ManagementCommands):
+- """
+- A class for Redis Cluster management commands
+-
+- The class inherits from Redis's core ManagementCommands class and makes
+- the required adjustments to work with cluster mode.
+- """
+-
+- def slaveof(self, *args, **kwargs):
+- raise RedisClusterException("SLAVEOF is not supported in cluster mode")
+-
+- def replicaof(self, *args, **kwargs):
+- raise RedisClusterException("REPLICAOF is not supported in cluster" " mode")
+-
+- def swapdb(self, *args, **kwargs):
+- raise RedisClusterException("SWAPDB is not supported in cluster" " mode")
+-
+-
+-class ClusterDataAccessCommands(DataAccessCommands):
+- """
+- A class for Redis Cluster Data Access Commands
+-
+- The class inherits from Redis's core DataAccessCommands class and makes
+- the required adjustments to work with cluster mode.
+- """
+-
+- def stralgo(
+- self,
+- algo,
+- value1,
+- value2,
+- specific_argument="strings",
+- len=False,
+- idx=False,
+- minmatchlen=None,
+- withmatchlen=False,
+- **kwargs,
+- ):
+- target_nodes = kwargs.pop("target_nodes", None)
+- if specific_argument == "strings" and target_nodes is None:
+- target_nodes = "default-node"
+- kwargs.update({"target_nodes": target_nodes})
+- return super().stralgo(
+- algo,
+- value1,
+- value2,
+- specific_argument,
+- len,
+- idx,
+- minmatchlen,
+- withmatchlen,
+- **kwargs,
+- )
+-
+-
+-class RedisClusterCommands(
+- ClusterMultiKeyCommands,
+- ClusterManagementCommands,
+- ACLCommands,
+- PubSubCommands,
+- ClusterDataAccessCommands,
+-):
+- """
+- A class for all Redis Cluster commands
+-
+- For key-based commands, the target node(s) will be internally determined
+- by the keys' hash slot.
+- Non-key-based commands can be executed with the 'target_nodes' argument to
+- target specific nodes. By default, if target_nodes is not specified, the
+- command will be executed on the default cluster node.
+-
+-
+- :param :target_nodes: type can be one of the following:
+- - nodes flag: ALL_NODES, PRIMARIES, REPLICAS, RANDOM
+- - 'ClusterNode'
+- - 'list(ClusterNodes)'
+- - 'dict(any:clusterNodes)'
+-
+- for example:
+- r.cluster_info(target_nodes=RedisCluster.ALL_NODES)
+- """
+-
+- def cluster_addslots(self, target_node, *slots):
+- """
+- Assign new hash slots to receiving node. Sends to specified node.
+-
+- :target_node: 'ClusterNode'
+- The node to execute the command on
+- """
+- return self.execute_command(
+- "CLUSTER ADDSLOTS", *slots, target_nodes=target_node
+- )
+-
+- def cluster_countkeysinslot(self, slot_id):
+- """
+- Return the number of local keys in the specified hash slot
+- Send to node based on specified slot_id
+- """
+- return self.execute_command("CLUSTER COUNTKEYSINSLOT", slot_id)
+-
+- def cluster_count_failure_report(self, node_id):
+- """
+- Return the number of failure reports active for a given node
+- Sends to a random node
+- """
+- return self.execute_command("CLUSTER COUNT-FAILURE-REPORTS", node_id)
+-
+- def cluster_delslots(self, *slots):
+- """
+- Set hash slots as unbound in the cluster.
+- It determines by itself which node each slot is in and sends the command there.
+-
+- Returns a list of the results for each processed slot.
+- """
+- return [self.execute_command("CLUSTER DELSLOTS", slot) for slot in slots]
+-
+- def cluster_failover(self, target_node, option=None):
+- """
+- Forces a slave to perform a manual failover of its master
+- Sends to specified node
+-
+- :target_node: 'ClusterNode'
+- The node to execute the command on
+- """
+- if option:
+- if option.upper() not in ["FORCE", "TAKEOVER"]:
+- raise RedisError(
+- f"Invalid option for CLUSTER FAILOVER command: {option}"
+- )
+- else:
+- return self.execute_command(
+- "CLUSTER FAILOVER", option, target_nodes=target_node
+- )
+- else:
+- return self.execute_command("CLUSTER FAILOVER", target_nodes=target_node)
+-
+- def cluster_info(self, target_nodes=None):
+- """
+- Provides info about Redis Cluster node state.
+- The command will be sent to a random node in the cluster if no target
+- node is specified.
+- """
+- return self.execute_command("CLUSTER INFO", target_nodes=target_nodes)
+-
+- def cluster_keyslot(self, key):
+- """
+- Returns the hash slot of the specified key
+- Sends to random node in the cluster
+- """
+- return self.execute_command("CLUSTER KEYSLOT", key)
+-
+- def cluster_meet(self, host, port, target_nodes=None):
+- """
+- Force a cluster node to handshake with another node.
+- Sends to specified node.
+- """
+- return self.execute_command(
+- "CLUSTER MEET", host, port, target_nodes=target_nodes
+- )
+-
+- def cluster_nodes(self):
+- """
+- Get the cluster configuration as seen by the node the command is
+- sent to (the output of CLUSTER NODES).
+-
+- Sends to a random node in the cluster
+- """
+- return self.execute_command("CLUSTER NODES")
+-
+- def cluster_replicate(self, target_nodes, node_id):
+- """
+- Reconfigure a node as a slave of the specified master node
+- """
+- return self.execute_command(
+- "CLUSTER REPLICATE", node_id, target_nodes=target_nodes
+- )
+-
+- def cluster_reset(self, soft=True, target_nodes=None):
+- """
+- Reset a Redis Cluster node
+-
+- If 'soft' is True then it will send 'SOFT' argument
+- If 'soft' is False then it will send 'HARD' argument
+- """
+- return self.execute_command(
+- "CLUSTER RESET", b"SOFT" if soft else b"HARD", target_nodes=target_nodes
+- )
+-
+- def cluster_save_config(self, target_nodes=None):
+- """
+- Forces the node to save cluster state on disk
+- """
+- return self.execute_command("CLUSTER SAVECONFIG", target_nodes=target_nodes)
+-
+- def cluster_get_keys_in_slot(self, slot, num_keys):
+- """
+- Return key names from the specified cluster slot, up to `num_keys`.
+- """
+- return self.execute_command("CLUSTER GETKEYSINSLOT", slot, num_keys)
+-
+- def cluster_set_config_epoch(self, epoch, target_nodes=None):
+- """
+- Set the configuration epoch in a new node
+- """
+- return self.execute_command(
+- "CLUSTER SET-CONFIG-EPOCH", epoch, target_nodes=target_nodes
+- )
+-
+- def cluster_setslot(self, target_node, node_id, slot_id, state):
+- """
+- Bind a hash slot to a specific node
+-
+- :target_node: 'ClusterNode'
+- The node to execute the command on
+- """
+- if state.upper() in ("IMPORTING", "NODE", "MIGRATING"):
+- return self.execute_command(
+- "CLUSTER SETSLOT", slot_id, state, node_id, target_nodes=target_node
+- )
+- elif state.upper() == "STABLE":
+- raise RedisError('For "stable" state please use ' "cluster_setslot_stable")
+- else:
+- raise RedisError(f"Invalid slot state: {state}")
+-
+- def cluster_setslot_stable(self, slot_id):
+- """
+- Clears migrating / importing state from the slot.
+-        It determines by itself what node the slot is in and sends it there.
+- """
+- return self.execute_command("CLUSTER SETSLOT", slot_id, "STABLE")
+-
+- def cluster_replicas(self, node_id, target_nodes=None):
+- """
+- Provides a list of replica nodes replicating from the specified primary
+- target node.
+- """
+- return self.execute_command(
+- "CLUSTER REPLICAS", node_id, target_nodes=target_nodes
+- )
+-
+- def cluster_slots(self, target_nodes=None):
+- """
+- Get array of Cluster slot to node mappings
+- """
+- return self.execute_command("CLUSTER SLOTS", target_nodes=target_nodes)
+-
+- def readonly(self, target_nodes=None):
+- """
+- Enables read queries.
+- The command will be sent to the default cluster node if target_nodes is
+- not specified.
+- """
+- if target_nodes == "replicas" or target_nodes == "all":
+- # read_from_replicas will only be enabled if the READONLY command
+- # is sent to all replicas
+- self.read_from_replicas = True
+- return self.execute_command("READONLY", target_nodes=target_nodes)
+-
+- def readwrite(self, target_nodes=None):
+- """
+- Disables read queries.
+- The command will be sent to the default cluster node if target_nodes is
+- not specified.
+- """
+- # Reset read from replicas flag
+- self.read_from_replicas = False
+- return self.execute_command("READWRITE", target_nodes=target_nodes)
+diff --git a/redis/commands/graph/__init__.py b/redis/commands/graph/__init__.py
+deleted file mode 100644
+index 7b9972a..0000000
+--- a/redis/commands/graph/__init__.py
++++ /dev/null
+@@ -1,162 +0,0 @@
+-from ..helpers import quote_string, random_string, stringify_param_value
+-from .commands import GraphCommands
+-from .edge import Edge # noqa
+-from .node import Node # noqa
+-from .path import Path # noqa
+-
+-
+-class Graph(GraphCommands):
+- """
+- Graph, collection of nodes and edges.
+- """
+-
+- def __init__(self, client, name=random_string()):
+- """
+- Create a new graph.
+- """
+- self.NAME = name # Graph key
+- self.client = client
+- self.execute_command = client.execute_command
+-
+- self.nodes = {}
+- self.edges = []
+- self._labels = [] # List of node labels.
+- self._properties = [] # List of properties.
+- self._relationshipTypes = [] # List of relation types.
+- self.version = 0 # Graph version
+-
+- @property
+- def name(self):
+- return self.NAME
+-
+- def _clear_schema(self):
+- self._labels = []
+- self._properties = []
+- self._relationshipTypes = []
+-
+- def _refresh_schema(self):
+- self._clear_schema()
+- self._refresh_labels()
+- self._refresh_relations()
+- self._refresh_attributes()
+-
+- def _refresh_labels(self):
+- lbls = self.labels()
+-
+- # Unpack data.
+- self._labels = [None] * len(lbls)
+- for i, l in enumerate(lbls):
+- self._labels[i] = l[0]
+-
+- def _refresh_relations(self):
+- rels = self.relationshipTypes()
+-
+- # Unpack data.
+- self._relationshipTypes = [None] * len(rels)
+- for i, r in enumerate(rels):
+- self._relationshipTypes[i] = r[0]
+-
+- def _refresh_attributes(self):
+- props = self.propertyKeys()
+-
+- # Unpack data.
+- self._properties = [None] * len(props)
+- for i, p in enumerate(props):
+- self._properties[i] = p[0]
+-
+- def get_label(self, idx):
+- """
+-        Returns a label by its index
+-
+- Args:
+-
+- idx:
+- The index of the label
+- """
+- try:
+- label = self._labels[idx]
+- except IndexError:
+- # Refresh labels.
+- self._refresh_labels()
+- label = self._labels[idx]
+- return label
+-
+- def get_relation(self, idx):
+- """
+-        Returns a relationship type by its index
+-
+- Args:
+-
+- idx:
+- The index of the relation
+- """
+- try:
+- relationship_type = self._relationshipTypes[idx]
+- except IndexError:
+- # Refresh relationship types.
+- self._refresh_relations()
+- relationship_type = self._relationshipTypes[idx]
+- return relationship_type
+-
+- def get_property(self, idx):
+- """
+-        Returns a property by its index
+-
+- Args:
+-
+- idx:
+- The index of the property
+- """
+- try:
+- propertie = self._properties[idx]
+- except IndexError:
+- # Refresh properties.
+- self._refresh_attributes()
+- propertie = self._properties[idx]
+- return propertie
+-
+- def add_node(self, node):
+- """
+- Adds a node to the graph.
+- """
+- if node.alias is None:
+- node.alias = random_string()
+- self.nodes[node.alias] = node
+-
+- def add_edge(self, edge):
+- """
+- Adds an edge to the graph.
+- """
+- if not (self.nodes[edge.src_node.alias] and self.nodes[edge.dest_node.alias]):
+- raise AssertionError("Both edge's end must be in the graph")
+-
+- self.edges.append(edge)
+-
+- def _build_params_header(self, params):
+- if not isinstance(params, dict):
+- raise TypeError("'params' must be a dict")
+- # Header starts with "CYPHER"
+- params_header = "CYPHER "
+- for key, value in params.items():
+- params_header += str(key) + "=" + stringify_param_value(value) + " "
+- return params_header
+-
+- # Procedures.
+- def call_procedure(self, procedure, *args, read_only=False, **kwagrs):
+- args = [quote_string(arg) for arg in args]
+- q = f"CALL {procedure}({','.join(args)})"
+-
+- y = kwagrs.get("y", None)
+- if y:
+- q += f" YIELD {','.join(y)}"
+-
+- return self.query(q, read_only=read_only)
+-
+- def labels(self):
+- return self.call_procedure("db.labels", read_only=True).result_set
+-
+- def relationshipTypes(self):
+- return self.call_procedure("db.relationshipTypes", read_only=True).result_set
+-
+- def propertyKeys(self):
+- return self.call_procedure("db.propertyKeys", read_only=True).result_set
+diff --git a/redis/commands/graph/commands.py b/redis/commands/graph/commands.py
+deleted file mode 100644
+index e097936..0000000
+--- a/redis/commands/graph/commands.py
++++ /dev/null
+@@ -1,202 +0,0 @@
+-from redis import DataError
+-from redis.exceptions import ResponseError
+-
+-from .exceptions import VersionMismatchException
+-from .query_result import QueryResult
+-
+-
+-class GraphCommands:
+- """RedisGraph Commands"""
+-
+- def commit(self):
+- """
+- Create entire graph.
+- For more information see `CREATE <https://oss.redis.com/redisgraph/master/commands/#create>`_. # noqa
+- """
+- if len(self.nodes) == 0 and len(self.edges) == 0:
+- return None
+-
+- query = "CREATE "
+- for _, node in self.nodes.items():
+- query += str(node) + ","
+-
+- query += ",".join([str(edge) for edge in self.edges])
+-
+- # Discard leading comma.
+- if query[-1] == ",":
+- query = query[:-1]
+-
+- return self.query(query)
+-
+- def query(self, q, params=None, timeout=None, read_only=False, profile=False):
+- """
+- Executes a query against the graph.
+- For more information see `GRAPH.QUERY <https://oss.redis.com/redisgraph/master/commands/#graphquery>`_. # noqa
+-
+- Args:
+-
+- -------
+- q :
+- The query.
+- params : dict
+- Query parameters.
+- timeout : int
+- Maximum runtime for read queries in milliseconds.
+- read_only : bool
+- Executes a readonly query if set to True.
+- profile : bool
+- Return details on results produced by and time
+- spent in each operation.
+- """
+-
+- # maintain original 'q'
+- query = q
+-
+- # handle query parameters
+- if params is not None:
+- query = self._build_params_header(params) + query
+-
+- # construct query command
+- # ask for compact result-set format
+- # specify known graph version
+- if profile:
+- cmd = "GRAPH.PROFILE"
+- else:
+- cmd = "GRAPH.RO_QUERY" if read_only else "GRAPH.QUERY"
+- command = [cmd, self.name, query, "--compact"]
+-
+-        # include timeout if specified
+- if timeout:
+- if not isinstance(timeout, int):
+- raise Exception("Timeout argument must be a positive integer")
+- command += ["timeout", timeout]
+-
+- # issue query
+- try:
+- response = self.execute_command(*command)
+- return QueryResult(self, response, profile)
+- except ResponseError as e:
+- if "wrong number of arguments" in str(e):
+- print(
+- "Note: RedisGraph Python requires server version 2.2.8 or above"
+- ) # noqa
+- if "unknown command" in str(e) and read_only:
+- # `GRAPH.RO_QUERY` is unavailable in older versions.
+- return self.query(q, params, timeout, read_only=False)
+- raise e
+- except VersionMismatchException as e:
+- # client view over the graph schema is out of sync
+- # set client version and refresh local schema
+- self.version = e.version
+- self._refresh_schema()
+- # re-issue query
+- return self.query(q, params, timeout, read_only)
+-
+- def merge(self, pattern):
+- """
+- Merge pattern.
+- For more information see `MERGE <https://oss.redis.com/redisgraph/master/commands/#merge>`_. # noqa
+- """
+- query = "MERGE "
+- query += str(pattern)
+-
+- return self.query(query)
+-
+- def delete(self):
+- """
+- Deletes graph.
+- For more information see `DELETE <https://oss.redis.com/redisgraph/master/commands/#delete>`_. # noqa
+- """
+- self._clear_schema()
+- return self.execute_command("GRAPH.DELETE", self.name)
+-
+- # declared here, to override the built in redis.db.flush()
+- def flush(self):
+- """
+- Commit the graph and reset the edges and the nodes to zero length.
+- """
+- self.commit()
+- self.nodes = {}
+- self.edges = []
+-
+- def explain(self, query, params=None):
+- """
+-        Get the execution plan for a given query.
+- Returns an array of operations.
+- For more information see `GRAPH.EXPLAIN <https://oss.redis.com/redisgraph/master/commands/#graphexplain>`_. # noqa
+-
+- Args:
+-
+- -------
+- query:
+- The query that will be executed.
+- params: dict
+- Query parameters.
+- """
+- if params is not None:
+- query = self._build_params_header(params) + query
+-
+- plan = self.execute_command("GRAPH.EXPLAIN", self.name, query)
+- return "\n".join(plan)
+-
+- def bulk(self, **kwargs):
+- """Internal only. Not supported."""
+- raise NotImplementedError(
+- "GRAPH.BULK is internal only. "
+- "Use https://github.com/redisgraph/redisgraph-bulk-loader."
+- )
+-
+- def profile(self, query):
+- """
+- Execute a query and produce an execution plan augmented with metrics
+- for each operation's execution. Return a string representation of a
+- query execution plan, with details on results produced by and time
+- spent in each operation.
+- For more information see `GRAPH.PROFILE <https://oss.redis.com/redisgraph/master/commands/#graphprofile>`_. # noqa
+- """
+- return self.query(query, profile=True)
+-
+- def slowlog(self):
+- """
+- Get a list containing up to 10 of the slowest queries issued
+- against the given graph ID.
+- For more information see `GRAPH.SLOWLOG <https://oss.redis.com/redisgraph/master/commands/#graphslowlog>`_. # noqa
+-
+- Each item in the list has the following structure:
+- 1. A unix timestamp at which the log entry was processed.
+- 2. The issued command.
+- 3. The issued query.
+- 4. The amount of time needed for its execution, in milliseconds.
+- """
+- return self.execute_command("GRAPH.SLOWLOG", self.name)
+-
+- def config(self, name, value=None, set=False):
+- """
+- Retrieve or update a RedisGraph configuration.
+- For more information see `GRAPH.CONFIG <https://oss.redis.com/redisgraph/master/commands/#graphconfig>`_. # noqa
+-
+- Args:
+-
+- name : str
+- The name of the configuration
+- value :
+-            The value we want to set (can be used only when `set` is on)
+- set : bool
+- Turn on to set a configuration. Default behavior is get.
+- """
+- params = ["SET" if set else "GET", name]
+- if value is not None:
+- if set:
+- params.append(value)
+- else:
+- raise DataError(
+- "``value`` can be provided only when ``set`` is True"
+- ) # noqa
+- return self.execute_command("GRAPH.CONFIG", *params)
+-
+- def list_keys(self):
+- """
+- Lists all graph keys in the keyspace.
+- For more information see `GRAPH.LIST <https://oss.redis.com/redisgraph/master/commands/#graphlist>`_. # noqa
+- """
+- return self.execute_command("GRAPH.LIST")
+diff --git a/redis/commands/graph/edge.py b/redis/commands/graph/edge.py
+deleted file mode 100644
+index b334293..0000000
+--- a/redis/commands/graph/edge.py
++++ /dev/null
+@@ -1,87 +0,0 @@
+-from ..helpers import quote_string
+-from .node import Node
+-
+-
+-class Edge:
+- """
+- An edge connecting two nodes.
+- """
+-
+- def __init__(self, src_node, relation, dest_node, edge_id=None, properties=None):
+- """
+- Create a new edge.
+- """
+- if src_node is None or dest_node is None:
+- # NOTE(bors-42): It makes sense to change AssertionError to
+- # ValueError here
+- raise AssertionError("Both src_node & dest_node must be provided")
+-
+- self.id = edge_id
+- self.relation = relation or ""
+- self.properties = properties or {}
+- self.src_node = src_node
+- self.dest_node = dest_node
+-
+- def toString(self):
+- res = ""
+- if self.properties:
+- props = ",".join(
+- key + ":" + str(quote_string(val))
+- for key, val in sorted(self.properties.items())
+- )
+- res += "{" + props + "}"
+-
+- return res
+-
+- def __str__(self):
+- # Source node.
+- if isinstance(self.src_node, Node):
+- res = str(self.src_node)
+- else:
+- res = "()"
+-
+- # Edge
+- res += "-["
+- if self.relation:
+- res += ":" + self.relation
+- if self.properties:
+- props = ",".join(
+- key + ":" + str(quote_string(val))
+- for key, val in sorted(self.properties.items())
+- )
+- res += "{" + props + "}"
+- res += "]->"
+-
+- # Dest node.
+- if isinstance(self.dest_node, Node):
+- res += str(self.dest_node)
+- else:
+- res += "()"
+-
+- return res
+-
+- def __eq__(self, rhs):
+- # Quick positive check, if both IDs are set.
+- if self.id is not None and rhs.id is not None and self.id == rhs.id:
+- return True
+-
+- # Source and destination nodes should match.
+- if self.src_node != rhs.src_node:
+- return False
+-
+- if self.dest_node != rhs.dest_node:
+- return False
+-
+- # Relation should match.
+- if self.relation != rhs.relation:
+- return False
+-
+- # Quick check for number of properties.
+- if len(self.properties) != len(rhs.properties):
+- return False
+-
+- # Compare properties.
+- if self.properties != rhs.properties:
+- return False
+-
+- return True
+diff --git a/redis/commands/graph/exceptions.py b/redis/commands/graph/exceptions.py
+deleted file mode 100644
+index 4bbac10..0000000
+--- a/redis/commands/graph/exceptions.py
++++ /dev/null
+@@ -1,3 +0,0 @@
+-class VersionMismatchException(Exception):
+- def __init__(self, version):
+- self.version = version
+diff --git a/redis/commands/graph/node.py b/redis/commands/graph/node.py
+deleted file mode 100644
+index 47e4eeb..0000000
+--- a/redis/commands/graph/node.py
++++ /dev/null
+@@ -1,84 +0,0 @@
+-from ..helpers import quote_string
+-
+-
+-class Node:
+- """
+- A node within the graph.
+- """
+-
+- def __init__(self, node_id=None, alias=None, label=None, properties=None):
+- """
+- Create a new node.
+- """
+- self.id = node_id
+- self.alias = alias
+- if isinstance(label, list):
+- label = [inner_label for inner_label in label if inner_label != ""]
+-
+- if (
+- label is None
+- or label == ""
+- or (isinstance(label, list) and len(label) == 0)
+- ):
+- self.label = None
+- self.labels = None
+- elif isinstance(label, str):
+- self.label = label
+- self.labels = [label]
+- elif isinstance(label, list) and all(
+- [isinstance(inner_label, str) for inner_label in label]
+- ):
+- self.label = label[0]
+- self.labels = label
+- else:
+- raise AssertionError(
+- "label should be either None, " "string or a list of strings"
+- )
+-
+- self.properties = properties or {}
+-
+- def toString(self):
+- res = ""
+- if self.properties:
+- props = ",".join(
+- key + ":" + str(quote_string(val))
+- for key, val in sorted(self.properties.items())
+- )
+- res += "{" + props + "}"
+-
+- return res
+-
+- def __str__(self):
+- res = "("
+- if self.alias:
+- res += self.alias
+- if self.labels:
+- res += ":" + ":".join(self.labels)
+- if self.properties:
+- props = ",".join(
+- key + ":" + str(quote_string(val))
+- for key, val in sorted(self.properties.items())
+- )
+- res += "{" + props + "}"
+- res += ")"
+-
+- return res
+-
+- def __eq__(self, rhs):
+- # Quick positive check, if both IDs are set.
+- if self.id is not None and rhs.id is not None and self.id != rhs.id:
+- return False
+-
+- # Label should match.
+- if self.label != rhs.label:
+- return False
+-
+- # Quick check for number of properties.
+- if len(self.properties) != len(rhs.properties):
+- return False
+-
+- # Compare properties.
+- if self.properties != rhs.properties:
+- return False
+-
+- return True
+diff --git a/redis/commands/graph/path.py b/redis/commands/graph/path.py
+deleted file mode 100644
+index 6f2214a..0000000
+--- a/redis/commands/graph/path.py
++++ /dev/null
+@@ -1,74 +0,0 @@
+-from .edge import Edge
+-from .node import Node
+-
+-
+-class Path:
+- def __init__(self, nodes, edges):
+- if not (isinstance(nodes, list) and isinstance(edges, list)):
+- raise TypeError("nodes and edges must be list")
+-
+- self._nodes = nodes
+- self._edges = edges
+- self.append_type = Node
+-
+- @classmethod
+- def new_empty_path(cls):
+- return cls([], [])
+-
+- def nodes(self):
+- return self._nodes
+-
+- def edges(self):
+- return self._edges
+-
+- def get_node(self, index):
+- return self._nodes[index]
+-
+- def get_relationship(self, index):
+- return self._edges[index]
+-
+- def first_node(self):
+- return self._nodes[0]
+-
+- def last_node(self):
+- return self._nodes[-1]
+-
+- def edge_count(self):
+- return len(self._edges)
+-
+- def nodes_count(self):
+- return len(self._nodes)
+-
+- def add_node(self, node):
+- if not isinstance(node, self.append_type):
+- raise AssertionError("Add Edge before adding Node")
+- self._nodes.append(node)
+- self.append_type = Edge
+- return self
+-
+- def add_edge(self, edge):
+- if not isinstance(edge, self.append_type):
+- raise AssertionError("Add Node before adding Edge")
+- self._edges.append(edge)
+- self.append_type = Node
+- return self
+-
+- def __eq__(self, other):
+- return self.nodes() == other.nodes() and self.edges() == other.edges()
+-
+- def __str__(self):
+- res = "<"
+- edge_count = self.edge_count()
+- for i in range(0, edge_count):
+- node_id = self.get_node(i).id
+- res += "(" + str(node_id) + ")"
+- edge = self.get_relationship(i)
+- res += (
+- "-[" + str(int(edge.id)) + "]->"
+- if edge.src_node == node_id
+- else "<-[" + str(int(edge.id)) + "]-"
+- )
+- node_id = self.get_node(edge_count).id
+- res += "(" + str(node_id) + ")"
+- res += ">"
+- return res
+diff --git a/redis/commands/graph/query_result.py b/redis/commands/graph/query_result.py
+deleted file mode 100644
+index e9d9f4d..0000000
+--- a/redis/commands/graph/query_result.py
++++ /dev/null
+@@ -1,362 +0,0 @@
+-from collections import OrderedDict
+-
+-# from prettytable import PrettyTable
+-from redis import ResponseError
+-
+-from .edge import Edge
+-from .exceptions import VersionMismatchException
+-from .node import Node
+-from .path import Path
+-
+-LABELS_ADDED = "Labels added"
+-NODES_CREATED = "Nodes created"
+-NODES_DELETED = "Nodes deleted"
+-RELATIONSHIPS_DELETED = "Relationships deleted"
+-PROPERTIES_SET = "Properties set"
+-RELATIONSHIPS_CREATED = "Relationships created"
+-INDICES_CREATED = "Indices created"
+-INDICES_DELETED = "Indices deleted"
+-CACHED_EXECUTION = "Cached execution"
+-INTERNAL_EXECUTION_TIME = "internal execution time"
+-
+-STATS = [
+- LABELS_ADDED,
+- NODES_CREATED,
+- PROPERTIES_SET,
+- RELATIONSHIPS_CREATED,
+- NODES_DELETED,
+- RELATIONSHIPS_DELETED,
+- INDICES_CREATED,
+- INDICES_DELETED,
+- CACHED_EXECUTION,
+- INTERNAL_EXECUTION_TIME,
+-]
+-
+-
+-class ResultSetColumnTypes:
+- COLUMN_UNKNOWN = 0
+- COLUMN_SCALAR = 1
+- COLUMN_NODE = 2 # Unused as of RedisGraph v2.1.0, retained for backwards compatibility. # noqa
+- COLUMN_RELATION = 3 # Unused as of RedisGraph v2.1.0, retained for backwards compatibility. # noqa
+-
+-
+-class ResultSetScalarTypes:
+- VALUE_UNKNOWN = 0
+- VALUE_NULL = 1
+- VALUE_STRING = 2
+- VALUE_INTEGER = 3
+- VALUE_BOOLEAN = 4
+- VALUE_DOUBLE = 5
+- VALUE_ARRAY = 6
+- VALUE_EDGE = 7
+- VALUE_NODE = 8
+- VALUE_PATH = 9
+- VALUE_MAP = 10
+- VALUE_POINT = 11
+-
+-
+-class QueryResult:
+- def __init__(self, graph, response, profile=False):
+- """
+- A class that represents a result of the query operation.
+-
+- Args:
+-
+- graph:
+- The graph on which the query was executed.
+- response:
+- The response from the server.
+- profile:
+- A boolean indicating if the query command was "GRAPH.PROFILE"
+- """
+- self.graph = graph
+- self.header = []
+- self.result_set = []
+-
+- # in case of an error an exception will be raised
+- self._check_for_errors(response)
+-
+- if len(response) == 1:
+- self.parse_statistics(response[0])
+- elif profile:
+- self.parse_profile(response)
+- else:
+- # start by parsing statistics, matches the one we have
+- self.parse_statistics(response[-1]) # Last element.
+- self.parse_results(response)
+-
+- def _check_for_errors(self, response):
+- if isinstance(response[0], ResponseError):
+- error = response[0]
+- if str(error) == "version mismatch":
+- version = response[1]
+- error = VersionMismatchException(version)
+- raise error
+-
+- # If we encountered a run-time error, the last response
+- # element will be an exception
+- if isinstance(response[-1], ResponseError):
+- raise response[-1]
+-
+- def parse_results(self, raw_result_set):
+- self.header = self.parse_header(raw_result_set)
+-
+- # Empty header.
+- if len(self.header) == 0:
+- return
+-
+- self.result_set = self.parse_records(raw_result_set)
+-
+- def parse_statistics(self, raw_statistics):
+- self.statistics = {}
+-
+- # decode statistics
+- for idx, stat in enumerate(raw_statistics):
+- if isinstance(stat, bytes):
+- raw_statistics[idx] = stat.decode()
+-
+- for s in STATS:
+- v = self._get_value(s, raw_statistics)
+- if v is not None:
+- self.statistics[s] = v
+-
+- def parse_header(self, raw_result_set):
+- # An array of column name/column type pairs.
+- header = raw_result_set[0]
+- return header
+-
+- def parse_records(self, raw_result_set):
+- records = []
+- result_set = raw_result_set[1]
+- for row in result_set:
+- record = []
+- for idx, cell in enumerate(row):
+- if self.header[idx][0] == ResultSetColumnTypes.COLUMN_SCALAR: # noqa
+- record.append(self.parse_scalar(cell))
+- elif self.header[idx][0] == ResultSetColumnTypes.COLUMN_NODE: # noqa
+- record.append(self.parse_node(cell))
+- elif (
+- self.header[idx][0] == ResultSetColumnTypes.COLUMN_RELATION
+- ): # noqa
+- record.append(self.parse_edge(cell))
+- else:
+- print("Unknown column type.\n")
+- records.append(record)
+-
+- return records
+-
+- def parse_entity_properties(self, props):
+- # [[name, value type, value] X N]
+- properties = {}
+- for prop in props:
+- prop_name = self.graph.get_property(prop[0])
+- prop_value = self.parse_scalar(prop[1:])
+- properties[prop_name] = prop_value
+-
+- return properties
+-
+- def parse_string(self, cell):
+- if isinstance(cell, bytes):
+- return cell.decode()
+- elif not isinstance(cell, str):
+- return str(cell)
+- else:
+- return cell
+-
+- def parse_node(self, cell):
+- # Node ID (integer),
+- # [label string offset (integer)],
+- # [[name, value type, value] X N]
+-
+- node_id = int(cell[0])
+- labels = None
+- if len(cell[1]) > 0:
+- labels = []
+- for inner_label in cell[1]:
+- labels.append(self.graph.get_label(inner_label))
+- properties = self.parse_entity_properties(cell[2])
+- return Node(node_id=node_id, label=labels, properties=properties)
+-
+- def parse_edge(self, cell):
+- # Edge ID (integer),
+- # reltype string offset (integer),
+- # src node ID offset (integer),
+- # dest node ID offset (integer),
+- # [[name, value, value type] X N]
+-
+- edge_id = int(cell[0])
+- relation = self.graph.get_relation(cell[1])
+- src_node_id = int(cell[2])
+- dest_node_id = int(cell[3])
+- properties = self.parse_entity_properties(cell[4])
+- return Edge(
+- src_node_id, relation, dest_node_id, edge_id=edge_id, properties=properties
+- )
+-
+- def parse_path(self, cell):
+- nodes = self.parse_scalar(cell[0])
+- edges = self.parse_scalar(cell[1])
+- return Path(nodes, edges)
+-
+- def parse_map(self, cell):
+- m = OrderedDict()
+- n_entries = len(cell)
+-
+- # A map is an array of key value pairs.
+- # 1. key (string)
+- # 2. array: (value type, value)
+- for i in range(0, n_entries, 2):
+- key = self.parse_string(cell[i])
+- m[key] = self.parse_scalar(cell[i + 1])
+-
+- return m
+-
+- def parse_point(self, cell):
+- p = {}
+-        # A point is received as an array of the form: [latitude, longitude]
+- # It is returned as a map of the form: {"latitude": latitude, "longitude": longitude} # noqa
+- p["latitude"] = float(cell[0])
+- p["longitude"] = float(cell[1])
+- return p
+-
+- def parse_scalar(self, cell):
+- scalar_type = int(cell[0])
+- value = cell[1]
+- scalar = None
+-
+- if scalar_type == ResultSetScalarTypes.VALUE_NULL:
+- scalar = None
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_STRING:
+- scalar = self.parse_string(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_INTEGER:
+- scalar = int(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_BOOLEAN:
+- value = value.decode() if isinstance(value, bytes) else value
+- if value == "true":
+- scalar = True
+- elif value == "false":
+- scalar = False
+- else:
+- print("Unknown boolean type\n")
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_DOUBLE:
+- scalar = float(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_ARRAY:
+- # array variable is introduced only for readability
+- scalar = array = value
+- for i in range(len(array)):
+- scalar[i] = self.parse_scalar(array[i])
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_NODE:
+- scalar = self.parse_node(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_EDGE:
+- scalar = self.parse_edge(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_PATH:
+- scalar = self.parse_path(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_MAP:
+- scalar = self.parse_map(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_POINT:
+- scalar = self.parse_point(value)
+-
+- elif scalar_type == ResultSetScalarTypes.VALUE_UNKNOWN:
+- print("Unknown scalar type\n")
+-
+- return scalar
+-
+- def parse_profile(self, response):
+- self.result_set = [x[0 : x.index(",")].strip() for x in response]
+-
+- # """Prints the data from the query response:
+- # 1. First row result_set contains the columns names.
+- # Thus the first row in PrettyTable will contain the
+- # columns.
+- # 2. The row after that will contain the data returned,
+- # or 'No Data returned' if there is none.
+- # 3. Prints the statistics of the query.
+- # """
+-
+- # def pretty_print(self):
+- # if not self.is_empty():
+- # header = [col[1] for col in self.header]
+- # tbl = PrettyTable(header)
+-
+- # for row in self.result_set:
+- # record = []
+- # for idx, cell in enumerate(row):
+- # if type(cell) is Node:
+- # record.append(cell.toString())
+- # elif type(cell) is Edge:
+- # record.append(cell.toString())
+- # else:
+- # record.append(cell)
+- # tbl.add_row(record)
+-
+- # if len(self.result_set) == 0:
+- # tbl.add_row(['No data returned.'])
+-
+- # print(str(tbl) + '\n')
+-
+- # for stat in self.statistics:
+- # print("%s %s" % (stat, self.statistics[stat]))
+-
+- def is_empty(self):
+- return len(self.result_set) == 0
+-
+- @staticmethod
+- def _get_value(prop, statistics):
+- for stat in statistics:
+- if prop in stat:
+- return float(stat.split(": ")[1].split(" ")[0])
+-
+- return None
+-
+- def _get_stat(self, stat):
+- return self.statistics[stat] if stat in self.statistics else 0
+-
+- @property
+- def labels_added(self):
+- return self._get_stat(LABELS_ADDED)
+-
+- @property
+- def nodes_created(self):
+- return self._get_stat(NODES_CREATED)
+-
+- @property
+- def nodes_deleted(self):
+- return self._get_stat(NODES_DELETED)
+-
+- @property
+- def properties_set(self):
+- return self._get_stat(PROPERTIES_SET)
+-
+- @property
+- def relationships_created(self):
+- return self._get_stat(RELATIONSHIPS_CREATED)
+-
+- @property
+- def relationships_deleted(self):
+- return self._get_stat(RELATIONSHIPS_DELETED)
+-
+- @property
+- def indices_created(self):
+- return self._get_stat(INDICES_CREATED)
+-
+- @property
+- def indices_deleted(self):
+- return self._get_stat(INDICES_DELETED)
+-
+- @property
+- def cached_execution(self):
+- return self._get_stat(CACHED_EXECUTION) == 1
+-
+- @property
+- def run_time_ms(self):
+- return self._get_stat(INTERNAL_EXECUTION_TIME)
diff --git a/redis/commands/json/__init__.py b/redis/commands/json/__init__.py
deleted file mode 100644
-index d634dbd..0000000
+index 12c0648..0000000
--- a/redis/commands/json/__init__.py
+++ /dev/null
-@@ -1,120 +0,0 @@
--from json import JSONDecoder, JSONEncoder, JSONDecodeError
+@@ -1,118 +0,0 @@
+-from json import JSONDecodeError, JSONDecoder, JSONEncoder
+-
+-import redis
-
--from .decoders import (
-- decode_list,
-- bulk_of_jsons,
--)
-from ..helpers import nativestr
-from .commands import JSONCommands
--import redis
+-from .decoders import bulk_of_jsons, decode_list
-
-
-class JSON(JSONCommands):
@@ -224,15 +4717,20 @@ index d634dbd..0000000
- """Pipeline for the module."""
diff --git a/redis/commands/json/commands.py b/redis/commands/json/commands.py
deleted file mode 100644
-index 4436f6a..0000000
+index a132b8e..0000000
--- a/redis/commands/json/commands.py
+++ /dev/null
-@@ -1,232 +0,0 @@
--from .path import Path
--from .decoders import decode_dict_keys
+@@ -1,329 +0,0 @@
+-import os
+-from json import JSONDecodeError, loads
+-
-from deprecated import deprecated
+-
-from redis.exceptions import DataError
-
+-from .decoders import decode_dict_keys
+-from .path import Path
+-
-
-class JSONCommands:
- """json commands."""
@@ -240,7 +4738,9 @@ index 4436f6a..0000000
- def arrappend(self, name, path=Path.rootPath(), *args):
- """Append the objects ``args`` to the array under the
 -        ``path`` in key ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrappend
+- """ # noqa
- pieces = [name, str(path)]
- for o in args:
- pieces.append(self._encode(o))
@@ -253,16 +4753,19 @@ index 4436f6a..0000000
-
- The search can be limited using the optional inclusive ``start``
- and exclusive ``stop`` indices.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrindex
+- """ # noqa
- return self.execute_command(
-- "JSON.ARRINDEX", name, str(path), self._encode(scalar),
-- start, stop
+- "JSON.ARRINDEX", name, str(path), self._encode(scalar), start, stop
- )
-
- def arrinsert(self, name, path, index, *args):
- """Insert the objects ``args`` to the array at index ``index``
- under the ``path` in key ``name``.
 -        under the ``path`` in key ``name``.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrinsert
+- """ # noqa
- pieces = [name, str(path), index]
- for o in args:
- pieces.append(self._encode(o))
@@ -271,54 +4774,74 @@ index 4436f6a..0000000
- def arrlen(self, name, path=Path.rootPath()):
- """Return the length of the array JSON value under ``path``
 -        at key ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrlen
+- """ # noqa
- return self.execute_command("JSON.ARRLEN", name, str(path))
-
- def arrpop(self, name, path=Path.rootPath(), index=-1):
- """Pop the element at ``index`` in the array JSON value under
- ``path`` at key ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrpop
+- """ # noqa
- return self.execute_command("JSON.ARRPOP", name, str(path), index)
-
- def arrtrim(self, name, path, start, stop):
- """Trim the array JSON value under ``path`` at key ``name`` to the
- inclusive range given by ``start`` and ``stop``.
-- """
-- return self.execute_command("JSON.ARRTRIM", name, str(path),
-- start, stop)
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonarrtrim
+- """ # noqa
+- return self.execute_command("JSON.ARRTRIM", name, str(path), start, stop)
-
- def type(self, name, path=Path.rootPath()):
-- """Get the type of the JSON value under ``path`` from key ``name``."""
+- """Get the type of the JSON value under ``path`` from key ``name``.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsontype
+- """ # noqa
- return self.execute_command("JSON.TYPE", name, str(path))
-
- def resp(self, name, path=Path.rootPath()):
-- """Return the JSON value under ``path`` at key ``name``."""
+- """Return the JSON value under ``path`` at key ``name``.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonresp
+- """ # noqa
- return self.execute_command("JSON.RESP", name, str(path))
-
- def objkeys(self, name, path=Path.rootPath()):
- """Return the key names in the dictionary JSON value under ``path`` at
-- key ``name``."""
+- key ``name``.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonobjkeys
+- """ # noqa
- return self.execute_command("JSON.OBJKEYS", name, str(path))
-
- def objlen(self, name, path=Path.rootPath()):
- """Return the length of the dictionary JSON value under ``path`` at key
- ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonobjlen
+- """ # noqa
- return self.execute_command("JSON.OBJLEN", name, str(path))
-
- def numincrby(self, name, path, number):
- """Increment the numeric (integer or floating point) JSON value under
- ``path`` at key ``name`` by the provided ``number``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonnumincrby
+- """ # noqa
- return self.execute_command(
- "JSON.NUMINCRBY", name, str(path), self._encode(number)
- )
-
-- @deprecated(version='4.0.0', reason='deprecated since redisjson 1.0.0')
+- @deprecated(version="4.0.0", reason="deprecated since redisjson 1.0.0")
- def nummultby(self, name, path, number):
- """Multiply the numeric (integer or floating point) JSON value under
- ``path`` at key ``name`` with the provided ``number``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonnummultby
+- """ # noqa
- return self.execute_command(
- "JSON.NUMMULTBY", name, str(path), self._encode(number)
- )
@@ -330,11 +4853,16 @@ index 4436f6a..0000000
-
- Return the count of cleared paths (ignoring non-array and non-objects
- paths).
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonclear
+- """ # noqa
- return self.execute_command("JSON.CLEAR", name, str(path))
-
- def delete(self, key, path=Path.rootPath()):
-- """Delete the JSON value stored at key ``key`` under ``path``."""
+- """Delete the JSON value stored at key ``key`` under ``path``.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsondel
+- """
- return self.execute_command("JSON.DEL", key, str(path))
-
- # forget is an alias for delete
@@ -347,7 +4875,9 @@ index 4436f6a..0000000
- ``args`` is zero or more paths, and defaults to root path
 -        ``no_escape`` is a boolean flag to add no_escape option to get
- non-ascii characters
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonget
+- """ # noqa
- pieces = [name]
- if no_escape:
- pieces.append("noescape")
@@ -370,7 +4900,9 @@ index 4436f6a..0000000
- """
- Get the objects stored as a JSON values under ``path``. ``keys``
- is a list of one or more keys.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonmget
+- """ # noqa
- pieces = []
- pieces += keys
- pieces.append(str(path))
@@ -387,6 +4919,8 @@ index 4436f6a..0000000
-
- For the purpose of using this within a pipeline, this command is also
- aliased to jsonset.
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonset
- """
- if decode_keys:
- obj = decode_dict_keys(obj)
@@ -405,10 +4939,60 @@ index 4436f6a..0000000
- pieces.append("XX")
- return self.execute_command("JSON.SET", *pieces)
-
+- def set_file(self, name, path, file_name, nx=False, xx=False, decode_keys=False):
+- """
+- Set the JSON value at key ``name`` under the ``path`` to the content
+- of the json file ``file_name``.
+-
+- ``nx`` if set to True, set ``value`` only if it does not exist.
+- ``xx`` if set to True, set ``value`` only if it exists.
+- ``decode_keys`` If set to True, the keys of ``obj`` will be decoded
+- with utf-8.
+-
+- """
+-
+- with open(file_name, "r") as fp:
+- file_content = loads(fp.read())
+-
+- return self.set(name, path, file_content, nx=nx, xx=xx, decode_keys=decode_keys)
+-
+- def set_path(self, json_path, root_folder, nx=False, xx=False, decode_keys=False):
+- """
+- Iterate over ``root_folder`` and set each JSON file to a value
+- under ``json_path`` with the file name as the key.
+-
+- ``nx`` if set to True, set ``value`` only if it does not exist.
+- ``xx`` if set to True, set ``value`` only if it exists.
+- ``decode_keys`` If set to True, the keys of ``obj`` will be decoded
+- with utf-8.
+-
+- """
+- set_files_result = {}
+- for root, dirs, files in os.walk(root_folder):
+- for file in files:
+- file_path = os.path.join(root, file)
+- try:
+- file_name = file_path.rsplit(".")[0]
+- self.set_file(
+- file_name,
+- json_path,
+- file_path,
+- nx=nx,
+- xx=xx,
+- decode_keys=decode_keys,
+- )
+- set_files_result[file_path] = True
+- except JSONDecodeError:
+- set_files_result[file_path] = False
+-
+- return set_files_result
+-
- def strlen(self, name, path=None):
- """Return the length of the string JSON value under ``path`` at key
- ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonstrlen
+- """ # noqa
- pieces = [name]
- if path is not None:
- pieces.append(str(path))
@@ -417,27 +5001,30 @@ index 4436f6a..0000000
- def toggle(self, name, path=Path.rootPath()):
- """Toggle boolean value under ``path`` at key ``name``.
- returning the new value.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsontoggle
+- """ # noqa
- return self.execute_command("JSON.TOGGLE", name, str(path))
-
- def strappend(self, name, value, path=Path.rootPath()):
- """Append to the string JSON value. If two options are specified after
- the key name, the path is determined to be the first. If a single
- option is passed, then the rootpath (i.e Path.rootPath()) is used.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsonstrappend
+- """ # noqa
- pieces = [name, str(path), self._encode(value)]
-- return self.execute_command(
-- "JSON.STRAPPEND", *pieces
-- )
+- return self.execute_command("JSON.STRAPPEND", *pieces)
-
- def debug(self, subcommand, key=None, path=Path.rootPath()):
- """Return the memory usage in bytes of a value under ``path`` from
- key ``name``.
-- """
+-
+- For more information: https://oss.redis.com/redisjson/commands/#jsondebg
+- """ # noqa
- valid_subcommands = ["MEMORY", "HELP"]
- if subcommand not in valid_subcommands:
-- raise DataError("The only valid subcommands are ",
-- str(valid_subcommands))
+- raise DataError("The only valid subcommands are ", str(valid_subcommands))
- pieces = [subcommand]
- if subcommand == "MEMORY":
- if key is None:
@@ -446,29 +5033,33 @@ index 4436f6a..0000000
- pieces.append(str(path))
- return self.execute_command("JSON.DEBUG", *pieces)
-
-- @deprecated(version='4.0.0',
-- reason='redisjson-py supported this, call get directly.')
+- @deprecated(
+- version="4.0.0", reason="redisjson-py supported this, call get directly."
+- )
- def jsonget(self, *args, **kwargs):
- return self.get(*args, **kwargs)
-
-- @deprecated(version='4.0.0',
-- reason='redisjson-py supported this, call get directly.')
+- @deprecated(
+-        version="4.0.0", reason="redisjson-py supported this, call mget directly."
+- )
- def jsonmget(self, *args, **kwargs):
- return self.mget(*args, **kwargs)
-
-- @deprecated(version='4.0.0',
-- reason='redisjson-py supported this, call get directly.')
+- @deprecated(
+-        version="4.0.0", reason="redisjson-py supported this, call set directly."
+- )
- def jsonset(self, *args, **kwargs):
- return self.set(*args, **kwargs)
diff --git a/redis/commands/json/decoders.py b/redis/commands/json/decoders.py
deleted file mode 100644
-index b19395c..0000000
+index b938471..0000000
--- a/redis/commands/json/decoders.py
+++ /dev/null
-@@ -1,59 +0,0 @@
--from ..helpers import nativestr
--import re
+@@ -1,60 +0,0 @@
-import copy
+-import re
+-
+-from ..helpers import nativestr
-
-
-def bulk_of_jsons(d):
@@ -501,7 +5092,7 @@ index b19395c..0000000
- One can't simply call int/float in a try/catch because there is a
- semantic difference between (for example) 15.0 and 15.
- """
-- floatreg = '^\\d+.\\d+$'
+- floatreg = "^\\d+.\\d+$"
- match = re.findall(floatreg, obj)
- if match != []:
- return float(match[0])
@@ -527,11 +5118,11 @@ index b19395c..0000000
- return b
diff --git a/redis/commands/json/path.py b/redis/commands/json/path.py
deleted file mode 100644
-index 6d87045..0000000
+index f0a413a..0000000
--- a/redis/commands/json/path.py
+++ /dev/null
@@ -1,16 +0,0 @@
--class Path(object):
+-class Path:
- """This class represents a path in a JSON value."""
-
- strPath = ""
@@ -549,11 +5140,11 @@ index 6d87045..0000000
- return self.strPath
diff --git a/redis/commands/redismodules.py b/redis/commands/redismodules.py
deleted file mode 100644
-index 5f629fb..0000000
+index eafd650..0000000
--- a/redis/commands/redismodules.py
+++ /dev/null
-@@ -1,35 +0,0 @@
--from json import JSONEncoder, JSONDecoder
+@@ -1,83 +0,0 @@
+-from json import JSONDecoder, JSONEncoder
-
-
-class RedisModuleCommands:
@@ -562,21 +5153,18 @@ index 5f629fb..0000000
- """
-
- def json(self, encoder=JSONEncoder(), decoder=JSONDecoder()):
-- """Access the json namespace, providing support for redis json.
-- """
+- """Access the json namespace, providing support for redis json."""
-
- from .json import JSON
-- jj = JSON(
-- client=self,
-- encoder=encoder,
-- decoder=decoder)
+-
+- jj = JSON(client=self, encoder=encoder, decoder=decoder)
- return jj
-
- def ft(self, index_name="idx"):
-- """Access the search namespace, providing support for redis search.
-- """
+- """Access the search namespace, providing support for redis search."""
-
- from .search import Search
+-
- s = Search(client=self, index_name=index_name)
- return s
-
@@ -586,11 +5174,62 @@ index 5f629fb..0000000
- """
-
- from .timeseries import TimeSeries
+-
- s = TimeSeries(client=self)
- return s
+-
+- def bf(self):
+- """Access the bloom namespace."""
+-
+- from .bf import BFBloom
+-
+- bf = BFBloom(client=self)
+- return bf
+-
+- def cf(self):
+-        """Access the cuckoo filter namespace."""
+-
+- from .bf import CFBloom
+-
+- cf = CFBloom(client=self)
+- return cf
+-
+- def cms(self):
+-        """Access the count-min sketch namespace."""
+-
+- from .bf import CMSBloom
+-
+- cms = CMSBloom(client=self)
+- return cms
+-
+- def topk(self):
+-        """Access the top-k namespace."""
+-
+- from .bf import TOPKBloom
+-
+- topk = TOPKBloom(client=self)
+- return topk
+-
+- def tdigest(self):
+-        """Access the t-digest namespace."""
+-
+- from .bf import TDigestBloom
+-
+- tdigest = TDigestBloom(client=self)
+- return tdigest
+-
+- def graph(self, index_name="idx"):
+- """Access the timeseries namespace, providing support for
+-        """Access the graph namespace, providing support for
+-        redis graph data.
+-
+- from .graph import Graph
+-
+- g = Graph(client=self, name=index_name)
+- return g
diff --git a/redis/commands/search/__init__.py b/redis/commands/search/__init__.py
deleted file mode 100644
-index 8320ad4..0000000
+index 94bc037..0000000
--- a/redis/commands/search/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
@@ -603,7 +5242,7 @@ index 8320ad4..0000000
- It abstracts the API of the module and lets you just use the engine.
- """
-
-- class BatchIndexer(object):
+- class BatchIndexer:
- """
- A batch indexer allows you to automatically batch
- document indexing in pipelines, flushing it every N documents.
@@ -631,7 +5270,7 @@ index 8320ad4..0000000
- replace=False,
- partial=False,
- no_create=False,
-- **fields
+- **fields,
- ):
- """
- Add a document to the batch query
@@ -645,7 +5284,7 @@ index 8320ad4..0000000
- replace=replace,
- partial=partial,
- no_create=no_create,
-- **fields
+- **fields,
- )
- self.current_chunk += 1
- self.total += 1
@@ -705,14 +5344,14 @@ index dd1dff3..0000000
- return s # Not a string we care about
diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py
deleted file mode 100644
-index b391d1f..0000000
+index 061e69c..0000000
--- a/redis/commands/search/aggregation.py
+++ /dev/null
-@@ -1,406 +0,0 @@
+@@ -1,357 +0,0 @@
-FIELDNAME = object()
-
-
--class Limit(object):
+-class Limit:
- def __init__(self, offset=0, count=0):
- self.offset = offset
- self.count = count
@@ -724,7 +5363,7 @@ index b391d1f..0000000
- return []
-
-
--class Reducer(object):
+-class Reducer:
- """
- Base reducer object for all reducers.
-
@@ -766,7 +5405,7 @@ index b391d1f..0000000
- return self._args
-
-
--class SortDirection(object):
+-class SortDirection:
- """
- This special class is used to indicate sort direction.
- """
@@ -793,76 +5432,7 @@ index b391d1f..0000000
- DIRSTRING = "DESC"
-
-
--class Group(object):
-- """
-- This object automatically created in the `AggregateRequest.group_by()`
-- """
--
-- def __init__(self, fields, reducers):
-- if not reducers:
-- raise ValueError("Need at least one reducer")
--
-- fields = [fields] if isinstance(fields, str) else fields
-- reducers = [reducers] if isinstance(reducers, Reducer) else reducers
--
-- self.fields = fields
-- self.reducers = reducers
-- self.limit = Limit()
--
-- def build_args(self):
-- ret = ["GROUPBY", str(len(self.fields))]
-- ret.extend(self.fields)
-- for reducer in self.reducers:
-- ret += ["REDUCE", reducer.NAME, str(len(reducer.args))]
-- ret.extend(reducer.args)
-- if reducer._alias is not None:
-- ret += ["AS", reducer._alias]
-- return ret
--
--
--class Projection(object):
-- """
-- This object automatically created in the `AggregateRequest.apply()`
-- """
--
-- def __init__(self, projector, alias=None):
-- self.alias = alias
-- self.projector = projector
--
-- def build_args(self):
-- ret = ["APPLY", self.projector]
-- if self.alias is not None:
-- ret += ["AS", self.alias]
--
-- return ret
--
--
--class SortBy(object):
-- """
-- This object automatically created in the `AggregateRequest.sort_by()`
-- """
--
-- def __init__(self, fields, max=0):
-- self.fields = fields
-- self.max = max
--
-- def build_args(self):
-- fields_args = []
-- for f in self.fields:
-- if isinstance(f, SortDirection):
-- fields_args += [f.field, f.DIRSTRING]
-- else:
-- fields_args += [f]
--
-- ret = ["SORTBY", str(len(fields_args))]
-- ret.extend(fields_args)
-- if self.max > 0:
-- ret += ["MAX", str(self.max)]
--
-- return ret
--
--
--class AggregateRequest(object):
+-class AggregateRequest:
- """
- Aggregation request which can be passed to `Client.aggregate`.
- """
@@ -883,6 +5453,7 @@ index b391d1f..0000000
- self._query = query
- self._aggregateplan = []
- self._loadfields = []
+- self._loadall = False
- self._limit = Limit()
- self._max = 0
- self._with_schema = False
@@ -896,9 +5467,13 @@ index b391d1f..0000000
-
- ### Parameters
-
-- - **fields**: One or more fields in the format of `@field`
+-        - **fields**: If fields are not specified, all the fields will be loaded.
+- Otherwise, fields should be given in the format of `@field`.
- """
-- self._loadfields.extend(fields)
+- if fields:
+- self._loadfields.extend(fields)
+- else:
+- self._loadall = True
- return self
-
- def group_by(self, fields, *reducers):
@@ -913,9 +5488,17 @@ index b391d1f..0000000
- - **reducers**: One or more reducers. Reducers may be found in the
- `aggregation` module.
- """
-- group = Group(fields, reducers)
-- self._aggregateplan.extend(group.build_args())
+- fields = [fields] if isinstance(fields, str) else fields
+- reducers = [reducers] if isinstance(reducers, Reducer) else reducers
-
+- ret = ["GROUPBY", str(len(fields)), *fields]
+- for reducer in reducers:
+- ret += ["REDUCE", reducer.NAME, str(len(reducer.args))]
+- ret.extend(reducer.args)
+- if reducer._alias is not None:
+- ret += ["AS", reducer._alias]
+-
+- self._aggregateplan.extend(ret)
- return self
-
- def apply(self, **kwexpr):
@@ -929,8 +5512,10 @@ index b391d1f..0000000
- expression itself, for example `apply(square_root="sqrt(@foo)")`
- """
- for alias, expr in kwexpr.items():
-- projection = Projection(expr, alias)
-- self._aggregateplan.extend(projection.build_args())
+- ret = ["APPLY", expr]
+- if alias is not None:
+- ret += ["AS", alias]
+- self._aggregateplan.extend(ret)
-
- return self
-
@@ -976,8 +5561,7 @@ index b391d1f..0000000
- `sort_by()` instead.
-
- """
-- limit = Limit(offset, num)
-- self._limit = limit
+- self._limit = Limit(offset, num)
- return self
-
- def sort_by(self, *fields, **kwargs):
@@ -1011,10 +5595,20 @@ index b391d1f..0000000
- if isinstance(fields, (str, SortDirection)):
- fields = [fields]
-
+- fields_args = []
+- for f in fields:
+- if isinstance(f, SortDirection):
+- fields_args += [f.field, f.DIRSTRING]
+- else:
+- fields_args += [f]
+-
+- ret = ["SORTBY", str(len(fields_args))]
+- ret.extend(fields_args)
- max = kwargs.get("max", 0)
-- sortby = SortBy(fields, max)
+- if max > 0:
+- ret += ["MAX", str(max)]
-
-- self._aggregateplan.extend(sortby.build_args())
+- self._aggregateplan.extend(ret)
- return self
-
- def filter(self, expressions):
@@ -1056,12 +5650,6 @@ index b391d1f..0000000
- self._cursor = args
- return self
-
-- def _limit_2_args(self, limit):
-- if limit[1]:
-- return ["LIMIT"] + [str(x) for x in limit]
-- else:
-- return []
--
- def build_args(self):
- # @foo:bar ...
- ret = [self._query]
@@ -1075,7 +5663,10 @@ index b391d1f..0000000
- if self._cursor:
- ret += self._cursor
-
-- if self._loadfields:
+- if self._loadall:
+- ret.append("LOAD")
+- ret.append("*")
+- elif self._loadfields:
- ret.append("LOAD")
- ret.append(str(len(self._loadfields)))
- ret.extend(self._loadfields)
@@ -1087,7 +5678,7 @@ index b391d1f..0000000
- return ret
-
-
--class Cursor(object):
+-class Cursor:
- def __init__(self, cid):
- self.cid = cid
- self.max_idle = 0
@@ -1102,33 +5693,33 @@ index b391d1f..0000000
- return args
-
-
--class AggregateResult(object):
+-class AggregateResult:
- def __init__(self, rows, cursor, schema):
- self.rows = rows
- self.cursor = cursor
- self.schema = schema
-
- def __repr__(self):
-- return "<{} at 0x{:x} Rows={}, Cursor={}>".format(
-- self.__class__.__name__,
-- id(self),
-- len(self.rows),
-- self.cursor.cid if self.cursor else -1,
+- cid = self.cursor.cid if self.cursor else -1
+- return (
+- f"<{self.__class__.__name__} at 0x{id(self):x} "
+- f"Rows={len(self.rows)}, Cursor={cid}>"
- )
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
deleted file mode 100644
-index 0cee2ad..0000000
+index 4ec6fc9..0000000
--- a/redis/commands/search/commands.py
+++ /dev/null
-@@ -1,706 +0,0 @@
+@@ -1,790 +0,0 @@
-import itertools
-import time
-
--from .document import Document
--from .result import Result
--from .query import Query
+-from ..helpers import parse_to_dict
-from ._util import to_string
-from .aggregation import AggregateRequest, AggregateResult, Cursor
+-from .document import Document
+-from .query import Query
+-from .result import Result
-from .suggestion import SuggestionParser
-
-NUMERIC = "NUMERIC"
@@ -1143,6 +5734,7 @@ index 0cee2ad..0000000
-EXPLAINCLI_CMD = "FT.EXPLAINCLI"
-DEL_CMD = "FT.DEL"
-AGGREGATE_CMD = "FT.AGGREGATE"
+-PROFILE_CMD = "FT.PROFILE"
-CURSOR_CMD = "FT.CURSOR"
-SPELLCHECK_CMD = "FT.SPELLCHECK"
-DICT_ADD_CMD = "FT.DICTADD"
@@ -1200,7 +5792,9 @@ index 0cee2ad..0000000
- allow searching in specific fields
- - **stopwords**: If not None, we create the index with this custom
- stopword list. The list can be empty
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftcreate
+- """ # noqa
-
- args = [CREATE_CMD, self.index_name]
- if definition is not None:
@@ -1230,7 +5824,9 @@ index 0cee2ad..0000000
- ### Parameters:
-
- - **fields**: a list of Field objects to add for the index
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftalter_schema_add
+- """ # noqa
-
- args = [ALTER_CMD, self.index_name, "SCHEMA", "ADD"]
- try:
@@ -1240,17 +5836,6 @@ index 0cee2ad..0000000
-
- return self.execute_command(*args)
-
-- def drop_index(self, delete_documents=True):
-- """
-- Drop the index if it exists. Deprecated from RediSearch 2.0.
--
-- ### Parameters:
--
-- - **delete_documents**: If `True`, all documents will be deleted.
-- """
-- keep_str = "" if delete_documents else "KEEPDOCS"
-- return self.execute_command(DROP_CMD, self.index_name, keep_str)
--
- def dropindex(self, delete_documents=False):
- """
- Drop the index if it exists.
@@ -1260,7 +5845,8 @@ index 0cee2ad..0000000
- ### Parameters:
-
- - **delete_documents**: If `True`, all documents will be deleted.
-- """
+- For more information: https://oss.redis.com/redisearch/Commands/#ftdropindex
+- """ # noqa
- keep_str = "" if delete_documents else "KEEPDOCS"
- return self.execute_command(DROP_CMD, self.index_name, keep_str)
-
@@ -1275,7 +5861,7 @@ index 0cee2ad..0000000
- partial=False,
- language=None,
- no_create=False,
-- **fields
+- **fields,
- ):
- """
- Internal add_document used for both batch and single doc indexing
@@ -1338,7 +5924,7 @@ index 0cee2ad..0000000
- partial=False,
- language=None,
- no_create=False,
-- **fields
+- **fields,
- ):
- """
- Add a single document to the index.
@@ -1367,7 +5953,9 @@ index 0cee2ad..0000000
- - **fields** kwargs dictionary of the document fields to be saved
- and/or indexed.
- NOTE: Geo points shoule be encoded as strings of "lon,lat"
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftadd
+- """ # noqa
- return self._add_document(
- doc_id,
- conn=None,
@@ -1378,7 +5966,7 @@ index 0cee2ad..0000000
- partial=partial,
- language=language,
- no_create=no_create,
-- **fields
+- **fields,
- )
-
- def add_document_hash(
@@ -1399,7 +5987,9 @@ index 0cee2ad..0000000
- - **replace**: if True, and the document already is in the index, we
- perform an update and reindex the document
- - **language**: Specify the language used for document tokenization.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftaddhash
+- """ # noqa
- return self._add_document_hash(
- doc_id,
- conn=None,
@@ -1417,7 +6007,9 @@ index 0cee2ad..0000000
-
 -        - **delete_actual_document**: if set to True, RediSearch also deletes
- the actual document if it is in the index
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftdel
+- """ # noqa
- args = [DEL_CMD, self.index_name, doc_id]
- if conn is None:
- conn = self.client
@@ -1448,6 +6040,8 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **ids**: the ids of the saved documents.
+-
+- For more information https://oss.redis.com/redisearch/Commands/#ftget
- """
-
- return self.client.execute_command(MGET_CMD, self.index_name, *ids)
@@ -1456,6 +6050,8 @@ index 0cee2ad..0000000
- """
 -        Get info and stats about the current index, including the number of
- documents, memory consumption, etc
+-
+- For more information https://oss.redis.com/redisearch/Commands/#ftinfo
- """
-
- res = self.client.execute_command(INFO_CMD, self.index_name)
@@ -1469,7 +6065,7 @@ index 0cee2ad..0000000
- # convert the query from a text to a query object
- query = Query(query)
- if not isinstance(query, Query):
-- raise ValueError("Bad query type %s" % type(query))
+- raise ValueError(f"Bad query type {type(query)}")
-
- args += query.get_args()
- return args, query
@@ -1483,7 +6079,9 @@ index 0cee2ad..0000000
- - **query**: the search query. Either a text for simple queries with
- default parameters, or a Query object for complex queries.
- See RediSearch's documentation on query format
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftsearch
+- """ # noqa
- args, query = self._mk_query_args(query)
- st = time.time()
- res = self.execute_command(SEARCH_CMD, *args)
@@ -1497,6 +6095,10 @@ index 0cee2ad..0000000
- )
-
- def explain(self, query):
+- """Returns the execution plan for a complex query.
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftexplain
+- """ # noqa
- args, query_text = self._mk_query_args(query)
- return self.execute_command(EXPLAIN_CMD, *args)
-
@@ -1505,15 +6107,17 @@ index 0cee2ad..0000000
-
- def aggregate(self, query):
- """
-- Issue an aggregation query
+- Issue an aggregation query.
-
- ### Parameters
-
-- **query**: This can be either an `AggeregateRequest`, or a `Cursor`
+- **query**: This can be either an `AggregateRequest`, or a `Cursor`
-
- An `AggregateResult` object is returned. You can access the rows from
- its `rows` property, which will always yield the rows of the result.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftaggregate
+- """ # noqa
- if isinstance(query, AggregateRequest):
- has_cursor = bool(query._cursor)
- cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
@@ -1524,6 +6128,9 @@ index 0cee2ad..0000000
- raise ValueError("Bad query", query)
-
- raw = self.execute_command(*cmd)
+- return self._get_AggregateResult(raw, query, has_cursor)
+-
+- def _get_AggregateResult(self, raw, query, has_cursor):
- if has_cursor:
- if isinstance(query, Cursor):
- query.cid = raw[1]
@@ -1541,8 +6148,49 @@ index 0cee2ad..0000000
- schema = None
- rows = raw[1:]
-
-- res = AggregateResult(rows, cursor, schema)
-- return res
+- return AggregateResult(rows, cursor, schema)
+-
+- def profile(self, query, limited=False):
+- """
+- Performs a search or aggregate command and collects performance
+- information.
+-
+- ### Parameters
+-
+- **query**: This can be either an `AggregateRequest`, `Query` or
+- string.
+- **limited**: If set to True, removes details of reader iterator.
+-
+- """
+- st = time.time()
+- cmd = [PROFILE_CMD, self.index_name, ""]
+- if limited:
+- cmd.append("LIMITED")
+- cmd.append("QUERY")
+-
+- if isinstance(query, AggregateRequest):
+- cmd[2] = "AGGREGATE"
+- cmd += query.build_args()
+- elif isinstance(query, Query):
+- cmd[2] = "SEARCH"
+- cmd += query.get_args()
+- else:
+- raise ValueError("Must provide AggregateRequest object or " "Query object.")
+-
+- res = self.execute_command(*cmd)
+-
+- if isinstance(query, AggregateRequest):
+- result = self._get_AggregateResult(res[0], query, query._cursor)
+- else:
+- result = Result(
+- res[0],
+- not query._no_content,
+- duration=(time.time() - st) * 1000.0,
+- has_payload=query._with_payloads,
+- with_scores=query._with_scores,
+- )
+-
+- return result, parse_to_dict(res[1])
-
- def spellcheck(self, query, distance=None, include=None, exclude=None):
- """
@@ -1555,7 +6203,9 @@ index 0cee2ad..0000000
- suggestions (default: 1, max: 4).
- **include**: specifies an inclusion custom dictionary.
- **exclude**: specifies an exclusion custom dictionary.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftspellcheck
+- """ # noqa
- cmd = [SPELLCHECK_CMD, self.index_name, query]
- if distance:
- cmd.extend(["DISTANCE", distance])
@@ -1599,8 +6249,7 @@ index 0cee2ad..0000000
- # ]
- # }
- corrections[_correction[1]] = [
-- {"score": _item[0], "suggestion": _item[1]}
-- for _item in _correction[2]
+- {"score": _item[0], "suggestion": _item[1]} for _item in _correction[2]
- ]
-
- return corrections
@@ -1612,7 +6261,9 @@ index 0cee2ad..0000000
-
- - **name**: Dictionary name.
- - **terms**: List of items for adding to the dictionary.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftdictadd
+- """ # noqa
- cmd = [DICT_ADD_CMD, name]
- cmd.extend(terms)
- return self.execute_command(*cmd)
@@ -1624,7 +6275,9 @@ index 0cee2ad..0000000
-
- - **name**: Dictionary name.
- - **terms**: List of items for removing from the dictionary.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftdictdel
+- """ # noqa
- cmd = [DICT_DEL_CMD, name]
- cmd.extend(terms)
- return self.execute_command(*cmd)
@@ -1635,7 +6288,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **name**: Dictionary name.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftdictdump
+- """ # noqa
- cmd = [DICT_DUMP_CMD, name]
- return self.execute_command(*cmd)
-
@@ -1646,7 +6301,9 @@ index 0cee2ad..0000000
-
- - **option**: the name of the configuration option.
- - **value**: a value for the configuration option.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftconfig
+- """ # noqa
- cmd = [CONFIG_CMD, "SET", option, value]
- raw = self.execute_command(*cmd)
- return raw == "OK"
@@ -1657,7 +6314,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **option**: the name of the configuration option.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftconfig
+- """ # noqa
- cmd = [CONFIG_CMD, "GET", option]
- res = {}
- raw = self.execute_command(*cmd)
@@ -1673,7 +6332,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **tagfield**: Tag field name
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#fttagvals
+- """ # noqa
-
- return self.execute_command(TAGVALS_CMD, self.index_name, tagfield)
-
@@ -1684,7 +6345,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **alias**: Name of the alias to create
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftaliasadd
+- """ # noqa
-
- return self.execute_command(ALIAS_ADD_CMD, alias, self.index_name)
-
@@ -1695,7 +6358,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **alias**: Name of the alias to create
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftaliasupdate
+- """ # noqa
-
- return self.execute_command(ALIAS_UPDATE_CMD, alias, self.index_name)
-
@@ -1706,7 +6371,9 @@ index 0cee2ad..0000000
- ### Parameters
-
- - **alias**: Name of the alias to delete
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftaliasdel
+- """ # noqa
- return self.execute_command(ALIAS_DEL_CMD, alias)
-
- def sugadd(self, key, *suggestions, **kwargs):
@@ -1715,8 +6382,9 @@ index 0cee2ad..0000000
- a score and string.
- If kwargs["increment"] is true and the terms are already in the
- server's dictionary, we increment their scores.
-- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugadd>`_. # noqa
-- """
+-
+- For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugadd
+- """ # noqa
- # If Transaction is not False it will MULTI/EXEC which will error
- pipe = self.pipeline(transaction=False)
- for sug in suggestions:
@@ -1734,25 +6402,25 @@ index 0cee2ad..0000000
- def suglen(self, key):
- """
- Return the number of entries in the AutoCompleter index.
-- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsuglen>`_. # noqa
-- """
+-
+- For more information https://oss.redis.com/redisearch/master/Commands/#ftsuglen
+- """ # noqa
- return self.execute_command(SUGLEN_COMMAND, key)
-
- def sugdel(self, key, string):
- """
- Delete a string from the AutoCompleter index.
- Returns 1 if the string was found and deleted, 0 otherwise.
-- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugdel>`_. # noqa
-- """
+-
+- For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugdel
+- """ # noqa
- return self.execute_command(SUGDEL_COMMAND, key, string)
-
- def sugget(
-- self, key, prefix, fuzzy=False, num=10, with_scores=False,
-- with_payloads=False
+- self, key, prefix, fuzzy=False, num=10, with_scores=False, with_payloads=False
- ):
- """
- Get a list of suggestions from the AutoCompleter, for a given prefix.
-- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugget>`_. # noqa
-
- Parameters:
-
@@ -1779,7 +6447,9 @@ index 0cee2ad..0000000
- list:
- A list of Suggestion objects. If with_scores was False, the
- score of all suggestions is 1.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugget
+- """ # noqa
- args = [SUGGET_COMMAND, key, prefix, "MAX", num]
- if fuzzy:
- args.append(FUZZY)
@@ -1811,7 +6481,9 @@ index 0cee2ad..0000000
- If set to true, we do not scan and index.
- terms :
- The terms.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftsynupdate
+- """ # noqa
- cmd = [SYNUPDATE_CMD, self.index_name, groupid]
- if skipinitial:
- cmd.extend(["SKIPINITIALSCAN"])
@@ -1824,16 +6496,18 @@ index 0cee2ad..0000000
-
- The command is used to dump the synonyms data structure.
- Returns a list of synonym terms and their synonym group ids.
-- """
+-
+- For more information: https://oss.redis.com/redisearch/Commands/#ftsyndump
+- """ # noqa
- raw = self.execute_command(SYNDUMP_CMD, self.index_name)
- return {raw[i]: raw[i + 1] for i in range(0, len(raw), 2)}
diff --git a/redis/commands/search/document.py b/redis/commands/search/document.py
deleted file mode 100644
-index 0d4255d..0000000
+index 5b30505..0000000
--- a/redis/commands/search/document.py
+++ /dev/null
@@ -1,13 +0,0 @@
--class Document(object):
+-class Document:
- """
- Represents a single document in a result set
- """
@@ -1845,14 +6519,14 @@ index 0d4255d..0000000
- setattr(self, k, v)
-
- def __repr__(self):
-- return "Document %s" % self.__dict__
+- return f"Document {self.__dict__}"
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
deleted file mode 100644
-index 45114a4..0000000
+index 69e3908..0000000
--- a/redis/commands/search/field.py
+++ /dev/null
-@@ -1,94 +0,0 @@
--class Field(object):
+@@ -1,92 +0,0 @@
+-class Field:
-
- NUMERIC = "NUMERIC"
- TEXT = "TEXT"
@@ -1863,8 +6537,7 @@ index 45114a4..0000000
- NOINDEX = "NOINDEX"
- AS = "AS"
-
-- def __init__(self, name, args=[], sortable=False,
-- no_index=False, as_name=None):
+- def __init__(self, name, args=[], sortable=False, no_index=False, as_name=None):
- self.name = name
- self.args = args
- self.args_suffix = list()
@@ -1901,8 +6574,7 @@ index 45114a4..0000000
- def __init__(
- self, name, weight=1.0, no_stem=False, phonetic_matcher=None, **kwargs
- ):
-- Field.__init__(self, name,
-- args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
+- Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
-
- if no_stem:
- Field.append_arg(self, self.NOSTEM)
@@ -1948,10 +6620,10 @@ index 45114a4..0000000
- )
diff --git a/redis/commands/search/indexDefinition.py b/redis/commands/search/indexDefinition.py
deleted file mode 100644
-index 4fbc609..0000000
+index 0c7a3b0..0000000
--- a/redis/commands/search/indexDefinition.py
+++ /dev/null
-@@ -1,80 +0,0 @@
+@@ -1,79 +0,0 @@
-from enum import Enum
-
-
@@ -1962,7 +6634,7 @@ index 4fbc609..0000000
- JSON = 2
-
-
--class IndexDefinition(object):
+-class IndexDefinition:
- """IndexDefinition is used to define a index definition for automatic
- indexing on Hash or Json update."""
-
@@ -1992,8 +6664,7 @@ index 4fbc609..0000000
- elif index_type is IndexType.JSON:
- self.args.extend(["ON", "JSON"])
- elif index_type is not None:
-- raise RuntimeError("index_type must be one of {}".
-- format(list(IndexType)))
+- raise RuntimeError(f"index_type must be one of {list(IndexType)}")
-
- def _appendPrefix(self, prefix):
- """Append PREFIX."""
@@ -2034,11 +6705,11 @@ index 4fbc609..0000000
- self.args.append(payload_field)
diff --git a/redis/commands/search/query.py b/redis/commands/search/query.py
deleted file mode 100644
-index 85a8255..0000000
+index 2bb8347..0000000
--- a/redis/commands/search/query.py
+++ /dev/null
-@@ -1,325 +0,0 @@
--class Query(object):
+@@ -1,322 +0,0 @@
+-class Query:
- """
- Query is used to build complex queries that have more parameters than just
- the query string. The query string is set in the constructor, and other
@@ -2102,11 +6773,9 @@ index 85a8255..0000000
- def _mk_field_list(self, fields):
- if not fields:
- return []
-- return \
-- [fields] if isinstance(fields, str) else list(fields)
+- return [fields] if isinstance(fields, str) else list(fields)
-
-- def summarize(self, fields=None, context_len=None,
-- num_frags=None, sep=None):
+- def summarize(self, fields=None, context_len=None, num_frags=None, sep=None):
- """
- Return an abridged format of the field, containing only the segments of
- the field which contain the matching term(s).
@@ -2331,7 +7000,7 @@ index 85a8255..0000000
- return self
-
-
--class Filter(object):
+-class Filter:
- def __init__(self, keyword, field, *args):
- self.args = [keyword, field] + list(args)
-
@@ -2340,11 +7009,10 @@ index 85a8255..0000000
- INF = "+inf"
- NEG_INF = "-inf"
-
-- def __init__(self, field, minval, maxval, minExclusive=False,
-- maxExclusive=False):
+- def __init__(self, field, minval, maxval, minExclusive=False, maxExclusive=False):
- args = [
-- minval if not minExclusive else "({}".format(minval),
-- maxval if not maxExclusive else "({}".format(maxval),
+- minval if not minExclusive else f"({minval}",
+- maxval if not maxExclusive else f"({maxval}",
- ]
-
- Filter.__init__(self, "FILTER", field, *args)
@@ -2360,15 +7028,15 @@ index 85a8255..0000000
- Filter.__init__(self, "GEOFILTER", field, lon, lat, radius, unit)
-
-
--class SortbyField(object):
+-class SortbyField:
- def __init__(self, field, asc=True):
- self.args = [field, "ASC" if asc else "DESC"]
diff --git a/redis/commands/search/querystring.py b/redis/commands/search/querystring.py
deleted file mode 100644
-index aecd3b8..0000000
+index 1da0387..0000000
--- a/redis/commands/search/querystring.py
+++ /dev/null
-@@ -1,321 +0,0 @@
+@@ -1,314 +0,0 @@
-def tags(*t):
- """
- Indicate that the values should be matched to a tag field
@@ -2386,8 +7054,7 @@ index aecd3b8..0000000
- """
- Indicate that value is a numeric range
- """
-- return RangeValue(a, b, inclusive_min=inclusive_min,
-- inclusive_max=inclusive_max)
+- return RangeValue(a, b, inclusive_min=inclusive_min, inclusive_max=inclusive_max)
-
-
-def equal(n):
@@ -2432,7 +7099,7 @@ index aecd3b8..0000000
- return GeoValue(lat, lon, radius, unit)
-
-
--class Value(object):
+-class Value:
- @property
- def combinable(self):
- """
@@ -2505,7 +7172,7 @@ index aecd3b8..0000000
- self.unit = unit
-
-
--class Node(object):
+-class Node:
- def __init__(self, *children, **kwparams):
- """
- Create a node
@@ -2568,14 +7235,10 @@ index aecd3b8..0000000
-
- def join_fields(self, key, vals):
- if len(vals) == 1:
-- return [BaseNode("@{}:{}".format(key, vals[0].to_string()))]
+- return [BaseNode(f"@{key}:{vals[0].to_string()}")]
- if not vals[0].combinable:
-- return [BaseNode("@{}:{}".format(key,
-- v.to_string())) for v in vals]
-- s = BaseNode(
-- "@{}:({})".format(key,
-- self.JOINSTR.join(v.to_string() for v in vals))
-- )
+- return [BaseNode(f"@{key}:{v.to_string()}") for v in vals]
+- s = BaseNode(f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})")
- return [s]
-
- @classmethod
@@ -2591,9 +7254,7 @@ index aecd3b8..0000000
- def to_string(self, with_parens=None):
- with_parens = self._should_use_paren(with_parens)
- pre, post = ("(", ")") if with_parens else ("", "")
-- return "{}{}{}".format(
-- pre, self.JOINSTR.join(n.to_string() for n in self.params), post
-- )
+- return f"{pre}{self.JOINSTR.join(n.to_string() for n in self.params)}{post}"
-
- def _should_use_paren(self, optval):
- if optval is not None:
@@ -2606,7 +7267,7 @@ index aecd3b8..0000000
-
-class BaseNode(Node):
- def __init__(self, s):
-- super(BaseNode, self).__init__()
+- super().__init__()
- self.s = str(s)
-
- def to_string(self, with_parens=None):
@@ -2639,7 +7300,7 @@ index aecd3b8..0000000
-
- def to_string(self, with_parens=None):
- with_parens = self._should_use_paren(with_parens)
-- ret = super(DisjunctNode, self).to_string(with_parens=False)
+- ret = super().to_string(with_parens=False)
- if with_parens:
- return "(-" + ret + ")"
- else:
@@ -2665,7 +7326,7 @@ index aecd3b8..0000000
-
- def to_string(self, with_parens=None):
- with_parens = self._should_use_paren(with_parens)
-- ret = super(OptionalNode, self).to_string(with_parens=False)
+- ret = super().to_string(with_parens=False)
- if with_parens:
- return "(~" + ret + ")"
- else:
@@ -2692,7 +7353,7 @@ index aecd3b8..0000000
- return intersect(*args, **kwargs).to_string()
diff --git a/redis/commands/search/reducers.py b/redis/commands/search/reducers.py
deleted file mode 100644
-index 6cbbf2f..0000000
+index 41ed11a..0000000
--- a/redis/commands/search/reducers.py
+++ /dev/null
@@ -1,178 +0,0 @@
@@ -2701,7 +7362,7 @@ index 6cbbf2f..0000000
-
-class FieldOnlyReducer(Reducer):
- def __init__(self, field):
-- super(FieldOnlyReducer, self).__init__(field)
+- super().__init__(field)
- self._field = field
-
-
@@ -2713,7 +7374,7 @@ index 6cbbf2f..0000000
- NAME = "COUNT"
-
- def __init__(self):
-- super(count, self).__init__()
+- super().__init__()
-
-
-class sum(FieldOnlyReducer):
@@ -2724,7 +7385,7 @@ index 6cbbf2f..0000000
- NAME = "SUM"
-
- def __init__(self, field):
-- super(sum, self).__init__(field)
+- super().__init__(field)
-
-
-class min(FieldOnlyReducer):
@@ -2735,7 +7396,7 @@ index 6cbbf2f..0000000
- NAME = "MIN"
-
- def __init__(self, field):
-- super(min, self).__init__(field)
+- super().__init__(field)
-
-
-class max(FieldOnlyReducer):
@@ -2746,7 +7407,7 @@ index 6cbbf2f..0000000
- NAME = "MAX"
-
- def __init__(self, field):
-- super(max, self).__init__(field)
+- super().__init__(field)
-
-
-class avg(FieldOnlyReducer):
@@ -2757,7 +7418,7 @@ index 6cbbf2f..0000000
- NAME = "AVG"
-
- def __init__(self, field):
-- super(avg, self).__init__(field)
+- super().__init__(field)
-
-
-class tolist(FieldOnlyReducer):
@@ -2768,7 +7429,7 @@ index 6cbbf2f..0000000
- NAME = "TOLIST"
-
- def __init__(self, field):
-- super(tolist, self).__init__(field)
+- super().__init__(field)
-
-
-class count_distinct(FieldOnlyReducer):
@@ -2780,7 +7441,7 @@ index 6cbbf2f..0000000
- NAME = "COUNT_DISTINCT"
-
- def __init__(self, field):
-- super(count_distinct, self).__init__(field)
+- super().__init__(field)
-
-
-class count_distinctish(FieldOnlyReducer):
@@ -2802,7 +7463,7 @@ index 6cbbf2f..0000000
- NAME = "QUANTILE"
-
- def __init__(self, field, pct):
-- super(quantile, self).__init__(field, str(pct))
+- super().__init__(field, str(pct))
- self._field = field
-
-
@@ -2814,7 +7475,7 @@ index 6cbbf2f..0000000
- NAME = "STDDEV"
-
- def __init__(self, field):
-- super(stddev, self).__init__(field)
+- super().__init__(field)
-
-
-class first_value(Reducer):
@@ -2853,7 +7514,7 @@ index 6cbbf2f..0000000
- args = [field]
- if fieldstrs:
- args += ["BY"] + fieldstrs
-- super(first_value, self).__init__(*args)
+- super().__init__(*args)
- self._field = field
-
-
@@ -2872,19 +7533,19 @@ index 6cbbf2f..0000000
- **size**: Return this many items (can be less)
- """
- args = [field, str(size)]
-- super(random_sample, self).__init__(*args)
+- super().__init__(*args)
- self._field = field
diff --git a/redis/commands/search/result.py b/redis/commands/search/result.py
deleted file mode 100644
-index 9cd922a..0000000
+index 5f4aca6..0000000
--- a/redis/commands/search/result.py
+++ /dev/null
@@ -1,73 +0,0 @@
--from .document import Document
-from ._util import to_string
+-from .document import Document
-
-
--class Result(object):
+-class Result:
- """
- Represents the result of a search query, and has an array of Document
- objects
@@ -2952,17 +7613,17 @@ index 9cd922a..0000000
- self.docs.append(doc)
-
- def __repr__(self):
-- return "Result{%d total, docs: %s}" % (self.total, self.docs)
+- return f"Result{{{self.total} total, docs: {self.docs}}}"
diff --git a/redis/commands/search/suggestion.py b/redis/commands/search/suggestion.py
deleted file mode 100644
-index 3401af9..0000000
+index 5d1eba6..0000000
--- a/redis/commands/search/suggestion.py
+++ /dev/null
-@@ -1,53 +0,0 @@
+@@ -1,51 +0,0 @@
-from ._util import to_string
-
-
--class Suggestion(object):
+-class Suggestion:
- """
- Represents a single suggestion being sent or returned from the
- autocomplete server
@@ -2977,7 +7638,7 @@ index 3401af9..0000000
- return self.string
-
-
--class SuggestionParser(object):
+-class SuggestionParser:
- """
- Internal class used to parse results from the `SUGGET` command.
- This needs to consume either 1, 2, or 3 values at a time from
@@ -3007,33 +7668,123 @@ index 3401af9..0000000
- def __iter__(self):
- for i in range(0, len(self._sugs), self.sugsize):
- ss = self._sugs[i]
-- score = float(self._sugs[i + self._scoreidx]) \
-- if self.with_scores else 1.0
-- payload = self._sugs[i + self._payloadidx] \
-- if self.with_payloads else None
+- score = float(self._sugs[i + self._scoreidx]) if self.with_scores else 1.0
+- payload = self._sugs[i + self._payloadidx] if self.with_payloads else None
- yield Suggestion(ss, score, payload)
+diff --git a/redis/commands/sentinel.py b/redis/commands/sentinel.py
+deleted file mode 100644
+index a9b06c2..0000000
+--- a/redis/commands/sentinel.py
++++ /dev/null
+@@ -1,93 +0,0 @@
+-import warnings
+-
+-
+-class SentinelCommands:
+- """
+- A class containing the commands specific to Redis Sentinel. This class is
+- to be used as a mixin.
+- """
+-
+- def sentinel(self, *args):
+- "Redis Sentinel's SENTINEL command."
+- warnings.warn(DeprecationWarning("Use the individual sentinel_* methods"))
+-
+- def sentinel_get_master_addr_by_name(self, service_name):
+- "Returns a (host, port) pair for the given ``service_name``"
+- return self.execute_command("SENTINEL GET-MASTER-ADDR-BY-NAME", service_name)
+-
+- def sentinel_master(self, service_name):
+- "Returns a dictionary containing the specified masters state."
+- return self.execute_command("SENTINEL MASTER", service_name)
+-
+- def sentinel_masters(self):
+- "Returns a list of dictionaries containing each master's state."
+- return self.execute_command("SENTINEL MASTERS")
+-
+- def sentinel_monitor(self, name, ip, port, quorum):
+- "Add a new master to Sentinel to be monitored"
+- return self.execute_command("SENTINEL MONITOR", name, ip, port, quorum)
+-
+- def sentinel_remove(self, name):
+- "Remove a master from Sentinel's monitoring"
+- return self.execute_command("SENTINEL REMOVE", name)
+-
+- def sentinel_sentinels(self, service_name):
+- "Returns a list of sentinels for ``service_name``"
+- return self.execute_command("SENTINEL SENTINELS", service_name)
+-
+- def sentinel_set(self, name, option, value):
+- "Set Sentinel monitoring parameters for a given master"
+- return self.execute_command("SENTINEL SET", name, option, value)
+-
+- def sentinel_slaves(self, service_name):
+- "Returns a list of slaves for ``service_name``"
+- return self.execute_command("SENTINEL SLAVES", service_name)
+-
+- def sentinel_reset(self, pattern):
+- """
+- This command will reset all the masters with matching name.
+- The pattern argument is a glob-style pattern.
+-
+- The reset process clears any previous state in a master (including a
+- failover in progress), and removes every slave and sentinel already
+- discovered and associated with the master.
+- """
+- return self.execute_command("SENTINEL RESET", pattern, once=True)
+-
+- def sentinel_failover(self, new_master_name):
+- """
+- Force a failover as if the master was not reachable, and without
+- asking for agreement to other Sentinels (however a new version of the
+- configuration will be published so that the other Sentinels will
+- update their configurations).
+- """
+- return self.execute_command("SENTINEL FAILOVER", new_master_name)
+-
+- def sentinel_ckquorum(self, new_master_name):
+- """
+- Check if the current Sentinel configuration is able to reach the
+- quorum needed to failover a master, and the majority needed to
+- authorize the failover.
+-
+- This command should be used in monitoring systems to check if a
+- Sentinel deployment is ok.
+- """
+- return self.execute_command("SENTINEL CKQUORUM", new_master_name, once=True)
+-
+- def sentinel_flushconfig(self):
+- """
+- Force Sentinel to rewrite its configuration on disk, including the
+- current Sentinel state.
+-
+- Normally Sentinel rewrites the configuration every time something
+- changes in its state (in the context of the subset of the state which
+- is persisted on disk across restart).
+- However sometimes it is possible that the configuration file is lost
+- because of operation errors, disk failures, package upgrade scripts or
+- configuration managers. In those cases a way to force Sentinel to
+- rewrite the configuration file is handy.
+-
+- This command works even if the previous configuration file is
+- completely missing.
+- """
+- return self.execute_command("SENTINEL FLUSHCONFIG")
diff --git a/redis/commands/timeseries/__init__.py b/redis/commands/timeseries/__init__.py
deleted file mode 100644
-index 5ce538f..0000000
+index 5b1f151..0000000
--- a/redis/commands/timeseries/__init__.py
+++ /dev/null
-@@ -1,85 +0,0 @@
+@@ -1,80 +0,0 @@
-import redis.client
-
--from .utils import (
-- parse_range,
-- parse_get,
-- parse_m_range,
-- parse_m_get,
--)
--from .info import TSInfo
-from ..helpers import parse_to_list
-from .commands import (
- ALTER_CMD,
- CREATE_CMD,
- CREATERULE_CMD,
-- DELETERULE_CMD,
- DEL_CMD,
+- DELETERULE_CMD,
- GET_CMD,
- INFO_CMD,
- MGET_CMD,
@@ -3044,6 +7795,8 @@ index 5ce538f..0000000
- REVRANGE_CMD,
- TimeSeriesCommands,
-)
+-from .info import TSInfo
+-from .utils import parse_get, parse_m_get, parse_m_range, parse_range
-
-
-class TimeSeries(TimeSeriesCommands):
@@ -3105,13 +7858,12 @@ index 5ce538f..0000000
- """Pipeline for the module."""
diff --git a/redis/commands/timeseries/commands.py b/redis/commands/timeseries/commands.py
deleted file mode 100644
-index 3b9ee0f..0000000
+index c86e0b9..0000000
--- a/redis/commands/timeseries/commands.py
+++ /dev/null
-@@ -1,775 +0,0 @@
+@@ -1,768 +0,0 @@
-from redis.exceptions import DataError
-
--
-ADD_CMD = "TS.ADD"
-ALTER_CMD = "TS.ALTER"
-CREATERULE_CMD = "TS.CREATERULE"
@@ -3137,8 +7889,6 @@ index 3b9ee0f..0000000
- def create(self, key, **kwargs):
- """
- Create a new time-series.
-- For more information see
-- `TS.CREATE <https://oss.redis.com/redistimeseries/master/commands/#tscreate>`_. # noqa
-
- Args:
-
@@ -3171,7 +7921,9 @@ index 3b9ee0f..0000000
- - 'min': only override if the value is lower than the existing value.
- - 'max': only override if the value is higher than the existing value.
- When this is not set, the server-wide default will be used.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/commands/#tscreate
+- """ # noqa
- retention_msecs = kwargs.get("retention_msecs", None)
- uncompressed = kwargs.get("uncompressed", False)
- labels = kwargs.get("labels", {})
@@ -3190,10 +7942,11 @@ index 3b9ee0f..0000000
- """
- Update the retention, labels of an existing key.
- For more information see
-- `TS.ALTER <https://oss.redis.com/redistimeseries/master/commands/#tsalter>`_. # noqa
-
- The parameters are the same as TS.CREATE.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/commands/#tsalter
+- """ # noqa
- retention_msecs = kwargs.get("retention_msecs", None)
- labels = kwargs.get("labels", {})
- duplicate_policy = kwargs.get("duplicate_policy", None)
@@ -3208,7 +7961,6 @@ index 3b9ee0f..0000000
- """
- Append (or create and append) a new sample to the series.
- For more information see
-- `TS.ADD <https://oss.redis.com/redistimeseries/master/commands/#tsadd>`_. # noqa
-
- Args:
-
@@ -3240,7 +7992,9 @@ index 3b9ee0f..0000000
- - 'min': only override if the value is lower than the existing value.
- - 'max': only override if the value is higher than the existing value.
- When this is not set, the server-wide default will be used.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsadd
+- """ # noqa
- retention_msecs = kwargs.get("retention_msecs", None)
- uncompressed = kwargs.get("uncompressed", False)
- labels = kwargs.get("labels", {})
@@ -3261,9 +8015,9 @@ index 3b9ee0f..0000000
- `key` with `timestamp`.
- Expects a list of `tuples` as (`key`,`timestamp`, `value`).
- Return value is an array with timestamps of insertions.
-- For more information see
-- `TS.MADD <https://oss.redis.com/redistimeseries/master/commands/#tsmadd>`_. # noqa
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmadd
+- """ # noqa
- params = []
- for ktv in ktv_tuples:
- for item in ktv:
@@ -3277,8 +8031,6 @@ index 3b9ee0f..0000000
- sample's of a series.
- This command can be used as a counter or gauge that automatically gets
- history as a time series.
-- For more information see
-- `TS.INCRBY <https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby>`_. # noqa
-
- Args:
-
@@ -3300,7 +8052,9 @@ index 3b9ee0f..0000000
- chunk_size:
- Each time-series uses chunks of memory of fixed size for time series samples.
- You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+- """ # noqa
- timestamp = kwargs.get("timestamp", None)
- retention_msecs = kwargs.get("retention_msecs", None)
- uncompressed = kwargs.get("uncompressed", False)
@@ -3321,8 +8075,6 @@ index 3b9ee0f..0000000
- latest sample's of a series.
- This command can be used as a counter or gauge that
- automatically gets history as a time series.
-- For more information see
-- `TS.DECRBY <https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby>`_. # noqa
-
- Args:
-
@@ -3348,7 +8100,9 @@ index 3b9ee0f..0000000
- chunk_size:
- Each time-series uses chunks of memory of fixed size for time series samples.
- You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+- """ # noqa
- timestamp = kwargs.get("timestamp", None)
- retention_msecs = kwargs.get("retention_msecs", None)
- uncompressed = kwargs.get("uncompressed", False)
@@ -3371,7 +8125,6 @@ index 3b9ee0f..0000000
- and end data points will also be deleted.
- Return the count for deleted items.
- For more information see
-- `TS.DEL <https://oss.redis.com/redistimeseries/master/commands/#tsdel>`_. # noqa
-
- Args:
-
@@ -3381,24 +8134,20 @@ index 3b9ee0f..0000000
- Start timestamp for the range deletion.
- to_time:
- End timestamp for the range deletion.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdel
+- """ # noqa
- return self.execute_command(DEL_CMD, key, from_time, to_time)
-
-- def createrule(
-- self,
-- source_key,
-- dest_key,
-- aggregation_type,
-- bucket_size_msec
-- ):
+- def createrule(self, source_key, dest_key, aggregation_type, bucket_size_msec):
- """
- Create a compaction rule from values added to `source_key` into `dest_key`.
- Aggregating for `bucket_size_msec` where an `aggregation_type` can be
- [`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`,
- `std.p`, `std.s`, `var.p`, `var.s`]
-- For more information see
-- `TS.CREATERULE <https://oss.redis.com/redistimeseries/master/commands/#tscreaterule>`_. # noqa
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tscreaterule
+- """ # noqa
- params = [source_key, dest_key]
- self._appendAggregation(params, aggregation_type, bucket_size_msec)
-
@@ -3408,8 +8157,9 @@ index 3b9ee0f..0000000
- """
- Delete a compaction rule.
- For more information see
-- `TS.DELETERULE <https://oss.redis.com/redistimeseries/master/commands/#tsdeleterule>`_. # noqa
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdeleterule
+- """ # noqa
- return self.execute_command(DELETERULE_CMD, source_key, dest_key)
-
- def __range_params(
@@ -3428,11 +8178,7 @@ index 3b9ee0f..0000000
- """Create TS.RANGE and TS.REVRANGE arguments."""
- params = [key, from_time, to_time]
- self._appendFilerByTs(params, filter_by_ts)
-- self._appendFilerByValue(
-- params,
-- filter_by_min_value,
-- filter_by_max_value
-- )
+- self._appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
- self._appendCount(params, count)
- self._appendAlign(params, align)
- self._appendAggregation(params, aggregation_type, bucket_size_msec)
@@ -3454,8 +8200,6 @@ index 3b9ee0f..0000000
- ):
- """
- Query a range in forward direction for a specific time-serie.
-- For more information see
-- `TS.RANGE <https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange>`_. # noqa
-
- Args:
-
@@ -3485,7 +8229,9 @@ index 3b9ee0f..0000000
- by_min_value).
- align:
- Timestamp for alignment control for aggregation.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+- """ # noqa
- params = self.__range_params(
- key,
- from_time,
@@ -3515,8 +8261,6 @@ index 3b9ee0f..0000000
- ):
- """
- Query a range in reverse direction for a specific time-series.
-- For more information see
-- `TS.REVRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange>`_. # noqa
-
- **Note**: This command is only available since RedisTimeSeries >= v1.4
-
@@ -3543,7 +8287,9 @@ index 3b9ee0f..0000000
- Filter result by maximum value (must mention also filter_by_min_value).
- align:
- Timestamp for alignment control for aggregation.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+- """ # noqa
- params = self.__range_params(
- key,
- from_time,
@@ -3578,11 +8324,7 @@ index 3b9ee0f..0000000
- """Create TS.MRANGE and TS.MREVRANGE arguments."""
- params = [from_time, to_time]
- self._appendFilerByTs(params, filter_by_ts)
-- self._appendFilerByValue(
-- params,
-- filter_by_min_value,
-- filter_by_max_value
-- )
+- self._appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
- self._appendCount(params, count)
- self._appendAlign(params, align)
- self._appendAggregation(params, aggregation_type, bucket_size_msec)
@@ -3611,8 +8353,6 @@ index 3b9ee0f..0000000
- ):
- """
- Query a range across multiple time-series by filters in forward direction.
-- For more information see
-- `TS.MRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange>`_. # noqa
-
- Args:
-
@@ -3655,7 +8395,9 @@ index 3b9ee0f..0000000
- pair labels of a series.
- align:
- Timestamp for alignment control for aggregation.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+- """ # noqa
- params = self.__mrange_params(
- aggregation_type,
- bucket_size_msec,
@@ -3694,8 +8436,6 @@ index 3b9ee0f..0000000
- ):
- """
- Query a range across multiple time-series by filters in reverse direction.
-- For more information see
-- `TS.MREVRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange>`_. # noqa
-
- Args:
-
@@ -3740,7 +8480,9 @@ index 3b9ee0f..0000000
- labels of a series.
- align:
- Timestamp for alignment control for aggregation.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+- """ # noqa
- params = self.__mrange_params(
- aggregation_type,
- bucket_size_msec,
@@ -3761,17 +8503,19 @@ index 3b9ee0f..0000000
- return self.execute_command(MREVRANGE_CMD, *params)
-
- def get(self, key):
-- """ # noqa
+- """# noqa
- Get the last sample of `key`.
-- For more information see `TS.GET <https://oss.redis.com/redistimeseries/master/commands/#tsget>`_.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsget
+- """ # noqa
- return self.execute_command(GET_CMD, key)
-
- def mget(self, filters, with_labels=False):
-- """ # noqa
+- """# noqa
- Get the last samples matching the specific `filter`.
-- For more information see `TS.MGET <https://oss.redis.com/redistimeseries/master/commands/#tsmget>`_.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmget
+- """ # noqa
- params = []
- self._appendWithLabels(params, with_labels)
- params.extend(["FILTER"])
@@ -3779,17 +8523,19 @@ index 3b9ee0f..0000000
- return self.execute_command(MGET_CMD, *params)
-
- def info(self, key):
-- """ # noqa
+- """# noqa
- Get information of `key`.
-- For more information see `TS.INFO <https://oss.redis.com/redistimeseries/master/commands/#tsinfo>`_.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsinfo
+- """ # noqa
- return self.execute_command(INFO_CMD, key)
-
- def queryindex(self, filters):
-- """ # noqa
+- """# noqa
- Get all the keys matching the `filter` list.
-- For more information see `TS.QUERYINDEX <https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex>`_.
-- """
+-
+- For more information: https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex
+- """ # noq
- return self.execute_command(QUERYINDEX_CMD, *filters)
-
- @staticmethod
@@ -3886,15 +8632,15 @@ index 3b9ee0f..0000000
- params.extend(["FILTER_BY_VALUE", min_value, max_value])
diff --git a/redis/commands/timeseries/info.py b/redis/commands/timeseries/info.py
deleted file mode 100644
-index 3b89503..0000000
+index fba7f09..0000000
--- a/redis/commands/timeseries/info.py
+++ /dev/null
@@ -1,82 +0,0 @@
--from .utils import list_to_dict
-from ..helpers import nativestr
+-from .utils import list_to_dict
-
-
--class TSInfo(object):
+-class TSInfo:
- """
- Hold information and statistics on the time-series.
- Can be created using ``tsinfo`` command
@@ -3974,17 +8720,15 @@ index 3b89503..0000000
- self.duplicate_policy = self.duplicate_policy.decode()
diff --git a/redis/commands/timeseries/utils.py b/redis/commands/timeseries/utils.py
deleted file mode 100644
-index c33b7c5..0000000
+index c49b040..0000000
--- a/redis/commands/timeseries/utils.py
+++ /dev/null
-@@ -1,49 +0,0 @@
+@@ -1,44 +0,0 @@
-from ..helpers import nativestr
-
-
-def list_to_dict(aList):
-- return {
-- nativestr(aList[i][0]): nativestr(aList[i][1])
-- for i in range(len(aList))}
+- return {nativestr(aList[i][0]): nativestr(aList[i][1]) for i in range(len(aList))}
-
-
-def parse_range(response):
@@ -3996,9 +8740,7 @@ index c33b7c5..0000000
- """Parse multi range response. Used by TS.MRANGE and TS.MREVRANGE."""
- res = []
- for item in response:
-- res.append(
-- {nativestr(item[0]):
-- [list_to_dict(item[1]), parse_range(item[2])]})
+- res.append({nativestr(item[0]): [list_to_dict(item[1]), parse_range(item[2])]})
- return sorted(res, key=lambda d: list(d.keys()))
-
-
@@ -4014,8 +8756,7 @@ index c33b7c5..0000000
- res = []
- for item in response:
- if not item[2]:
-- res.append(
-- {nativestr(item[0]): [list_to_dict(item[1]), None, None]})
+- res.append({nativestr(item[0]): [list_to_dict(item[1]), None, None]})
- else:
- res.append(
- {
@@ -4027,46 +8768,4217 @@ index c33b7c5..0000000
- }
- )
- return sorted(res, key=lambda d: list(d.keys()))
+diff --git a/redis/sentinel.py b/redis/sentinel.py
+deleted file mode 100644
+index c9383d3..0000000
+--- a/redis/sentinel.py
++++ /dev/null
+@@ -1,337 +0,0 @@
+-import random
+-import weakref
+-
+-from redis.client import Redis
+-from redis.commands import SentinelCommands
+-from redis.connection import Connection, ConnectionPool, SSLConnection
+-from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError
+-from redis.utils import str_if_bytes
+-
+-
+-class MasterNotFoundError(ConnectionError):
+- pass
+-
+-
+-class SlaveNotFoundError(ConnectionError):
+- pass
+-
+-
+-class SentinelManagedConnection(Connection):
+- def __init__(self, **kwargs):
+- self.connection_pool = kwargs.pop("connection_pool")
+- super().__init__(**kwargs)
+-
+- def __repr__(self):
+- pool = self.connection_pool
+- s = f"{type(self).__name__}<service={pool.service_name}%s>"
+- if self.host:
+- host_info = f",host={self.host},port={self.port}"
+- s = s % host_info
+- return s
+-
+- def connect_to(self, address):
+- self.host, self.port = address
+- super().connect()
+- if self.connection_pool.check_connection:
+- self.send_command("PING")
+- if str_if_bytes(self.read_response()) != "PONG":
+- raise ConnectionError("PING failed")
+-
+- def connect(self):
+- if self._sock:
+- return # already connected
+- if self.connection_pool.is_master:
+- self.connect_to(self.connection_pool.get_master_address())
+- else:
+- for slave in self.connection_pool.rotate_slaves():
+- try:
+- return self.connect_to(slave)
+- except ConnectionError:
+- continue
+- raise SlaveNotFoundError # Never be here
+-
+- def read_response(self, disable_decoding=False):
+- try:
+- return super().read_response(disable_decoding=disable_decoding)
+- except ReadOnlyError:
+- if self.connection_pool.is_master:
+- # When talking to a master, a ReadOnlyError when likely
+- # indicates that the previous master that we're still connected
+- # to has been demoted to a slave and there's a new master.
+- # calling disconnect will force the connection to re-query
+- # sentinel during the next connect() attempt.
+- self.disconnect()
+- raise ConnectionError("The previous master is now a slave")
+- raise
+-
+-
+-class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection):
+- pass
+-
+-
+-class SentinelConnectionPool(ConnectionPool):
+- """
+- Sentinel backed connection pool.
+-
+- If ``check_connection`` flag is set to True, SentinelManagedConnection
+- sends a PING command right after establishing the connection.
+- """
+-
+- def __init__(self, service_name, sentinel_manager, **kwargs):
+- kwargs["connection_class"] = kwargs.get(
+- "connection_class",
+- SentinelManagedSSLConnection
+- if kwargs.pop("ssl", False)
+- else SentinelManagedConnection,
+- )
+- self.is_master = kwargs.pop("is_master", True)
+- self.check_connection = kwargs.pop("check_connection", False)
+- super().__init__(**kwargs)
+- self.connection_kwargs["connection_pool"] = weakref.proxy(self)
+- self.service_name = service_name
+- self.sentinel_manager = sentinel_manager
+-
+- def __repr__(self):
+- role = "master" if self.is_master else "slave"
+- return f"{type(self).__name__}<service={self.service_name}({role})"
+-
+- def reset(self):
+- super().reset()
+- self.master_address = None
+- self.slave_rr_counter = None
+-
+- def owns_connection(self, connection):
+- check = not self.is_master or (
+- self.is_master and self.master_address == (connection.host, connection.port)
+- )
+- parent = super()
+- return check and parent.owns_connection(connection)
+-
+- def get_master_address(self):
+- master_address = self.sentinel_manager.discover_master(self.service_name)
+- if self.is_master:
+- if self.master_address != master_address:
+- self.master_address = master_address
+- # disconnect any idle connections so that they reconnect
+- # to the new master the next time that they are used.
+- self.disconnect(inuse_connections=False)
+- return master_address
+-
+- def rotate_slaves(self):
+- "Round-robin slave balancer"
+- slaves = self.sentinel_manager.discover_slaves(self.service_name)
+- if slaves:
+- if self.slave_rr_counter is None:
+- self.slave_rr_counter = random.randint(0, len(slaves) - 1)
+- for _ in range(len(slaves)):
+- self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
+- slave = slaves[self.slave_rr_counter]
+- yield slave
+- # Fallback to the master connection
+- try:
+- yield self.get_master_address()
+- except MasterNotFoundError:
+- pass
+- raise SlaveNotFoundError(f"No slave found for {self.service_name!r}")
+-
+-
+-class Sentinel(SentinelCommands):
+- """
+- Redis Sentinel cluster client
+-
+- >>> from redis.sentinel import Sentinel
+- >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
+- >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
+- >>> master.set('foo', 'bar')
+- >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
+- >>> slave.get('foo')
+- b'bar'
+-
+- ``sentinels`` is a list of sentinel nodes. Each node is represented by
+- a pair (hostname, port).
+-
+- ``min_other_sentinels`` defined a minimum number of peers for a sentinel.
+- When querying a sentinel, if it doesn't meet this threshold, responses
+- from that sentinel won't be considered valid.
+-
+- ``sentinel_kwargs`` is a dictionary of connection arguments used when
+- connecting to sentinel instances. Any argument that can be passed to
+- a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
+- not specified, any socket_timeout and socket_keepalive options specified
+- in ``connection_kwargs`` will be used.
+-
+- ``connection_kwargs`` are keyword arguments that will be used when
+- establishing a connection to a Redis server.
+- """
+-
+- def __init__(
+- self,
+- sentinels,
+- min_other_sentinels=0,
+- sentinel_kwargs=None,
+- **connection_kwargs,
+- ):
+- # if sentinel_kwargs isn't defined, use the socket_* options from
+- # connection_kwargs
+- if sentinel_kwargs is None:
+- sentinel_kwargs = {
+- k: v for k, v in connection_kwargs.items() if k.startswith("socket_")
+- }
+- self.sentinel_kwargs = sentinel_kwargs
+-
+- self.sentinels = [
+- Redis(hostname, port, **self.sentinel_kwargs)
+- for hostname, port in sentinels
+- ]
+- self.min_other_sentinels = min_other_sentinels
+- self.connection_kwargs = connection_kwargs
+-
+- def execute_command(self, *args, **kwargs):
+- """
+- Execute Sentinel command in sentinel nodes.
+- once - If set to True, then execute the resulting command on a single
+- node at random, rather than across the entire sentinel cluster.
+- """
+- once = bool(kwargs.get("once", False))
+- if "once" in kwargs.keys():
+- kwargs.pop("once")
+-
+- if once:
+- for sentinel in self.sentinels:
+- sentinel.execute_command(*args, **kwargs)
+- else:
+- random.choice(self.sentinels).execute_command(*args, **kwargs)
+- return True
+-
+- def __repr__(self):
+- sentinel_addresses = []
+- for sentinel in self.sentinels:
+- sentinel_addresses.append(
+- "{host}:{port}".format_map(
+- sentinel.connection_pool.connection_kwargs,
+- )
+- )
+- return f'{type(self).__name__}<sentinels=[{",".join(sentinel_addresses)}]>'
+-
+- def check_master_state(self, state, service_name):
+- if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
+- return False
+- # Check if our sentinel doesn't see other nodes
+- if state["num-other-sentinels"] < self.min_other_sentinels:
+- return False
+- return True
+-
+- def discover_master(self, service_name):
+- """
+- Asks sentinel servers for the Redis master's address corresponding
+- to the service labeled ``service_name``.
+-
+- Returns a pair (address, port) or raises MasterNotFoundError if no
+- master is found.
+- """
+- for sentinel_no, sentinel in enumerate(self.sentinels):
+- try:
+- masters = sentinel.sentinel_masters()
+- except (ConnectionError, TimeoutError):
+- continue
+- state = masters.get(service_name)
+- if state and self.check_master_state(state, service_name):
+- # Put this sentinel at the top of the list
+- self.sentinels[0], self.sentinels[sentinel_no] = (
+- sentinel,
+- self.sentinels[0],
+- )
+- return state["ip"], state["port"]
+- raise MasterNotFoundError(f"No master found for {service_name!r}")
+-
+- def filter_slaves(self, slaves):
+- "Remove slaves that are in an ODOWN or SDOWN state"
+- slaves_alive = []
+- for slave in slaves:
+- if slave["is_odown"] or slave["is_sdown"]:
+- continue
+- slaves_alive.append((slave["ip"], slave["port"]))
+- return slaves_alive
+-
+- def discover_slaves(self, service_name):
+- "Returns a list of alive slaves for service ``service_name``"
+- for sentinel in self.sentinels:
+- try:
+- slaves = sentinel.sentinel_slaves(service_name)
+- except (ConnectionError, ResponseError, TimeoutError):
+- continue
+- slaves = self.filter_slaves(slaves)
+- if slaves:
+- return slaves
+- return []
+-
+- def master_for(
+- self,
+- service_name,
+- redis_class=Redis,
+- connection_pool_class=SentinelConnectionPool,
+- **kwargs,
+- ):
+- """
+- Returns a redis client instance for the ``service_name`` master.
+-
+- A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
+- used to retrieve the master's address before establishing a new
+- connection.
+-
+- NOTE: If the master's address has changed, any cached connections to
+- the old master are closed.
+-
+- By default clients will be a :py:class:`~redis.Redis` instance.
+- Specify a different class to the ``redis_class`` argument if you
+- desire something different.
+-
+- The ``connection_pool_class`` specifies the connection pool to
+- use. The :py:class:`~redis.sentinel.SentinelConnectionPool`
+- will be used by default.
+-
+- All other keyword arguments are merged with any connection_kwargs
+- passed to this class and passed to the connection pool as keyword
+- arguments to be used to initialize Redis connections.
+- """
+- kwargs["is_master"] = True
+- connection_kwargs = dict(self.connection_kwargs)
+- connection_kwargs.update(kwargs)
+- return redis_class(
+- connection_pool=connection_pool_class(
+- service_name, self, **connection_kwargs
+- )
+- )
+-
+- def slave_for(
+- self,
+- service_name,
+- redis_class=Redis,
+- connection_pool_class=SentinelConnectionPool,
+- **kwargs,
+- ):
+- """
+- Returns redis client instance for the ``service_name`` slave(s).
+-
+- A SentinelConnectionPool class is used to retrieve the slave's
+- address before establishing a new connection.
+-
+- By default clients will be a :py:class:`~redis.Redis` instance.
+- Specify a different class to the ``redis_class`` argument if you
+- desire something different.
+-
+- The ``connection_pool_class`` specifies the connection pool to use.
+- The SentinelConnectionPool will be used by default.
+-
+- All other keyword arguments are merged with any connection_kwargs
+- passed to this class and passed to the connection pool as keyword
+- arguments to be used to initialize Redis connections.
+- """
+- kwargs["is_master"] = False
+- connection_kwargs = dict(self.connection_kwargs)
+- connection_kwargs.update(kwargs)
+- return redis_class(
+- connection_pool=connection_pool_class(
+- service_name, self, **connection_kwargs
+- )
+- )
+diff --git a/setup.py b/setup.py
+index 7733220..d980041 100644
+--- a/setup.py
++++ b/setup.py
+@@ -13,11 +13,6 @@ setup(
+ include=[
+ "redis",
+ "redis.commands",
+- "redis.commands.bf",
+- "redis.commands.json",
+- "redis.commands.search",
+- "redis.commands.timeseries",
+- "redis.commands.graph",
+ ]
+ ),
+ url="https://github.com/redis/redis-py",
+diff --git a/tests/test_bloom.py b/tests/test_bloom.py
+deleted file mode 100644
+index 8936584..0000000
+--- a/tests/test_bloom.py
++++ /dev/null
+@@ -1,383 +0,0 @@
+-import pytest
+-
+-import redis.commands.bf
+-from redis.exceptions import ModuleError, RedisError
+-from redis.utils import HIREDIS_AVAILABLE
+-
+-
+-def intlist(obj):
+- return [int(v) for v in obj]
+-
+-
+-@pytest.fixture
+-def client(modclient):
+- assert isinstance(modclient.bf(), redis.commands.bf.BFBloom)
+- assert isinstance(modclient.cf(), redis.commands.bf.CFBloom)
+- assert isinstance(modclient.cms(), redis.commands.bf.CMSBloom)
+- assert isinstance(modclient.tdigest(), redis.commands.bf.TDigestBloom)
+- assert isinstance(modclient.topk(), redis.commands.bf.TOPKBloom)
+-
+- modclient.flushdb()
+- return modclient
+-
+-
+-@pytest.mark.redismod
+-def test_create(client):
+- """Test CREATE/RESERVE calls"""
+- assert client.bf().create("bloom", 0.01, 1000)
+- assert client.bf().create("bloom_e", 0.01, 1000, expansion=1)
+- assert client.bf().create("bloom_ns", 0.01, 1000, noScale=True)
+- assert client.cf().create("cuckoo", 1000)
+- assert client.cf().create("cuckoo_e", 1000, expansion=1)
+- assert client.cf().create("cuckoo_bs", 1000, bucket_size=4)
+- assert client.cf().create("cuckoo_mi", 1000, max_iterations=10)
+- assert client.cms().initbydim("cmsDim", 100, 5)
+- assert client.cms().initbyprob("cmsProb", 0.01, 0.01)
+- assert client.topk().reserve("topk", 5, 100, 5, 0.9)
+- assert client.tdigest().create("tDigest", 100)
+-
+-
+-# region Test Bloom Filter
+-@pytest.mark.redismod
+-def test_bf_add(client):
+- assert client.bf().create("bloom", 0.01, 1000)
+- assert 1 == client.bf().add("bloom", "foo")
+- assert 0 == client.bf().add("bloom", "foo")
+- assert [0] == intlist(client.bf().madd("bloom", "foo"))
+- assert [0, 1] == client.bf().madd("bloom", "foo", "bar")
+- assert [0, 0, 1] == client.bf().madd("bloom", "foo", "bar", "baz")
+- assert 1 == client.bf().exists("bloom", "foo")
+- assert 0 == client.bf().exists("bloom", "noexist")
+- assert [1, 0] == intlist(client.bf().mexists("bloom", "foo", "noexist"))
+-
+-
+-@pytest.mark.redismod
+-def test_bf_insert(client):
+- assert client.bf().create("bloom", 0.01, 1000)
+- assert [1] == intlist(client.bf().insert("bloom", ["foo"]))
+- assert [0, 1] == intlist(client.bf().insert("bloom", ["foo", "bar"]))
+- assert [1] == intlist(client.bf().insert("captest", ["foo"], capacity=10))
+- assert [1] == intlist(client.bf().insert("errtest", ["foo"], error=0.01))
+- assert 1 == client.bf().exists("bloom", "foo")
+- assert 0 == client.bf().exists("bloom", "noexist")
+- assert [1, 0] == intlist(client.bf().mexists("bloom", "foo", "noexist"))
+- info = client.bf().info("bloom")
+- assert 2 == info.insertedNum
+- assert 1000 == info.capacity
+- assert 1 == info.filterNum
+-
+-
+-@pytest.mark.redismod
+-def test_bf_scandump_and_loadchunk(client):
+- # Store a filter
+- client.bf().create("myBloom", "0.0001", "1000")
+-
+- # test is probabilistic and might fail. It is OK to change variables if
+- # certain to not break anything
+- def do_verify():
+- res = 0
+- for x in range(1000):
+- client.bf().add("myBloom", x)
+- rv = client.bf().exists("myBloom", x)
+- assert rv
+- rv = client.bf().exists("myBloom", f"nonexist_{x}")
+- res += rv == x
+- assert res < 5
+-
+- do_verify()
+- cmds = []
+- if HIREDIS_AVAILABLE:
+- with pytest.raises(ModuleError):
+- cur = client.bf().scandump("myBloom", 0)
+- return
+-
+- cur = client.bf().scandump("myBloom", 0)
+- first = cur[0]
+- cmds.append(cur)
+-
+- while True:
+- cur = client.bf().scandump("myBloom", first)
+- first = cur[0]
+- if first == 0:
+- break
+- else:
+- cmds.append(cur)
+- prev_info = client.bf().execute_command("bf.debug", "myBloom")
+-
+- # Remove the filter
+- client.bf().client.delete("myBloom")
+-
+- # Now, load all the commands:
+- for cmd in cmds:
+- client.bf().loadchunk("myBloom", *cmd)
+-
+- cur_info = client.bf().execute_command("bf.debug", "myBloom")
+- assert prev_info == cur_info
+- do_verify()
+-
+- client.bf().client.delete("myBloom")
+- client.bf().create("myBloom", "0.0001", "10000000")
+-
+-
+-@pytest.mark.redismod
+-def test_bf_info(client):
+- expansion = 4
+- # Store a filter
+- client.bf().create("nonscaling", "0.0001", "1000", noScale=True)
+- info = client.bf().info("nonscaling")
+- assert info.expansionRate is None
+-
+- client.bf().create("expanding", "0.0001", "1000", expansion=expansion)
+- info = client.bf().info("expanding")
+- assert info.expansionRate == 4
+-
+- try:
+- # noScale mean no expansion
+- client.bf().create(
+- "myBloom", "0.0001", "1000", expansion=expansion, noScale=True
+- )
+- assert False
+- except RedisError:
+- assert True
+-
+-
+-# region Test Cuckoo Filter
+-@pytest.mark.redismod
+-def test_cf_add_and_insert(client):
+- assert client.cf().create("cuckoo", 1000)
+- assert client.cf().add("cuckoo", "filter")
+- assert not client.cf().addnx("cuckoo", "filter")
+- assert 1 == client.cf().addnx("cuckoo", "newItem")
+- assert [1] == client.cf().insert("captest", ["foo"])
+- assert [1] == client.cf().insert("captest", ["foo"], capacity=1000)
+- assert [1] == client.cf().insertnx("captest", ["bar"])
+- assert [1] == client.cf().insertnx("captest", ["food"], nocreate="1")
+- assert [0, 0, 1] == client.cf().insertnx("captest", ["foo", "bar", "baz"])
+- assert [0] == client.cf().insertnx("captest", ["bar"], capacity=1000)
+- assert [1] == client.cf().insert("empty1", ["foo"], capacity=1000)
+- assert [1] == client.cf().insertnx("empty2", ["bar"], capacity=1000)
+- info = client.cf().info("captest")
+- assert 5 == info.insertedNum
+- assert 0 == info.deletedNum
+- assert 1 == info.filterNum
+-
+-
+-@pytest.mark.redismod
+-def test_cf_exists_and_del(client):
+- assert client.cf().create("cuckoo", 1000)
+- assert client.cf().add("cuckoo", "filter")
+- assert client.cf().exists("cuckoo", "filter")
+- assert not client.cf().exists("cuckoo", "notexist")
+- assert 1 == client.cf().count("cuckoo", "filter")
+- assert 0 == client.cf().count("cuckoo", "notexist")
+- assert client.cf().delete("cuckoo", "filter")
+- assert 0 == client.cf().count("cuckoo", "filter")
+-
+-
+-# region Test Count-Min Sketch
+-@pytest.mark.redismod
+-def test_cms(client):
+- assert client.cms().initbydim("dim", 1000, 5)
+- assert client.cms().initbyprob("prob", 0.01, 0.01)
+- assert client.cms().incrby("dim", ["foo"], [5])
+- assert [0] == client.cms().query("dim", "notexist")
+- assert [5] == client.cms().query("dim", "foo")
+- assert [10, 15] == client.cms().incrby("dim", ["foo", "bar"], [5, 15])
+- assert [10, 15] == client.cms().query("dim", "foo", "bar")
+- info = client.cms().info("dim")
+- assert 1000 == info.width
+- assert 5 == info.depth
+- assert 25 == info.count
+-
+-
+-@pytest.mark.redismod
+-def test_cms_merge(client):
+- assert client.cms().initbydim("A", 1000, 5)
+- assert client.cms().initbydim("B", 1000, 5)
+- assert client.cms().initbydim("C", 1000, 5)
+- assert client.cms().incrby("A", ["foo", "bar", "baz"], [5, 3, 9])
+- assert client.cms().incrby("B", ["foo", "bar", "baz"], [2, 3, 1])
+- assert [5, 3, 9] == client.cms().query("A", "foo", "bar", "baz")
+- assert [2, 3, 1] == client.cms().query("B", "foo", "bar", "baz")
+- assert client.cms().merge("C", 2, ["A", "B"])
+- assert [7, 6, 10] == client.cms().query("C", "foo", "bar", "baz")
+- assert client.cms().merge("C", 2, ["A", "B"], ["1", "2"])
+- assert [9, 9, 11] == client.cms().query("C", "foo", "bar", "baz")
+- assert client.cms().merge("C", 2, ["A", "B"], ["2", "3"])
+- assert [16, 15, 21] == client.cms().query("C", "foo", "bar", "baz")
+-
+-
+-# endregion
+-
+-
+-# region Test Top-K
+-@pytest.mark.redismod
+-def test_topk(client):
+- # test list with empty buckets
+- assert client.topk().reserve("topk", 3, 50, 4, 0.9)
+- assert [
+- None,
+- None,
+- None,
+- "A",
+- "C",
+- "D",
+- None,
+- None,
+- "E",
+- None,
+- "B",
+- "C",
+- None,
+- None,
+- None,
+- "D",
+- None,
+- ] == client.topk().add(
+- "topk",
+- "A",
+- "B",
+- "C",
+- "D",
+- "E",
+- "A",
+- "A",
+- "B",
+- "C",
+- "G",
+- "D",
+- "B",
+- "D",
+- "A",
+- "E",
+- "E",
+- 1,
+- )
+- assert [1, 1, 0, 0, 1, 0, 0] == client.topk().query(
+- "topk", "A", "B", "C", "D", "E", "F", "G"
+- )
+- assert [4, 3, 2, 3, 3, 0, 1] == client.topk().count(
+- "topk", "A", "B", "C", "D", "E", "F", "G"
+- )
+-
+- # test full list
+- assert client.topk().reserve("topklist", 3, 50, 3, 0.9)
+- assert client.topk().add(
+- "topklist",
+- "A",
+- "B",
+- "C",
+- "D",
+- "E",
+- "A",
+- "A",
+- "B",
+- "C",
+- "G",
+- "D",
+- "B",
+- "D",
+- "A",
+- "E",
+- "E",
+- )
+- assert ["A", "B", "E"] == client.topk().list("topklist")
+- assert ["A", 4, "B", 3, "E", 3] == client.topk().list("topklist", withcount=True)
+- info = client.topk().info("topklist")
+- assert 3 == info.k
+- assert 50 == info.width
+- assert 3 == info.depth
+- assert 0.9 == round(float(info.decay), 1)
+-
+-
+-@pytest.mark.redismod
+-def test_topk_incrby(client):
+- client.flushdb()
+- assert client.topk().reserve("topk", 3, 10, 3, 1)
+- assert [None, None, None] == client.topk().incrby(
+- "topk", ["bar", "baz", "42"], [3, 6, 2]
+- )
+- assert [None, "bar"] == client.topk().incrby("topk", ["42", "xyzzy"], [8, 4])
+- assert [3, 6, 10, 4, 0] == client.topk().count(
+- "topk", "bar", "baz", "42", "xyzzy", 4
+- )
+-
+-
+-# region Test T-Digest
+-@pytest.mark.redismod
+-def test_tdigest_reset(client):
+- assert client.tdigest().create("tDigest", 10)
+- # reset on empty histogram
+- assert client.tdigest().reset("tDigest")
+- # insert data-points into sketch
+- assert client.tdigest().add("tDigest", list(range(10)), [1.0] * 10)
+-
+- assert client.tdigest().reset("tDigest")
+- # assert we have 0 unmerged nodes
+- assert 0 == client.tdigest().info("tDigest").unmergedNodes
+-
+-
+-@pytest.mark.redismod
+-def test_tdigest_merge(client):
+- assert client.tdigest().create("to-tDigest", 10)
+- assert client.tdigest().create("from-tDigest", 10)
+- # insert data-points into sketch
+- assert client.tdigest().add("from-tDigest", [1.0] * 10, [1.0] * 10)
+- assert client.tdigest().add("to-tDigest", [2.0] * 10, [10.0] * 10)
+- # merge from-tdigest into to-tdigest
+- assert client.tdigest().merge("to-tDigest", "from-tDigest")
+- # we should now have 110 weight on to-histogram
+- info = client.tdigest().info("to-tDigest")
+- total_weight_to = float(info.mergedWeight) + float(info.unmergedWeight)
+- assert 110 == total_weight_to
+-
+-
+-@pytest.mark.redismod
+-def test_tdigest_min_and_max(client):
+- assert client.tdigest().create("tDigest", 100)
+- # insert data-points into sketch
+- assert client.tdigest().add("tDigest", [1, 2, 3], [1.0] * 3)
+- # min/max
+- assert 3 == client.tdigest().max("tDigest")
+- assert 1 == client.tdigest().min("tDigest")
+-
+-
+-@pytest.mark.redismod
+-def test_tdigest_quantile(client):
+- assert client.tdigest().create("tDigest", 500)
+- # insert data-points into sketch
+- assert client.tdigest().add(
+- "tDigest", list([x * 0.01 for x in range(1, 10000)]), [1.0] * 10000
+- )
+- # assert min min/max have same result as quantile 0 and 1
+- assert client.tdigest().max("tDigest") == client.tdigest().quantile("tDigest", 1.0)
+- assert client.tdigest().min("tDigest") == client.tdigest().quantile("tDigest", 0.0)
+-
+- assert 1.0 == round(client.tdigest().quantile("tDigest", 0.01), 2)
+- assert 99.0 == round(client.tdigest().quantile("tDigest", 0.99), 2)
+-
+-
+-@pytest.mark.redismod
+-def test_tdigest_cdf(client):
+- assert client.tdigest().create("tDigest", 100)
+- # insert data-points into sketch
+- assert client.tdigest().add("tDigest", list(range(1, 10)), [1.0] * 10)
+- assert 0.1 == round(client.tdigest().cdf("tDigest", 1.0), 1)
+- assert 0.9 == round(client.tdigest().cdf("tDigest", 9.0), 1)
+-
+-
+-# @pytest.mark.redismod
+-# def test_pipeline(client):
+-# pipeline = client.bf().pipeline()
+-# assert not client.bf().execute_command("get pipeline")
+-#
+-# assert client.bf().create("pipeline", 0.01, 1000)
+-# for i in range(100):
+-# pipeline.add("pipeline", i)
+-# for i in range(100):
+-# assert not (client.bf().exists("pipeline", i))
+-#
+-# pipeline.execute()
+-#
+-# for i in range(100):
+-# assert client.bf().exists("pipeline", i)
+diff --git a/tests/test_cluster.py b/tests/test_cluster.py
+deleted file mode 100644
+index 496ed98..0000000
+--- a/tests/test_cluster.py
++++ /dev/null
+@@ -1,2664 +0,0 @@
+-import binascii
+-import datetime
+-import warnings
+-from time import sleep
+-from unittest.mock import DEFAULT, Mock, call, patch
+-
+-import pytest
+-
+-from redis import Redis
+-from redis.cluster import (
+- PRIMARY,
+- REDIS_CLUSTER_HASH_SLOTS,
+- REPLICA,
+- ClusterNode,
+- NodesManager,
+- RedisCluster,
+- get_node_name,
+-)
+-from redis.commands import CommandsParser
+-from redis.connection import Connection
+-from redis.crc import key_slot
+-from redis.exceptions import (
+- AskError,
+- ClusterDownError,
+- ConnectionError,
+- DataError,
+- MovedError,
+- NoPermissionError,
+- RedisClusterException,
+- RedisError,
+-)
+-from redis.utils import str_if_bytes
+-from tests.test_pubsub import wait_for_message
+-
+-from .conftest import (
+- _get_client,
+- skip_if_redis_enterprise,
+- skip_if_server_version_lt,
+- skip_unless_arch_bits,
+- wait_for_command,
+-)
+-
+-default_host = "127.0.0.1"
+-default_port = 7000
+-default_cluster_slots = [
+- [
+- 0,
+- 8191,
+- ["127.0.0.1", 7000, "node_0"],
+- ["127.0.0.1", 7003, "node_3"],
+- ],
+- [8192, 16383, ["127.0.0.1", 7001, "node_1"], ["127.0.0.1", 7002, "node_2"]],
+-]
+-
+-
+-@pytest.fixture()
+-def slowlog(request, r):
+- """
+- Set the slowlog threshold to 0, and the
+- max length to 128. This will force every
+- command into the slowlog and allow us
+- to test it
+- """
+- # Save old values
+- current_config = r.config_get(target_nodes=r.get_primaries()[0])
+- old_slower_than_value = current_config["slowlog-log-slower-than"]
+- old_max_legnth_value = current_config["slowlog-max-len"]
+-
+- # Function to restore the old values
+- def cleanup():
+- r.config_set("slowlog-log-slower-than", old_slower_than_value)
+- r.config_set("slowlog-max-len", old_max_legnth_value)
+-
+- request.addfinalizer(cleanup)
+-
+- # Set the new values
+- r.config_set("slowlog-log-slower-than", 0)
+- r.config_set("slowlog-max-len", 128)
+-
+-
+-def get_mocked_redis_client(func=None, *args, **kwargs):
+- """
+- Return a stable RedisCluster object that have deterministic
+- nodes and slots setup to remove the problem of different IP addresses
+- on different installations and machines.
+- """
+- cluster_slots = kwargs.pop("cluster_slots", default_cluster_slots)
+- coverage_res = kwargs.pop("coverage_result", "yes")
+- cluster_enabled = kwargs.pop("cluster_enabled", True)
+- with patch.object(Redis, "execute_command") as execute_command_mock:
+-
+- def execute_command(*_args, **_kwargs):
+- if _args[0] == "CLUSTER SLOTS":
+- mock_cluster_slots = cluster_slots
+- return mock_cluster_slots
+- elif _args[0] == "COMMAND":
+- return {"get": [], "set": []}
+- elif _args[0] == "INFO":
+- return {"cluster_enabled": cluster_enabled}
+- elif len(_args) > 1 and _args[1] == "cluster-require-full-coverage":
+- return {"cluster-require-full-coverage": coverage_res}
+- elif func is not None:
+- return func(*args, **kwargs)
+- else:
+- return execute_command_mock(*_args, **_kwargs)
+-
+- execute_command_mock.side_effect = execute_command
+-
+- with patch.object(
+- CommandsParser, "initialize", autospec=True
+- ) as cmd_parser_initialize:
+-
+- def cmd_init_mock(self, r):
+- self.commands = {
+- "get": {
+- "name": "get",
+- "arity": 2,
+- "flags": ["readonly", "fast"],
+- "first_key_pos": 1,
+- "last_key_pos": 1,
+- "step_count": 1,
+- }
+- }
+-
+- cmd_parser_initialize.side_effect = cmd_init_mock
+-
+- return RedisCluster(*args, **kwargs)
+-
+-
+-def mock_node_resp(node, response):
+- connection = Mock()
+- connection.read_response.return_value = response
+- node.redis_connection.connection = connection
+- return node
+-
+-
+-def mock_node_resp_func(node, func):
+- connection = Mock()
+- connection.read_response.side_effect = func
+- node.redis_connection.connection = connection
+- return node
+-
+-
+-def mock_all_nodes_resp(rc, response):
+- for node in rc.get_nodes():
+- mock_node_resp(node, response)
+- return rc
+-
+-
+-def find_node_ip_based_on_port(cluster_client, port):
+- for node in cluster_client.get_nodes():
+- if node.port == port:
+- return node.host
+-
+-
+-def moved_redirection_helper(request, failover=False):
+- """
+- Test that the client handles MOVED response after a failover.
+- Redirection after a failover means that the redirection address is of a
+- replica that was promoted to a primary.
+-
+- At first call it should return a MOVED ResponseError that will point
+- the client to the next server it should talk to.
+-
+- Verify that:
+- 1. it tries to talk to the redirected node
+- 2. it updates the slot's primary to the redirected node
+-
+- For a failover, also verify:
+- 3. the redirected node's server type updated to 'primary'
+- 4. the server type of the previous slot owner updated to 'replica'
+- """
+- rc = _get_client(RedisCluster, request, flushdb=False)
+- slot = 12182
+- redirect_node = None
+- # Get the current primary that holds this slot
+- prev_primary = rc.nodes_manager.get_node_from_slot(slot)
+- if failover:
+- if len(rc.nodes_manager.slots_cache[slot]) < 2:
+- warnings.warn("Skipping this test since it requires to have a " "replica")
+- return
+- redirect_node = rc.nodes_manager.slots_cache[slot][1]
+- else:
+- # Use one of the primaries to be the redirected node
+- redirect_node = rc.get_primaries()[0]
+- r_host = redirect_node.host
+- r_port = redirect_node.port
+- with patch.object(Redis, "parse_response") as parse_response:
+-
+- def moved_redirect_effect(connection, *args, **options):
+- def ok_response(connection, *args, **options):
+- assert connection.host == r_host
+- assert connection.port == r_port
+-
+- return "MOCK_OK"
+-
+- parse_response.side_effect = ok_response
+- raise MovedError(f"{slot} {r_host}:{r_port}")
+-
+- parse_response.side_effect = moved_redirect_effect
+- assert rc.execute_command("SET", "foo", "bar") == "MOCK_OK"
+- slot_primary = rc.nodes_manager.slots_cache[slot][0]
+- assert slot_primary == redirect_node
+- if failover:
+- assert rc.get_node(host=r_host, port=r_port).server_type == PRIMARY
+- assert prev_primary.server_type == REPLICA
+-
+-
+-@pytest.mark.onlycluster
+-class TestRedisClusterObj:
+- """
+- Tests for the RedisCluster class
+- """
+-
+- def test_host_port_startup_node(self):
+- """
+- Test that it is possible to use host & port arguments as startup node
+- args
+- """
+- cluster = get_mocked_redis_client(host=default_host, port=default_port)
+- assert cluster.get_node(host=default_host, port=default_port) is not None
+-
+- def test_startup_nodes(self):
+- """
+- Test that it is possible to use startup_nodes
+- argument to init the cluster
+- """
+- port_1 = 7000
+- port_2 = 7001
+- startup_nodes = [
+- ClusterNode(default_host, port_1),
+- ClusterNode(default_host, port_2),
+- ]
+- cluster = get_mocked_redis_client(startup_nodes=startup_nodes)
+- assert (
+- cluster.get_node(host=default_host, port=port_1) is not None
+- and cluster.get_node(host=default_host, port=port_2) is not None
+- )
+-
+- def test_empty_startup_nodes(self):
+- """
+- Test that exception is raised when empty providing empty startup_nodes
+- """
+- with pytest.raises(RedisClusterException) as ex:
+- RedisCluster(startup_nodes=[])
+-
+- assert str(ex.value).startswith(
+- "RedisCluster requires at least one node to discover the " "cluster"
+- ), str_if_bytes(ex.value)
+-
+- def test_from_url(self, r):
+- redis_url = f"redis://{default_host}:{default_port}/0"
+- with patch.object(RedisCluster, "from_url") as from_url:
+-
+- def from_url_mocked(_url, **_kwargs):
+- return get_mocked_redis_client(url=_url, **_kwargs)
+-
+- from_url.side_effect = from_url_mocked
+- cluster = RedisCluster.from_url(redis_url)
+- assert cluster.get_node(host=default_host, port=default_port) is not None
+-
+- def test_execute_command_errors(self, r):
+- """
+- Test that if no key is provided then exception should be raised.
+- """
+- with pytest.raises(RedisClusterException) as ex:
+- r.execute_command("GET")
+- assert str(ex.value).startswith(
+- "No way to dispatch this command to " "Redis Cluster. Missing key."
+- )
+-
+- def test_execute_command_node_flag_primaries(self, r):
+- """
+- Test command execution with nodes flag PRIMARIES
+- """
+- primaries = r.get_primaries()
+- replicas = r.get_replicas()
+- mock_all_nodes_resp(r, "PONG")
+- assert r.ping(target_nodes=RedisCluster.PRIMARIES) is True
+- for primary in primaries:
+- conn = primary.redis_connection.connection
+- assert conn.read_response.called is True
+- for replica in replicas:
+- conn = replica.redis_connection.connection
+- assert conn.read_response.called is not True
+-
+- def test_execute_command_node_flag_replicas(self, r):
+- """
+- Test command execution with nodes flag REPLICAS
+- """
+- replicas = r.get_replicas()
+- if not replicas:
+- r = get_mocked_redis_client(default_host, default_port)
+- primaries = r.get_primaries()
+- mock_all_nodes_resp(r, "PONG")
+- assert r.ping(target_nodes=RedisCluster.REPLICAS) is True
+- for replica in replicas:
+- conn = replica.redis_connection.connection
+- assert conn.read_response.called is True
+- for primary in primaries:
+- conn = primary.redis_connection.connection
+- assert conn.read_response.called is not True
+-
+- def test_execute_command_node_flag_all_nodes(self, r):
+- """
+- Test command execution with nodes flag ALL_NODES
+- """
+- mock_all_nodes_resp(r, "PONG")
+- assert r.ping(target_nodes=RedisCluster.ALL_NODES) is True
+- for node in r.get_nodes():
+- conn = node.redis_connection.connection
+- assert conn.read_response.called is True
+-
+- def test_execute_command_node_flag_random(self, r):
+- """
+- Test command execution with nodes flag RANDOM
+- """
+- mock_all_nodes_resp(r, "PONG")
+- assert r.ping(target_nodes=RedisCluster.RANDOM) is True
+- called_count = 0
+- for node in r.get_nodes():
+- conn = node.redis_connection.connection
+- if conn.read_response.called is True:
+- called_count += 1
+- assert called_count == 1
+-
+- def test_execute_command_default_node(self, r):
+- """
+- Test command execution without node flag is being executed on the
+- default node
+- """
+- def_node = r.get_default_node()
+- mock_node_resp(def_node, "PONG")
+- assert r.ping() is True
+- conn = def_node.redis_connection.connection
+- assert conn.read_response.called
+-
+- def test_ask_redirection(self, r):
+- """
+- Test that the server handles ASK response.
+-
+- At first call it should return a ASK ResponseError that will point
+- the client to the next server it should talk to.
+-
+- Important thing to verify is that it tries to talk to the second node.
+- """
+- redirect_node = r.get_nodes()[0]
+- with patch.object(Redis, "parse_response") as parse_response:
+-
+- def ask_redirect_effect(connection, *args, **options):
+- def ok_response(connection, *args, **options):
+- assert connection.host == redirect_node.host
+- assert connection.port == redirect_node.port
+-
+- return "MOCK_OK"
+-
+- parse_response.side_effect = ok_response
+- raise AskError(f"12182 {redirect_node.host}:{redirect_node.port}")
+-
+- parse_response.side_effect = ask_redirect_effect
+-
+- assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
+-
+- def test_moved_redirection(self, request):
+- """
+- Test that the client handles MOVED response.
+- """
+- moved_redirection_helper(request, failover=False)
+-
+- def test_moved_redirection_after_failover(self, request):
+- """
+- Test that the client handles MOVED response after a failover.
+- """
+- moved_redirection_helper(request, failover=True)
+-
+- def test_refresh_using_specific_nodes(self, request):
+- """
+- Test making calls on specific nodes when the cluster has failed over to
+- another node
+- """
+- node_7006 = ClusterNode(host=default_host, port=7006, server_type=PRIMARY)
+- node_7007 = ClusterNode(host=default_host, port=7007, server_type=PRIMARY)
+- with patch.object(Redis, "parse_response") as parse_response:
+- with patch.object(NodesManager, "initialize", autospec=True) as initialize:
+- with patch.multiple(
+- Connection, send_command=DEFAULT, connect=DEFAULT, can_read=DEFAULT
+- ) as mocks:
+- # simulate 7006 as a failed node
+- def parse_response_mock(connection, command_name, **options):
+- if connection.port == 7006:
+- parse_response.failed_calls += 1
+- raise ClusterDownError(
+- "CLUSTERDOWN The cluster is "
+- "down. Use CLUSTER INFO for "
+- "more information"
+- )
+- elif connection.port == 7007:
+- parse_response.successful_calls += 1
+-
+- def initialize_mock(self):
+- # start with all slots mapped to 7006
+- self.nodes_cache = {node_7006.name: node_7006}
+- self.default_node = node_7006
+- self.slots_cache = {}
+-
+- for i in range(0, 16383):
+- self.slots_cache[i] = [node_7006]
+-
+- # After the first connection fails, a reinitialize
+- # should follow the cluster to 7007
+- def map_7007(self):
+- self.nodes_cache = {node_7007.name: node_7007}
+- self.default_node = node_7007
+- self.slots_cache = {}
+-
+- for i in range(0, 16383):
+- self.slots_cache[i] = [node_7007]
+-
+- # Change initialize side effect for the second call
+- initialize.side_effect = map_7007
+-
+- parse_response.side_effect = parse_response_mock
+- parse_response.successful_calls = 0
+- parse_response.failed_calls = 0
+- initialize.side_effect = initialize_mock
+- mocks["can_read"].return_value = False
+- mocks["send_command"].return_value = "MOCK_OK"
+- mocks["connect"].return_value = None
+- with patch.object(
+- CommandsParser, "initialize", autospec=True
+- ) as cmd_parser_initialize:
+-
+- def cmd_init_mock(self, r):
+- self.commands = {
+- "get": {
+- "name": "get",
+- "arity": 2,
+- "flags": ["readonly", "fast"],
+- "first_key_pos": 1,
+- "last_key_pos": 1,
+- "step_count": 1,
+- }
+- }
+-
+- cmd_parser_initialize.side_effect = cmd_init_mock
+-
+- rc = _get_client(RedisCluster, request, flushdb=False)
+- assert len(rc.get_nodes()) == 1
+- assert rc.get_node(node_name=node_7006.name) is not None
+-
+- rc.get("foo")
+-
+- # Cluster should now point to 7007, and there should be
+- # one failed and one successful call
+- assert len(rc.get_nodes()) == 1
+- assert rc.get_node(node_name=node_7007.name) is not None
+- assert rc.get_node(node_name=node_7006.name) is None
+- assert parse_response.failed_calls == 1
+- assert parse_response.successful_calls == 1
+-
+- def test_reading_from_replicas_in_round_robin(self):
+- with patch.multiple(
+- Connection,
+- send_command=DEFAULT,
+- read_response=DEFAULT,
+- _connect=DEFAULT,
+- can_read=DEFAULT,
+- on_connect=DEFAULT,
+- ) as mocks:
+- with patch.object(Redis, "parse_response") as parse_response:
+-
+- def parse_response_mock_first(connection, *args, **options):
+- # Primary
+- assert connection.port == 7001
+- parse_response.side_effect = parse_response_mock_second
+- return "MOCK_OK"
+-
+- def parse_response_mock_second(connection, *args, **options):
+- # Replica
+- assert connection.port == 7002
+- parse_response.side_effect = parse_response_mock_third
+- return "MOCK_OK"
+-
+- def parse_response_mock_third(connection, *args, **options):
+- # Primary
+- assert connection.port == 7001
+- return "MOCK_OK"
+-
+- # We don't need to create a real cluster connection but we
+- # do want RedisCluster.on_connect function to get called,
+- # so we'll mock some of the Connection's functions to allow it
+- parse_response.side_effect = parse_response_mock_first
+- mocks["send_command"].return_value = True
+- mocks["read_response"].return_value = "OK"
+- mocks["_connect"].return_value = True
+- mocks["can_read"].return_value = False
+- mocks["on_connect"].return_value = True
+-
+- # Create a cluster with reading from replications
+- read_cluster = get_mocked_redis_client(
+- host=default_host, port=default_port, read_from_replicas=True
+- )
+- assert read_cluster.read_from_replicas is True
+- # Check that we read from the slot's nodes in a round robin
+- # matter.
+- # 'foo' belongs to slot 12182 and the slot's nodes are:
+- # [(127.0.0.1,7001,primary), (127.0.0.1,7002,replica)]
+- read_cluster.get("foo")
+- read_cluster.get("foo")
+- read_cluster.get("foo")
+- mocks["send_command"].assert_has_calls([call("READONLY")])
+-
+- def test_keyslot(self, r):
+- """
+- Test that method will compute correct key in all supported cases
+- """
+- assert r.keyslot("foo") == 12182
+- assert r.keyslot("{foo}bar") == 12182
+- assert r.keyslot("{foo}") == 12182
+- assert r.keyslot(1337) == 4314
+-
+- assert r.keyslot(125) == r.keyslot(b"125")
+- assert r.keyslot(125) == r.keyslot("\x31\x32\x35")
+- assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
+- assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
+- assert r.keyslot(1337.1234) == r.keyslot("1337.1234")
+- assert r.keyslot(1337) == r.keyslot("1337")
+- assert r.keyslot(b"abc") == r.keyslot("abc")
+-
+- def test_get_node_name(self):
+- assert (
+- get_node_name(default_host, default_port)
+- == f"{default_host}:{default_port}"
+- )
+-
+- def test_all_nodes(self, r):
+- """
+- Set a list of nodes and it should be possible to iterate over all
+- """
+- nodes = [node for node in r.nodes_manager.nodes_cache.values()]
+-
+- for i, node in enumerate(r.get_nodes()):
+- assert node in nodes
+-
+- def test_all_nodes_masters(self, r):
+- """
+- Set a list of nodes with random primaries/replicas config and it shold
+- be possible to iterate over all of them.
+- """
+- nodes = [
+- node
+- for node in r.nodes_manager.nodes_cache.values()
+- if node.server_type == PRIMARY
+- ]
+-
+- for node in r.get_primaries():
+- assert node in nodes
+-
+- @pytest.mark.parametrize("error", RedisCluster.ERRORS_ALLOW_RETRY)
+- def test_cluster_down_overreaches_retry_attempts(self, error):
+- """
+- When error that allows retry is thrown, test that we retry executing
+- the command as many times as configured in cluster_error_retry_attempts
+- and then raise the exception
+- """
+- with patch.object(RedisCluster, "_execute_command") as execute_command:
+-
+- def raise_error(target_node, *args, **kwargs):
+- execute_command.failed_calls += 1
+- raise error("mocked error")
+-
+- execute_command.side_effect = raise_error
+-
+- rc = get_mocked_redis_client(host=default_host, port=default_port)
+-
+- with pytest.raises(error):
+- rc.get("bar")
+- assert execute_command.failed_calls == rc.cluster_error_retry_attempts
+-
+- def test_user_on_connect_function(self, request):
+- """
+- Test support in passing on_connect function by the user
+- """
+-
+- def on_connect(connection):
+- assert connection is not None
+-
+- mock = Mock(side_effect=on_connect)
+-
+- _get_client(RedisCluster, request, redis_connect_func=mock)
+- assert mock.called is True
+-
+- def test_set_default_node_success(self, r):
+- """
+- test successful replacement of the default cluster node
+- """
+- default_node = r.get_default_node()
+- # get a different node
+- new_def_node = None
+- for node in r.get_nodes():
+- if node != default_node:
+- new_def_node = node
+- break
+- assert r.set_default_node(new_def_node) is True
+- assert r.get_default_node() == new_def_node
+-
+- def test_set_default_node_failure(self, r):
+- """
+- test failed replacement of the default cluster node
+- """
+- default_node = r.get_default_node()
+- new_def_node = ClusterNode("1.1.1.1", 1111)
+- assert r.set_default_node(None) is False
+- assert r.set_default_node(new_def_node) is False
+- assert r.get_default_node() == default_node
+-
+- def test_get_node_from_key(self, r):
+- """
+- Test that get_node_from_key function returns the correct node
+- """
+- key = "bar"
+- slot = r.keyslot(key)
+- slot_nodes = r.nodes_manager.slots_cache.get(slot)
+- primary = slot_nodes[0]
+- assert r.get_node_from_key(key, replica=False) == primary
+- replica = r.get_node_from_key(key, replica=True)
+- if replica is not None:
+- assert replica.server_type == REPLICA
+- assert replica in slot_nodes
+-
+-
+-@pytest.mark.onlycluster
+-class TestClusterRedisCommands:
+- """
+- Tests for RedisCluster unique commands
+- """
+-
+- def test_case_insensitive_command_names(self, r):
+- assert (
+- r.cluster_response_callbacks["cluster addslots"]
+- == r.cluster_response_callbacks["CLUSTER ADDSLOTS"]
+- )
+-
+- def test_get_and_set(self, r):
+- # get and set can't be tested independently of each other
+- assert r.get("a") is None
+- byte_string = b"value"
+- integer = 5
+- unicode_string = chr(3456) + "abcd" + chr(3421)
+- assert r.set("byte_string", byte_string)
+- assert r.set("integer", 5)
+- assert r.set("unicode_string", unicode_string)
+- assert r.get("byte_string") == byte_string
+- assert r.get("integer") == str(integer).encode()
+- assert r.get("unicode_string").decode("utf-8") == unicode_string
+-
+- def test_mget_nonatomic(self, r):
+- assert r.mget_nonatomic([]) == []
+- assert r.mget_nonatomic(["a", "b"]) == [None, None]
+- r["a"] = "1"
+- r["b"] = "2"
+- r["c"] = "3"
+-
+- assert r.mget_nonatomic("a", "other", "b", "c") == [b"1", None, b"2", b"3"]
+-
+- def test_mset_nonatomic(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- assert r.mset_nonatomic(d)
+- for k, v in d.items():
+- assert r[k] == v
+-
+- def test_config_set(self, r):
+- assert r.config_set("slowlog-log-slower-than", 0)
+-
+- def test_cluster_config_resetstat(self, r):
+- r.ping(target_nodes="all")
+- all_info = r.info(target_nodes="all")
+- prior_commands_processed = -1
+- for node_info in all_info.values():
+- prior_commands_processed = node_info["total_commands_processed"]
+- assert prior_commands_processed >= 1
+- r.config_resetstat(target_nodes="all")
+- all_info = r.info(target_nodes="all")
+- for node_info in all_info.values():
+- reset_commands_processed = node_info["total_commands_processed"]
+- assert reset_commands_processed < prior_commands_processed
+-
+- def test_client_setname(self, r):
+- node = r.get_random_node()
+- r.client_setname("redis_py_test", target_nodes=node)
+- client_name = r.client_getname(target_nodes=node)
+- assert client_name == "redis_py_test"
+-
+- def test_exists(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- r.mset_nonatomic(d)
+- assert r.exists(*d.keys()) == len(d)
+-
+- def test_delete(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- r.mset_nonatomic(d)
+- assert r.delete(*d.keys()) == len(d)
+- assert r.delete(*d.keys()) == 0
+-
+- def test_touch(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- r.mset_nonatomic(d)
+- assert r.touch(*d.keys()) == len(d)
+-
+- def test_unlink(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- r.mset_nonatomic(d)
+- assert r.unlink(*d.keys()) == len(d)
+- # Unlink is non-blocking so we sleep before
+- # verifying the deletion
+- sleep(0.1)
+- assert r.unlink(*d.keys()) == 0
+-
+- def test_pubsub_channels_merge_results(self, r):
+- nodes = r.get_nodes()
+- channels = []
+- pubsub_nodes = []
+- i = 0
+- for node in nodes:
+- channel = f"foo{i}"
+- # We will create different pubsub clients where each one is
+- # connected to a different node
+- p = r.pubsub(node)
+- pubsub_nodes.append(p)
+- p.subscribe(channel)
+- b_channel = channel.encode("utf-8")
+- channels.append(b_channel)
+- # Assert that each node returns only the channel it subscribed to
+- sub_channels = node.redis_connection.pubsub_channels()
+- if not sub_channels:
+- # Try again after a short sleep
+- sleep(0.3)
+- sub_channels = node.redis_connection.pubsub_channels()
+- assert sub_channels == [b_channel]
+- i += 1
+- # Assert that the cluster's pubsub_channels function returns ALL of
+- # the cluster's channels
+- result = r.pubsub_channels(target_nodes="all")
+- result.sort()
+- assert result == channels
+-
+- def test_pubsub_numsub_merge_results(self, r):
+- nodes = r.get_nodes()
+- pubsub_nodes = []
+- channel = "foo"
+- b_channel = channel.encode("utf-8")
+- for node in nodes:
+- # We will create different pubsub clients where each one is
+- # connected to a different node
+- p = r.pubsub(node)
+- pubsub_nodes.append(p)
+- p.subscribe(channel)
+- # Assert that each node returns that only one client is subscribed
+- sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+- if sub_chann_num == [(b_channel, 0)]:
+- sleep(0.3)
+- sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+- assert sub_chann_num == [(b_channel, 1)]
+- # Assert that the cluster's pubsub_numsub function returns ALL clients
+- # subscribed to this channel in the entire cluster
+- assert r.pubsub_numsub(channel, target_nodes="all") == [(b_channel, len(nodes))]
+-
+- def test_pubsub_numpat_merge_results(self, r):
+- nodes = r.get_nodes()
+- pubsub_nodes = []
+- pattern = "foo*"
+- for node in nodes:
+- # We will create different pubsub clients where each one is
+- # connected to a different node
+- p = r.pubsub(node)
+- pubsub_nodes.append(p)
+- p.psubscribe(pattern)
+- # Assert that each node returns that only one client is subscribed
+- sub_num_pat = node.redis_connection.pubsub_numpat()
+- if sub_num_pat == 0:
+- sleep(0.3)
+- sub_num_pat = node.redis_connection.pubsub_numpat()
+- assert sub_num_pat == 1
+- # Assert that the cluster's pubsub_numsub function returns ALL clients
+- # subscribed to this channel in the entire cluster
+- assert r.pubsub_numpat(target_nodes="all") == len(nodes)
+-
+- @skip_if_server_version_lt("2.8.0")
+- def test_cluster_pubsub_channels(self, r):
+- p = r.pubsub()
+- p.subscribe("foo", "bar", "baz", "quux")
+- for i in range(4):
+- assert wait_for_message(p, timeout=0.5)["type"] == "subscribe"
+- expected = [b"bar", b"baz", b"foo", b"quux"]
+- assert all(
+- [channel in r.pubsub_channels(target_nodes="all") for channel in expected]
+- )
+-
+- @skip_if_server_version_lt("2.8.0")
+- def test_cluster_pubsub_numsub(self, r):
+- p1 = r.pubsub()
+- p1.subscribe("foo", "bar", "baz")
+- for i in range(3):
+- assert wait_for_message(p1, timeout=0.5)["type"] == "subscribe"
+- p2 = r.pubsub()
+- p2.subscribe("bar", "baz")
+- for i in range(2):
+- assert wait_for_message(p2, timeout=0.5)["type"] == "subscribe"
+- p3 = r.pubsub()
+- p3.subscribe("baz")
+- assert wait_for_message(p3, timeout=0.5)["type"] == "subscribe"
+-
+- channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)]
+- assert r.pubsub_numsub("foo", "bar", "baz", target_nodes="all") == channels
+-
+- def test_cluster_slots(self, r):
+- mock_all_nodes_resp(r, default_cluster_slots)
+- cluster_slots = r.cluster_slots()
+- assert isinstance(cluster_slots, dict)
+- assert len(default_cluster_slots) == len(cluster_slots)
+- assert cluster_slots.get((0, 8191)) is not None
+- assert cluster_slots.get((0, 8191)).get("primary") == ("127.0.0.1", 7000)
+-
+- def test_cluster_addslots(self, r):
+- node = r.get_random_node()
+- mock_node_resp(node, "OK")
+- assert r.cluster_addslots(node, 1, 2, 3) is True
+-
+- def test_cluster_countkeysinslot(self, r):
+- node = r.nodes_manager.get_node_from_slot(1)
+- mock_node_resp(node, 2)
+- assert r.cluster_countkeysinslot(1) == 2
+-
+- def test_cluster_count_failure_report(self, r):
+- mock_all_nodes_resp(r, 0)
+- assert r.cluster_count_failure_report("node_0") == 0
+-
+- def test_cluster_delslots(self):
+- cluster_slots = [
+- [
+- 0,
+- 8191,
+- ["127.0.0.1", 7000, "node_0"],
+- ],
+- [
+- 8192,
+- 16383,
+- ["127.0.0.1", 7001, "node_1"],
+- ],
+- ]
+- r = get_mocked_redis_client(
+- host=default_host, port=default_port, cluster_slots=cluster_slots
+- )
+- mock_all_nodes_resp(r, "OK")
+- node0 = r.get_node(default_host, 7000)
+- node1 = r.get_node(default_host, 7001)
+- assert r.cluster_delslots(0, 8192) == [True, True]
+- assert node0.redis_connection.connection.read_response.called
+- assert node1.redis_connection.connection.read_response.called
+-
+- def test_cluster_failover(self, r):
+- node = r.get_random_node()
+- mock_node_resp(node, "OK")
+- assert r.cluster_failover(node) is True
+- assert r.cluster_failover(node, "FORCE") is True
+- assert r.cluster_failover(node, "TAKEOVER") is True
+- with pytest.raises(RedisError):
+- r.cluster_failover(node, "FORCT")
+-
+- def test_cluster_info(self, r):
+- info = r.cluster_info()
+- assert isinstance(info, dict)
+- assert info["cluster_state"] == "ok"
+-
+- def test_cluster_keyslot(self, r):
+- mock_all_nodes_resp(r, 12182)
+- assert r.cluster_keyslot("foo") == 12182
+-
+- def test_cluster_meet(self, r):
+- node = r.get_default_node()
+- mock_node_resp(node, "OK")
+- assert r.cluster_meet("127.0.0.1", 6379) is True
+-
+- def test_cluster_nodes(self, r):
+- response = (
+- "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
+- "slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
+- "1447836263059 5 connected\n"
+- "9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
+- "master - 0 1447836264065 0 connected\n"
+- "aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
+- "myself,master - 0 0 2 connected 5461-10922\n"
+- "1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
+- "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
+- "1447836262556 3 connected\n"
+- "4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
+- "master - 0 1447836262555 7 connected 0-5460\n"
+- "19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
+- "master - 0 1447836263562 3 connected 10923-16383\n"
+- "fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
+- "master,fail - 1447829446956 1447829444948 1 disconnected\n"
+- )
+- mock_all_nodes_resp(r, response)
+- nodes = r.cluster_nodes()
+- assert len(nodes) == 7
+- assert nodes.get("172.17.0.7:7006") is not None
+- assert (
+- nodes.get("172.17.0.7:7006").get("node_id")
+- == "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
+- )
+-
+- def test_cluster_replicate(self, r):
+- node = r.get_random_node()
+- all_replicas = r.get_replicas()
+- mock_all_nodes_resp(r, "OK")
+- assert r.cluster_replicate(node, "c8253bae761cb61857d") is True
+- results = r.cluster_replicate(all_replicas, "c8253bae761cb61857d")
+- if isinstance(results, dict):
+- for res in results.values():
+- assert res is True
+- else:
+- assert results is True
+-
+- def test_cluster_reset(self, r):
+- mock_all_nodes_resp(r, "OK")
+- assert r.cluster_reset() is True
+- assert r.cluster_reset(False) is True
+- all_results = r.cluster_reset(False, target_nodes="all")
+- for res in all_results.values():
+- assert res is True
+-
+- def test_cluster_save_config(self, r):
+- node = r.get_random_node()
+- all_nodes = r.get_nodes()
+- mock_all_nodes_resp(r, "OK")
+- assert r.cluster_save_config(node) is True
+- all_results = r.cluster_save_config(all_nodes)
+- for res in all_results.values():
+- assert res is True
+-
+- def test_cluster_get_keys_in_slot(self, r):
+- response = [b"{foo}1", b"{foo}2"]
+- node = r.nodes_manager.get_node_from_slot(12182)
+- mock_node_resp(node, response)
+- keys = r.cluster_get_keys_in_slot(12182, 4)
+- assert keys == response
+-
+- def test_cluster_set_config_epoch(self, r):
+- mock_all_nodes_resp(r, "OK")
+- assert r.cluster_set_config_epoch(3) is True
+- all_results = r.cluster_set_config_epoch(3, target_nodes="all")
+- for res in all_results.values():
+- assert res is True
+-
+- def test_cluster_setslot(self, r):
+- node = r.get_random_node()
+- mock_node_resp(node, "OK")
+- assert r.cluster_setslot(node, "node_0", 1218, "IMPORTING") is True
+- assert r.cluster_setslot(node, "node_0", 1218, "NODE") is True
+- assert r.cluster_setslot(node, "node_0", 1218, "MIGRATING") is True
+- with pytest.raises(RedisError):
+- r.cluster_failover(node, "STABLE")
+- with pytest.raises(RedisError):
+- r.cluster_failover(node, "STATE")
+-
+- def test_cluster_setslot_stable(self, r):
+- node = r.nodes_manager.get_node_from_slot(12182)
+- mock_node_resp(node, "OK")
+- assert r.cluster_setslot_stable(12182) is True
+- assert node.redis_connection.connection.read_response.called
+-
+- def test_cluster_replicas(self, r):
+- response = [
+- b"01eca22229cf3c652b6fca0d09ff6941e0d2e3 "
+- b"127.0.0.1:6377@16377 slave "
+- b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
+- b"1634550063436 4 connected",
+- b"r4xfga22229cf3c652b6fca0d09ff69f3e0d4d "
+- b"127.0.0.1:6378@16378 slave "
+- b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
+- b"1634550063436 4 connected",
+- ]
+- mock_all_nodes_resp(r, response)
+- replicas = r.cluster_replicas("52611e796814b78e90ad94be9d769a4f668f9a")
+- assert replicas.get("127.0.0.1:6377") is not None
+- assert replicas.get("127.0.0.1:6378") is not None
+- assert (
+- replicas.get("127.0.0.1:6378").get("node_id")
+- == "r4xfga22229cf3c652b6fca0d09ff69f3e0d4d"
+- )
+-
+- def test_readonly(self):
+- r = get_mocked_redis_client(host=default_host, port=default_port)
+- mock_all_nodes_resp(r, "OK")
+- assert r.readonly() is True
+- all_replicas_results = r.readonly(target_nodes="replicas")
+- for res in all_replicas_results.values():
+- assert res is True
+- for replica in r.get_replicas():
+- assert replica.redis_connection.connection.read_response.called
+-
+- def test_readwrite(self):
+- r = get_mocked_redis_client(host=default_host, port=default_port)
+- mock_all_nodes_resp(r, "OK")
+- assert r.readwrite() is True
+- all_replicas_results = r.readwrite(target_nodes="replicas")
+- for res in all_replicas_results.values():
+- assert res is True
+- for replica in r.get_replicas():
+- assert replica.redis_connection.connection.read_response.called
+-
+- def test_bgsave(self, r):
+- assert r.bgsave()
+- sleep(0.3)
+- assert r.bgsave(True)
+-
+- def test_info(self, r):
+- # Map keys to same slot
+- r.set("x{1}", 1)
+- r.set("y{1}", 2)
+- r.set("z{1}", 3)
+- # Get node that handles the slot
+- slot = r.keyslot("x{1}")
+- node = r.nodes_manager.get_node_from_slot(slot)
+- # Run info on that node
+- info = r.info(target_nodes=node)
+- assert isinstance(info, dict)
+- assert info["db0"]["keys"] == 3
+-
+- def _init_slowlog_test(self, r, node):
+- slowlog_lim = r.config_get("slowlog-log-slower-than", target_nodes=node)
+- assert r.config_set("slowlog-log-slower-than", 0, target_nodes=node) is True
+- return slowlog_lim["slowlog-log-slower-than"]
+-
+- def _teardown_slowlog_test(self, r, node, prev_limit):
+- assert (
+- r.config_set("slowlog-log-slower-than", prev_limit, target_nodes=node)
+- is True
+- )
+-
+- def test_slowlog_get(self, r, slowlog):
+- unicode_string = chr(3456) + "abcd" + chr(3421)
+- node = r.get_node_from_key(unicode_string)
+- slowlog_limit = self._init_slowlog_test(r, node)
+- assert r.slowlog_reset(target_nodes=node)
+- r.get(unicode_string)
+- slowlog = r.slowlog_get(target_nodes=node)
+- assert isinstance(slowlog, list)
+- commands = [log["command"] for log in slowlog]
+-
+- get_command = b" ".join((b"GET", unicode_string.encode("utf-8")))
+- assert get_command in commands
+- assert b"SLOWLOG RESET" in commands
+-
+- # the order should be ['GET <uni string>', 'SLOWLOG RESET'],
+- # but if other clients are executing commands at the same time, there
+- # could be commands, before, between, or after, so just check that
+- # the two we care about are in the appropriate order.
+- assert commands.index(get_command) < commands.index(b"SLOWLOG RESET")
+-
+- # make sure other attributes are typed correctly
+- assert isinstance(slowlog[0]["start_time"], int)
+- assert isinstance(slowlog[0]["duration"], int)
+- # rollback the slowlog limit to its original value
+- self._teardown_slowlog_test(r, node, slowlog_limit)
+-
+- def test_slowlog_get_limit(self, r, slowlog):
+- assert r.slowlog_reset()
+- node = r.get_node_from_key("foo")
+- slowlog_limit = self._init_slowlog_test(r, node)
+- r.get("foo")
+- slowlog = r.slowlog_get(1, target_nodes=node)
+- assert isinstance(slowlog, list)
+- # only one command, based on the number we passed to slowlog_get()
+- assert len(slowlog) == 1
+- self._teardown_slowlog_test(r, node, slowlog_limit)
+-
+- def test_slowlog_length(self, r, slowlog):
+- r.get("foo")
+- node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
+- slowlog_len = r.slowlog_len(target_nodes=node)
+- assert isinstance(slowlog_len, int)
+-
+- def test_time(self, r):
+- t = r.time(target_nodes=r.get_primaries()[0])
+- assert len(t) == 2
+- assert isinstance(t[0], int)
+- assert isinstance(t[1], int)
+-
+- @skip_if_server_version_lt("4.0.0")
+- def test_memory_usage(self, r):
+- r.set("foo", "bar")
+- assert isinstance(r.memory_usage("foo"), int)
+-
+- @skip_if_server_version_lt("4.0.0")
+- def test_memory_malloc_stats(self, r):
+- assert r.memory_malloc_stats()
+-
+- @skip_if_server_version_lt("4.0.0")
+- def test_memory_stats(self, r):
+- # put a key into the current db to make sure that "db.<current-db>"
+- # has data
+- r.set("foo", "bar")
+- node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
+- stats = r.memory_stats(target_nodes=node)
+- assert isinstance(stats, dict)
+- for key, value in stats.items():
+- if key.startswith("db."):
+- assert isinstance(value, dict)
+-
+- @skip_if_server_version_lt("4.0.0")
+- def test_memory_help(self, r):
+- with pytest.raises(NotImplementedError):
+- r.memory_help()
+-
+- @skip_if_server_version_lt("4.0.0")
+- def test_memory_doctor(self, r):
+- with pytest.raises(NotImplementedError):
+- r.memory_doctor()
+-
+- def test_lastsave(self, r):
+- node = r.get_primaries()[0]
+- assert isinstance(r.lastsave(target_nodes=node), datetime.datetime)
+-
+- def test_cluster_echo(self, r):
+- node = r.get_primaries()[0]
+- assert r.echo("foo bar", target_nodes=node) == b"foo bar"
+-
+- @skip_if_server_version_lt("1.0.0")
+- def test_debug_segfault(self, r):
+- with pytest.raises(NotImplementedError):
+- r.debug_segfault()
+-
+- def test_config_resetstat(self, r):
+- node = r.get_primaries()[0]
+- r.ping(target_nodes=node)
+- prior_commands_processed = int(
+- r.info(target_nodes=node)["total_commands_processed"]
+- )
+- assert prior_commands_processed >= 1
+- r.config_resetstat(target_nodes=node)
+- reset_commands_processed = int(
+- r.info(target_nodes=node)["total_commands_processed"]
+- )
+- assert reset_commands_processed < prior_commands_processed
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_client_trackinginfo(self, r):
+- node = r.get_primaries()[0]
+- res = r.client_trackinginfo(target_nodes=node)
+- assert len(res) > 2
+- assert "prefixes" in res
+-
+- @skip_if_server_version_lt("2.9.50")
+- def test_client_pause(self, r):
+- node = r.get_primaries()[0]
+- assert r.client_pause(1, target_nodes=node)
+- assert r.client_pause(timeout=1, target_nodes=node)
+- with pytest.raises(RedisError):
+- r.client_pause(timeout="not an integer", target_nodes=node)
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_client_unpause(self, r):
+- assert r.client_unpause()
+-
+- @skip_if_server_version_lt("5.0.0")
+- def test_client_id(self, r):
+- node = r.get_primaries()[0]
+- assert r.client_id(target_nodes=node) > 0
+-
+- @skip_if_server_version_lt("5.0.0")
+- def test_client_unblock(self, r):
+- node = r.get_primaries()[0]
+- myid = r.client_id(target_nodes=node)
+- assert not r.client_unblock(myid, target_nodes=node)
+- assert not r.client_unblock(myid, error=True, target_nodes=node)
+- assert not r.client_unblock(myid, error=False, target_nodes=node)
+-
+- @skip_if_server_version_lt("6.0.0")
+- def test_client_getredir(self, r):
+- node = r.get_primaries()[0]
+- assert isinstance(r.client_getredir(target_nodes=node), int)
+- assert r.client_getredir(target_nodes=node) == -1
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_client_info(self, r):
+- node = r.get_primaries()[0]
+- info = r.client_info(target_nodes=node)
+- assert isinstance(info, dict)
+- assert "addr" in info
+-
+- @skip_if_server_version_lt("2.6.9")
+- def test_client_kill(self, r, r2):
+- node = r.get_primaries()[0]
+- r.client_setname("redis-py-c1", target_nodes="all")
+- r2.client_setname("redis-py-c2", target_nodes="all")
+- clients = [
+- client
+- for client in r.client_list(target_nodes=node)
+- if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+- ]
+- assert len(clients) == 2
+- clients_by_name = {client.get("name"): client for client in clients}
+-
+- client_addr = clients_by_name["redis-py-c2"].get("addr")
+- assert r.client_kill(client_addr, target_nodes=node) is True
+-
+- clients = [
+- client
+- for client in r.client_list(target_nodes=node)
+- if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+- ]
+- assert len(clients) == 1
+- assert clients[0].get("name") == "redis-py-c1"
+-
+- @skip_if_server_version_lt("2.6.0")
+- def test_cluster_bitop_not_empty_string(self, r):
+- r["{foo}a"] = ""
+- r.bitop("not", "{foo}r", "{foo}a")
+- assert r.get("{foo}r") is None
+-
+- @skip_if_server_version_lt("2.6.0")
+- def test_cluster_bitop_not(self, r):
+- test_str = b"\xAA\x00\xFF\x55"
+- correct = ~0xAA00FF55 & 0xFFFFFFFF
+- r["{foo}a"] = test_str
+- r.bitop("not", "{foo}r", "{foo}a")
+- assert int(binascii.hexlify(r["{foo}r"]), 16) == correct
+-
+- @skip_if_server_version_lt("2.6.0")
+- def test_cluster_bitop_not_in_place(self, r):
+- test_str = b"\xAA\x00\xFF\x55"
+- correct = ~0xAA00FF55 & 0xFFFFFFFF
+- r["{foo}a"] = test_str
+- r.bitop("not", "{foo}a", "{foo}a")
+- assert int(binascii.hexlify(r["{foo}a"]), 16) == correct
+-
+- @skip_if_server_version_lt("2.6.0")
+- def test_cluster_bitop_single_string(self, r):
+- test_str = b"\x01\x02\xFF"
+- r["{foo}a"] = test_str
+- r.bitop("and", "{foo}res1", "{foo}a")
+- r.bitop("or", "{foo}res2", "{foo}a")
+- r.bitop("xor", "{foo}res3", "{foo}a")
+- assert r["{foo}res1"] == test_str
+- assert r["{foo}res2"] == test_str
+- assert r["{foo}res3"] == test_str
+-
+- @skip_if_server_version_lt("2.6.0")
+- def test_cluster_bitop_string_operands(self, r):
+- r["{foo}a"] = b"\x01\x02\xFF\xFF"
+- r["{foo}b"] = b"\x01\x02\xFF"
+- r.bitop("and", "{foo}res1", "{foo}a", "{foo}b")
+- r.bitop("or", "{foo}res2", "{foo}a", "{foo}b")
+- r.bitop("xor", "{foo}res3", "{foo}a", "{foo}b")
+- assert int(binascii.hexlify(r["{foo}res1"]), 16) == 0x0102FF00
+- assert int(binascii.hexlify(r["{foo}res2"]), 16) == 0x0102FFFF
+- assert int(binascii.hexlify(r["{foo}res3"]), 16) == 0x000000FF
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_copy(self, r):
+- assert r.copy("{foo}a", "{foo}b") == 0
+- r.set("{foo}a", "bar")
+- assert r.copy("{foo}a", "{foo}b") == 1
+- assert r.get("{foo}a") == b"bar"
+- assert r.get("{foo}b") == b"bar"
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_copy_and_replace(self, r):
+- r.set("{foo}a", "foo1")
+- r.set("{foo}b", "foo2")
+- assert r.copy("{foo}a", "{foo}b") == 0
+- assert r.copy("{foo}a", "{foo}b", replace=True) == 1
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_lmove(self, r):
+- r.rpush("{foo}a", "one", "two", "three", "four")
+- assert r.lmove("{foo}a", "{foo}b")
+- assert r.lmove("{foo}a", "{foo}b", "right", "left")
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_blmove(self, r):
+- r.rpush("{foo}a", "one", "two", "three", "four")
+- assert r.blmove("{foo}a", "{foo}b", 5)
+- assert r.blmove("{foo}a", "{foo}b", 1, "RIGHT", "LEFT")
+-
+- def test_cluster_msetnx(self, r):
+- d = {"{foo}a": b"1", "{foo}b": b"2", "{foo}c": b"3"}
+- assert r.msetnx(d)
+- d2 = {"{foo}a": b"x", "{foo}d": b"4"}
+- assert not r.msetnx(d2)
+- for k, v in d.items():
+- assert r[k] == v
+- assert r.get("{foo}d") is None
+-
+- def test_cluster_rename(self, r):
+- r["{foo}a"] = "1"
+- assert r.rename("{foo}a", "{foo}b")
+- assert r.get("{foo}a") is None
+- assert r["{foo}b"] == b"1"
+-
+- def test_cluster_renamenx(self, r):
+- r["{foo}a"] = "1"
+- r["{foo}b"] = "2"
+- assert not r.renamenx("{foo}a", "{foo}b")
+- assert r["{foo}a"] == b"1"
+- assert r["{foo}b"] == b"2"
+-
+- # LIST COMMANDS
+- def test_cluster_blpop(self, r):
+- r.rpush("{foo}a", "1", "2")
+- r.rpush("{foo}b", "3", "4")
+- assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"3")
+- assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"4")
+- assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"1")
+- assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"2")
+- assert r.blpop(["{foo}b", "{foo}a"], timeout=1) is None
+- r.rpush("{foo}c", "1")
+- assert r.blpop("{foo}c", timeout=1) == (b"{foo}c", b"1")
+-
+- def test_cluster_brpop(self, r):
+- r.rpush("{foo}a", "1", "2")
+- r.rpush("{foo}b", "3", "4")
+- assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"4")
+- assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"3")
+- assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"2")
+- assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"1")
+- assert r.brpop(["{foo}b", "{foo}a"], timeout=1) is None
+- r.rpush("{foo}c", "1")
+- assert r.brpop("{foo}c", timeout=1) == (b"{foo}c", b"1")
+-
+- def test_cluster_brpoplpush(self, r):
+- r.rpush("{foo}a", "1", "2")
+- r.rpush("{foo}b", "3", "4")
+- assert r.brpoplpush("{foo}a", "{foo}b") == b"2"
+- assert r.brpoplpush("{foo}a", "{foo}b") == b"1"
+- assert r.brpoplpush("{foo}a", "{foo}b", timeout=1) is None
+- assert r.lrange("{foo}a", 0, -1) == []
+- assert r.lrange("{foo}b", 0, -1) == [b"1", b"2", b"3", b"4"]
+-
+- def test_cluster_brpoplpush_empty_string(self, r):
+- r.rpush("{foo}a", "")
+- assert r.brpoplpush("{foo}a", "{foo}b") == b""
+-
+- def test_cluster_rpoplpush(self, r):
+- r.rpush("{foo}a", "a1", "a2", "a3")
+- r.rpush("{foo}b", "b1", "b2", "b3")
+- assert r.rpoplpush("{foo}a", "{foo}b") == b"a3"
+- assert r.lrange("{foo}a", 0, -1) == [b"a1", b"a2"]
+- assert r.lrange("{foo}b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"]
+-
+- def test_cluster_sdiff(self, r):
+- r.sadd("{foo}a", "1", "2", "3")
+- assert r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"}
+- r.sadd("{foo}b", "2", "3")
+- assert r.sdiff("{foo}a", "{foo}b") == {b"1"}
+-
+- def test_cluster_sdiffstore(self, r):
+- r.sadd("{foo}a", "1", "2", "3")
+- assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3
+- assert r.smembers("{foo}c") == {b"1", b"2", b"3"}
+- r.sadd("{foo}b", "2", "3")
+- assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1
+- assert r.smembers("{foo}c") == {b"1"}
+-
+- def test_cluster_sinter(self, r):
+- r.sadd("{foo}a", "1", "2", "3")
+- assert r.sinter("{foo}a", "{foo}b") == set()
+- r.sadd("{foo}b", "2", "3")
+- assert r.sinter("{foo}a", "{foo}b") == {b"2", b"3"}
+-
+- def test_cluster_sinterstore(self, r):
+- r.sadd("{foo}a", "1", "2", "3")
+- assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0
+- assert r.smembers("{foo}c") == set()
+- r.sadd("{foo}b", "2", "3")
+- assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2
+- assert r.smembers("{foo}c") == {b"2", b"3"}
+-
+- def test_cluster_smove(self, r):
+- r.sadd("{foo}a", "a1", "a2")
+- r.sadd("{foo}b", "b1", "b2")
+- assert r.smove("{foo}a", "{foo}b", "a1")
+- assert r.smembers("{foo}a") == {b"a2"}
+- assert r.smembers("{foo}b") == {b"b1", b"b2", b"a1"}
+-
+- def test_cluster_sunion(self, r):
+- r.sadd("{foo}a", "1", "2")
+- r.sadd("{foo}b", "2", "3")
+- assert r.sunion("{foo}a", "{foo}b") == {b"1", b"2", b"3"}
+-
+- def test_cluster_sunionstore(self, r):
+- r.sadd("{foo}a", "1", "2")
+- r.sadd("{foo}b", "2", "3")
+- assert r.sunionstore("{foo}c", "{foo}a", "{foo}b") == 3
+- assert r.smembers("{foo}c") == {b"1", b"2", b"3"}
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_zdiff(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+- r.zadd("{foo}b", {"a1": 1, "a2": 2})
+- assert r.zdiff(["{foo}a", "{foo}b"]) == [b"a3"]
+- assert r.zdiff(["{foo}a", "{foo}b"], withscores=True) == [b"a3", b"3"]
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_zdiffstore(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+- r.zadd("{foo}b", {"a1": 1, "a2": 2})
+- assert r.zdiffstore("{foo}out", ["{foo}a", "{foo}b"])
+- assert r.zrange("{foo}out", 0, -1) == [b"a3"]
+- assert r.zrange("{foo}out", 0, -1, withscores=True) == [(b"a3", 3.0)]
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_zinter(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert r.zinter(["{foo}a", "{foo}b", "{foo}c"]) == [b"a3", b"a1"]
+- # invalid aggregation
+- with pytest.raises(DataError):
+- r.zinter(["{foo}a", "{foo}b", "{foo}c"], aggregate="foo", withscores=True)
+- # aggregate with SUM
+- assert r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
+- (b"a3", 8),
+- (b"a1", 9),
+- ]
+- # aggregate with MAX
+- assert r.zinter(
+- ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX", withscores=True
+- ) == [(b"a3", 5), (b"a1", 6)]
+- # aggregate with MIN
+- assert r.zinter(
+- ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
+- ) == [(b"a1", 1), (b"a3", 1)]
+- # with weights
+- assert r.zinter({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
+- (b"a3", 20),
+- (b"a1", 23),
+- ]
+-
+- def test_cluster_zinterstore_sum(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 2
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 8), (b"a1", 9)]
+-
+- def test_cluster_zinterstore_max(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert (
+- r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
+- == 2
+- )
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 5), (b"a1", 6)]
+-
+- def test_cluster_zinterstore_min(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+- r.zadd("{foo}b", {"a1": 2, "a2": 3, "a3": 5})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert (
+- r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
+- == 2
+- )
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a1", 1), (b"a3", 3)]
+-
+- def test_cluster_zinterstore_with_weight(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert r.zinterstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 2
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 20), (b"a1", 23)]
+-
+- @skip_if_server_version_lt("4.9.0")
+- def test_cluster_bzpopmax(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2})
+- r.zadd("{foo}b", {"b1": 10, "b2": 20})
+- assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b2", 20)
+- assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b1", 10)
+- assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a2", 2)
+- assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a1", 1)
+- assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) is None
+- r.zadd("{foo}c", {"c1": 100})
+- assert r.bzpopmax("{foo}c", timeout=1) == (b"{foo}c", b"c1", 100)
+-
+- @skip_if_server_version_lt("4.9.0")
+- def test_cluster_bzpopmin(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2})
+- r.zadd("{foo}b", {"b1": 10, "b2": 20})
+- assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b1", 10)
+- assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b2", 20)
+- assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a1", 1)
+- assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a2", 2)
+- assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) is None
+- r.zadd("{foo}c", {"c1": 100})
+- assert r.bzpopmin("{foo}c", timeout=1) == (b"{foo}c", b"c1", 100)
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_zrangestore(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+- assert r.zrangestore("{foo}b", "{foo}a", 0, 1)
+- assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
+- assert r.zrangestore("{foo}b", "{foo}a", 1, 2)
+- assert r.zrange("{foo}b", 0, -1) == [b"a2", b"a3"]
+- assert r.zrange("{foo}b", 0, -1, withscores=True) == [(b"a2", 2), (b"a3", 3)]
+- # reversed order
+- assert r.zrangestore("{foo}b", "{foo}a", 1, 2, desc=True)
+- assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
+- # by score
+- assert r.zrangestore(
+- "{foo}b", "{foo}a", 2, 1, byscore=True, offset=0, num=1, desc=True
+- )
+- assert r.zrange("{foo}b", 0, -1) == [b"a2"]
+- # by lex
+- assert r.zrangestore(
+- "{foo}b", "{foo}a", "[a2", "(a3", bylex=True, offset=0, num=1
+- )
+- assert r.zrange("{foo}b", 0, -1) == [b"a2"]
+-
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_zunion(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- # sum
+- assert r.zunion(["{foo}a", "{foo}b", "{foo}c"]) == [b"a2", b"a4", b"a3", b"a1"]
+- assert r.zunion(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
+- (b"a2", 3),
+- (b"a4", 4),
+- (b"a3", 8),
+- (b"a1", 9),
+- ]
+- # max
+- assert r.zunion(
+- ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX", withscores=True
+- ) == [(b"a2", 2), (b"a4", 4), (b"a3", 5), (b"a1", 6)]
+- # min
+- assert r.zunion(
+- ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
+- ) == [(b"a1", 1), (b"a2", 1), (b"a3", 1), (b"a4", 4)]
+- # with weight
+- assert r.zunion({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
+- (b"a2", 5),
+- (b"a4", 12),
+- (b"a3", 20),
+- (b"a1", 23),
+- ]
+-
+- def test_cluster_zunionstore_sum(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 4
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+- (b"a2", 3),
+- (b"a4", 4),
+- (b"a3", 8),
+- (b"a1", 9),
+- ]
+-
+- def test_cluster_zunionstore_max(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert (
+- r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
+- == 4
+- )
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+- (b"a2", 2),
+- (b"a4", 4),
+- (b"a3", 5),
+- (b"a1", 6),
+- ]
+-
+- def test_cluster_zunionstore_min(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 4})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert (
+- r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
+- == 4
+- )
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+- (b"a1", 1),
+- (b"a2", 2),
+- (b"a3", 3),
+- (b"a4", 4),
+- ]
+-
+- def test_cluster_zunionstore_with_weight(self, r):
+- r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+- r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+- r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+- assert r.zunionstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 4
+- assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+- (b"a2", 5),
+- (b"a4", 12),
+- (b"a3", 20),
+- (b"a1", 23),
+- ]
+-
+- @skip_if_server_version_lt("2.8.9")
+- def test_cluster_pfcount(self, r):
+- members = {b"1", b"2", b"3"}
+- r.pfadd("{foo}a", *members)
+- assert r.pfcount("{foo}a") == len(members)
+- members_b = {b"2", b"3", b"4"}
+- r.pfadd("{foo}b", *members_b)
+- assert r.pfcount("{foo}b") == len(members_b)
+- assert r.pfcount("{foo}a", "{foo}b") == len(members_b.union(members))
+-
+- @skip_if_server_version_lt("2.8.9")
+- def test_cluster_pfmerge(self, r):
+- mema = {b"1", b"2", b"3"}
+- memb = {b"2", b"3", b"4"}
+- memc = {b"5", b"6", b"7"}
+- r.pfadd("{foo}a", *mema)
+- r.pfadd("{foo}b", *memb)
+- r.pfadd("{foo}c", *memc)
+- r.pfmerge("{foo}d", "{foo}c", "{foo}a")
+- assert r.pfcount("{foo}d") == 6
+- r.pfmerge("{foo}d", "{foo}b")
+- assert r.pfcount("{foo}d") == 7
+-
+- def test_cluster_sort_store(self, r):
+- r.rpush("{foo}a", "2", "3", "1")
+- assert r.sort("{foo}a", store="{foo}sorted_values") == 3
+- assert r.lrange("{foo}sorted_values", 0, -1) == [b"1", b"2", b"3"]
+-
+- # GEO COMMANDS
+- @skip_if_server_version_lt("6.2.0")
+- def test_cluster_geosearchstore(self, r):
+- values = (2.1909389952632, 41.433791470673, "place1") + (
+- 2.1873744593677,
+- 41.406342043777,
+- "place2",
+- )
+-
+- r.geoadd("{foo}barcelona", values)
+- r.geosearchstore(
+- "{foo}places_barcelona",
+- "{foo}barcelona",
+- longitude=2.191,
+- latitude=41.433,
+- radius=1000,
+- )
+- assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]
+-
+- @skip_unless_arch_bits(64)
+- @skip_if_server_version_lt("6.2.0")
+- def test_geosearchstore_dist(self, r):
+- values = (2.1909389952632, 41.433791470673, "place1") + (
+- 2.1873744593677,
+- 41.406342043777,
+- "place2",
+- )
+-
+- r.geoadd("{foo}barcelona", values)
+- r.geosearchstore(
+- "{foo}places_barcelona",
+- "{foo}barcelona",
+- longitude=2.191,
+- latitude=41.433,
+- radius=1000,
+- storedist=True,
+- )
+- # instead of saving the geo score, the distance is saved.
+- assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301
+-
+- @skip_if_server_version_lt("3.2.0")
+- def test_cluster_georadius_store(self, r):
+- values = (2.1909389952632, 41.433791470673, "place1") + (
+- 2.1873744593677,
+- 41.406342043777,
+- "place2",
+- )
+-
+- r.geoadd("{foo}barcelona", values)
+- r.georadius(
+- "{foo}barcelona", 2.191, 41.433, 1000, store="{foo}places_barcelona"
+- )
+- assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]
+-
+- @skip_unless_arch_bits(64)
+- @skip_if_server_version_lt("3.2.0")
+- def test_cluster_georadius_store_dist(self, r):
+- values = (2.1909389952632, 41.433791470673, "place1") + (
+- 2.1873744593677,
+- 41.406342043777,
+- "place2",
+- )
+-
+- r.geoadd("{foo}barcelona", values)
+- r.georadius(
+- "{foo}barcelona", 2.191, 41.433, 1000, store_dist="{foo}places_barcelona"
+- )
+- # instead of saving the geo score, the distance is saved.
+- assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301
+-
+- def test_cluster_dbsize(self, r):
+- d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
+- assert r.mset_nonatomic(d)
+- assert r.dbsize(target_nodes="primaries") == len(d)
+-
+- def test_cluster_keys(self, r):
+- assert r.keys() == []
+- keys_with_underscores = {b"test_a", b"test_b"}
+- keys = keys_with_underscores.union({b"testc"})
+- for key in keys:
+- r[key] = 1
+- assert (
+- set(r.keys(pattern="test_*", target_nodes="primaries"))
+- == keys_with_underscores
+- )
+- assert set(r.keys(pattern="test*", target_nodes="primaries")) == keys
+-
+- # SCAN COMMANDS
+- @skip_if_server_version_lt("2.8.0")
+- def test_cluster_scan(self, r):
+- r.set("a", 1)
+- r.set("b", 2)
+- r.set("c", 3)
+- cursor, keys = r.scan(target_nodes="primaries")
+- assert cursor == 0
+- assert set(keys) == {b"a", b"b", b"c"}
+- _, keys = r.scan(match="a", target_nodes="primaries")
+- assert set(keys) == {b"a"}
+-
+- @skip_if_server_version_lt("6.0.0")
+- def test_cluster_scan_type(self, r):
+- r.sadd("a-set", 1)
+- r.hset("a-hash", "foo", 2)
+- r.lpush("a-list", "aux", 3)
+- _, keys = r.scan(match="a*", _type="SET", target_nodes="primaries")
+- assert set(keys) == {b"a-set"}
+-
+- @skip_if_server_version_lt("2.8.0")
+- def test_cluster_scan_iter(self, r):
+- r.set("a", 1)
+- r.set("b", 2)
+- r.set("c", 3)
+- keys = list(r.scan_iter(target_nodes="primaries"))
+- assert set(keys) == {b"a", b"b", b"c"}
+- keys = list(r.scan_iter(match="a", target_nodes="primaries"))
+- assert set(keys) == {b"a"}
+-
+- def test_cluster_randomkey(self, r):
+- node = r.get_node_from_key("{foo}")
+- assert r.randomkey(target_nodes=node) is None
+- for key in ("{foo}a", "{foo}b", "{foo}c"):
+- r[key] = 1
+- assert r.randomkey(target_nodes=node) in (b"{foo}a", b"{foo}b", b"{foo}c")
+-
+- @skip_if_server_version_lt("6.0.0")
+- @skip_if_redis_enterprise()
+- def test_acl_log(self, r, request):
+- key = "{cache}:"
+- node = r.get_node_from_key(key)
+- username = "redis-py-user"
+-
+- def teardown():
+- r.acl_deluser(username, target_nodes="primaries")
+-
+- request.addfinalizer(teardown)
+- r.acl_setuser(
+- username,
+- enabled=True,
+- reset=True,
+- commands=["+get", "+set", "+select", "+cluster", "+command", "+info"],
+- keys=["{cache}:*"],
+- nopass=True,
+- target_nodes="primaries",
+- )
+- r.acl_log_reset(target_nodes=node)
+-
+- user_client = _get_client(
+- RedisCluster, request, flushdb=False, username=username
+- )
+-
+- # Valid operation and key
+- assert user_client.set("{cache}:0", 1)
+- assert user_client.get("{cache}:0") == b"1"
+-
+- # Invalid key
+- with pytest.raises(NoPermissionError):
+- user_client.get("{cache}violated_cache:0")
+-
+- # Invalid operation
+- with pytest.raises(NoPermissionError):
+- user_client.hset("{cache}:0", "hkey", "hval")
+-
+- assert isinstance(r.acl_log(target_nodes=node), list)
+- assert len(r.acl_log(target_nodes=node)) == 2
+- assert len(r.acl_log(count=1, target_nodes=node)) == 1
+- assert isinstance(r.acl_log(target_nodes=node)[0], dict)
+- assert "client-info" in r.acl_log(count=1, target_nodes=node)[0]
+- assert r.acl_log_reset(target_nodes=node)
+-
+-
+-@pytest.mark.onlycluster
+-class TestNodesManager:
+- """
+- Tests for the NodesManager class
+- """
+-
+- def test_load_balancer(self, r):
+- n_manager = r.nodes_manager
+- lb = n_manager.read_load_balancer
+- slot_1 = 1257
+- slot_2 = 8975
+- node_1 = ClusterNode(default_host, 6379, PRIMARY)
+- node_2 = ClusterNode(default_host, 6378, REPLICA)
+- node_3 = ClusterNode(default_host, 6377, REPLICA)
+- node_4 = ClusterNode(default_host, 6376, PRIMARY)
+- node_5 = ClusterNode(default_host, 6375, REPLICA)
+- n_manager.slots_cache = {
+- slot_1: [node_1, node_2, node_3],
+- slot_2: [node_4, node_5],
+- }
+- primary1_name = n_manager.slots_cache[slot_1][0].name
+- primary2_name = n_manager.slots_cache[slot_2][0].name
+- list1_size = len(n_manager.slots_cache[slot_1])
+- list2_size = len(n_manager.slots_cache[slot_2])
+- # slot 1
+- assert lb.get_server_index(primary1_name, list1_size) == 0
+- assert lb.get_server_index(primary1_name, list1_size) == 1
+- assert lb.get_server_index(primary1_name, list1_size) == 2
+- assert lb.get_server_index(primary1_name, list1_size) == 0
+- # slot 2
+- assert lb.get_server_index(primary2_name, list2_size) == 0
+- assert lb.get_server_index(primary2_name, list2_size) == 1
+- assert lb.get_server_index(primary2_name, list2_size) == 0
+-
+- lb.reset()
+- assert lb.get_server_index(primary1_name, list1_size) == 0
+- assert lb.get_server_index(primary2_name, list2_size) == 0
+-
+- def test_init_slots_cache_not_all_slots_covered(self):
+- """
+- Test that if not all slots are covered it should raise an exception
+- """
+- # Missing slot 5460
+- cluster_slots = [
+- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
+- ]
+- with pytest.raises(RedisClusterException) as ex:
+- get_mocked_redis_client(
+- host=default_host, port=default_port, cluster_slots=cluster_slots
+- )
+- assert str(ex.value).startswith(
+- "All slots are not covered after query all startup_nodes."
+- )
+-
+- def test_init_slots_cache_not_require_full_coverage_error(self):
+- """
+- When require_full_coverage is set to False and not all slots are
+- covered, if one of the nodes has 'cluster-require_full_coverage'
+- config set to 'yes', the cluster initialization should fail
+- """
+- # Missing slot 5460
+- cluster_slots = [
+- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
+- ]
+-
+- with pytest.raises(RedisClusterException):
+- get_mocked_redis_client(
+- host=default_host,
+- port=default_port,
+- cluster_slots=cluster_slots,
+- require_full_coverage=False,
+- coverage_result="yes",
+- )
+-
+- def test_init_slots_cache_not_require_full_coverage_success(self):
+- """
+- When require_full_coverage is set to False and not all slots are
+- covered, if all of the nodes have 'cluster-require_full_coverage'
+- config set to 'no', the cluster initialization should succeed
+- """
+- # Missing slot 5460
+- cluster_slots = [
+- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
+- ]
+-
+- rc = get_mocked_redis_client(
+- host=default_host,
+- port=default_port,
+- cluster_slots=cluster_slots,
+- require_full_coverage=False,
+- coverage_result="no",
+- )
+-
+- assert 5460 not in rc.nodes_manager.slots_cache
+-
+- def test_init_slots_cache_not_require_full_coverage_skips_check(self):
+- """
+- Test that when require_full_coverage is set to False and
+- skip_full_coverage_check is set to true, the cluster initialization
+- succeeds without checking the nodes' Redis configurations
+- """
+- # Missing slot 5460
+- cluster_slots = [
+- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
+- ]
+-
+- with patch.object(
+- NodesManager, "cluster_require_full_coverage"
+- ) as conf_check_mock:
+- rc = get_mocked_redis_client(
+- host=default_host,
+- port=default_port,
+- cluster_slots=cluster_slots,
+- require_full_coverage=False,
+- skip_full_coverage_check=True,
+- coverage_result="no",
+- )
+-
+- assert conf_check_mock.called is False
+- assert 5460 not in rc.nodes_manager.slots_cache
+-
+- def test_init_slots_cache(self):
+- """
+- Test that the slots cache can be initialized and all slots are covered
+- """
+- good_slots_resp = [
+- [0, 5460, ["127.0.0.1", 7000], ["127.0.0.2", 7003]],
+- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.2", 7004]],
+- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.2", 7005]],
+- ]
+-
+- rc = get_mocked_redis_client(
+- host=default_host, port=default_port, cluster_slots=good_slots_resp
+- )
+- n_manager = rc.nodes_manager
+- assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+- for slot_info in good_slots_resp:
+- all_hosts = ["127.0.0.1", "127.0.0.2"]
+- all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
+- slot_start = slot_info[0]
+- slot_end = slot_info[1]
+- for i in range(slot_start, slot_end + 1):
+- assert len(n_manager.slots_cache[i]) == len(slot_info[2:])
+- assert n_manager.slots_cache[i][0].host in all_hosts
+- assert n_manager.slots_cache[i][1].host in all_hosts
+- assert n_manager.slots_cache[i][0].port in all_ports
+- assert n_manager.slots_cache[i][1].port in all_ports
+-
+- assert len(n_manager.nodes_cache) == 6
+-
+- def test_init_slots_cache_cluster_mode_disabled(self):
+- """
+- Test that creating a RedisCluster fails if one of the startup nodes
+- has cluster mode disabled
+- """
+- with pytest.raises(RedisClusterException) as e:
+- get_mocked_redis_client(
+- host=default_host, port=default_port, cluster_enabled=False
+- )
+- assert "Cluster mode is not enabled on this node" in str(e.value)
+-
+- def test_empty_startup_nodes(self):
+- """
+- It should not be possible to create a node manager with no nodes
+- specified
+- """
+- with pytest.raises(RedisClusterException):
+- NodesManager([])
+-
+- def test_wrong_startup_nodes_type(self):
+- """
+- If something other than a list-type iterable is provided it should
+- fail
+- """
+- with pytest.raises(RedisClusterException):
+- NodesManager({})
+-
+- def test_init_slots_cache_slots_collision(self, request):
+- """
+- Test that if 2 nodes do not agree on the same slots setup it should
+- raise an error. In this test both nodes will say that the first
+- slots block should be bound to different servers.
+- """
+- with patch.object(NodesManager, "create_redis_node") as create_redis_node:
+-
+- def create_mocked_redis_node(host, port, **kwargs):
+- """
+- Helper function to return custom slots cache data from
+- different redis nodes
+- """
+- if port == 7000:
+- result = [
+- [
+- 0,
+- 5460,
+- ["127.0.0.1", 7000],
+- ["127.0.0.1", 7003],
+- ],
+- [
+- 5461,
+- 10922,
+- ["127.0.0.1", 7001],
+- ["127.0.0.1", 7004],
+- ],
+- ]
+-
+- elif port == 7001:
+- result = [
+- [
+- 0,
+- 5460,
+- ["127.0.0.1", 7001],
+- ["127.0.0.1", 7003],
+- ],
+- [
+- 5461,
+- 10922,
+- ["127.0.0.1", 7000],
+- ["127.0.0.1", 7004],
+- ],
+- ]
+- else:
+- result = []
+-
+- r_node = Redis(host=host, port=port)
+-
+- orig_execute_command = r_node.execute_command
+-
+- def execute_command(*args, **kwargs):
+- if args[0] == "CLUSTER SLOTS":
+- return result
+- elif args[0] == "INFO":
+- return {"cluster_enabled": True}
+- elif args[1] == "cluster-require-full-coverage":
+- return {"cluster-require-full-coverage": "yes"}
+- else:
+- return orig_execute_command(*args, **kwargs)
+-
+- r_node.execute_command = execute_command
+- return r_node
+-
+- create_redis_node.side_effect = create_mocked_redis_node
+-
+- with pytest.raises(RedisClusterException) as ex:
+- node_1 = ClusterNode("127.0.0.1", 7000)
+- node_2 = ClusterNode("127.0.0.1", 7001)
+- RedisCluster(startup_nodes=[node_1, node_2])
+- assert str(ex.value).startswith(
+- "startup_nodes could not agree on a valid slots cache"
+- ), str(ex.value)
+-
+- def test_cluster_one_instance(self):
+- """
+- If the cluster consists of only 1 node then there are some hacks that must
+- be validated to work.
+- """
+- node = ClusterNode(default_host, default_port)
+- cluster_slots = [[0, 16383, ["", default_port]]]
+- rc = get_mocked_redis_client(startup_nodes=[node], cluster_slots=cluster_slots)
+-
+- n = rc.nodes_manager
+- assert len(n.nodes_cache) == 1
+- n_node = rc.get_node(node_name=node.name)
+- assert n_node is not None
+- assert n_node == node
+- assert n_node.server_type == PRIMARY
+- assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+- for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
+- assert n.slots_cache[i] == [n_node]
+-
+- def test_init_with_down_node(self):
+- """
+- If I can't connect to one of the nodes, everything should still work.
+- But if I can't connect to any of the nodes, an exception should be thrown.
+- """
+- with patch.object(NodesManager, "create_redis_node") as create_redis_node:
+-
+- def create_mocked_redis_node(host, port, **kwargs):
+- if port == 7000:
+- raise ConnectionError("mock connection error for 7000")
+-
+- r_node = Redis(host=host, port=port, decode_responses=True)
+-
+- def execute_command(*args, **kwargs):
+- if args[0] == "CLUSTER SLOTS":
+- return [
+- [
+- 0,
+- 8191,
+- ["127.0.0.1", 7001, "node_1"],
+- ],
+- [
+- 8192,
+- 16383,
+- ["127.0.0.1", 7002, "node_2"],
+- ],
+- ]
+- elif args[0] == "INFO":
+- return {"cluster_enabled": True}
+- elif args[1] == "cluster-require-full-coverage":
+- return {"cluster-require-full-coverage": "yes"}
+-
+- r_node.execute_command = execute_command
+-
+- return r_node
+-
+- create_redis_node.side_effect = create_mocked_redis_node
+-
+- node_1 = ClusterNode("127.0.0.1", 7000)
+- node_2 = ClusterNode("127.0.0.1", 7001)
+-
+- # If all startup nodes fail to connect, connection error should be
+- # thrown
+- with pytest.raises(RedisClusterException) as e:
+- RedisCluster(startup_nodes=[node_1])
+- assert "Redis Cluster cannot be connected" in str(e.value)
+-
+- with patch.object(
+- CommandsParser, "initialize", autospec=True
+- ) as cmd_parser_initialize:
+-
+- def cmd_init_mock(self, r):
+- self.commands = {
+- "get": {
+- "name": "get",
+- "arity": 2,
+- "flags": ["readonly", "fast"],
+- "first_key_pos": 1,
+- "last_key_pos": 1,
+- "step_count": 1,
+- }
+- }
+-
+- cmd_parser_initialize.side_effect = cmd_init_mock
+- # When at least one startup node is reachable, the cluster
+- # initialization should succeed
+- rc = RedisCluster(startup_nodes=[node_1, node_2])
+- assert rc.get_node(host=default_host, port=7001) is not None
+- assert rc.get_node(host=default_host, port=7002) is not None
+-
+-
+-@pytest.mark.onlycluster
+-class TestClusterPubSubObject:
+- """
+- Tests for the ClusterPubSub class
+- """
+-
+- def test_init_pubsub_with_host_and_port(self, r):
+- """
+- Test creation of pubsub instance with passed host and port
+- """
+- node = r.get_default_node()
+- p = r.pubsub(host=node.host, port=node.port)
+- assert p.get_pubsub_node() == node
+-
+- def test_init_pubsub_with_node(self, r):
+- """
+- Test creation of pubsub instance with passed node
+- """
+- node = r.get_default_node()
+- p = r.pubsub(node=node)
+- assert p.get_pubsub_node() == node
+-
+- def test_init_pubusub_without_specifying_node(self, r):
+- """
+- Test creation of pubsub instance without specifying a node. The node
+- should be determined based on the keyslot of the first command
+- execution.
+- """
+- channel_name = "foo"
+- node = r.get_node_from_key(channel_name)
+- p = r.pubsub()
+- assert p.get_pubsub_node() is None
+- p.subscribe(channel_name)
+- assert p.get_pubsub_node() == node
+-
+- def test_init_pubsub_with_a_non_existent_node(self, r):
+- """
+- Test creation of pubsub instance with a node that doesn't exist in the
+- cluster. RedisClusterException should be raised.
+- """
+- node = ClusterNode("1.1.1.1", 1111)
+- with pytest.raises(RedisClusterException):
+- r.pubsub(node)
+-
+- def test_init_pubsub_with_a_non_existent_host_port(self, r):
+- """
+- Test creation of pubsub instance with host and port that don't belong
+- to a node in the cluster.
+- RedisClusterException should be raised.
+- """
+- with pytest.raises(RedisClusterException):
+- r.pubsub(host="1.1.1.1", port=1111)
+-
+- def test_init_pubsub_host_or_port(self, r):
+- """
+- Test creation of pubsub instance with host but without port, and vice
+- versa. DataError should be raised.
+- """
+- with pytest.raises(DataError):
+- r.pubsub(host="localhost")
+-
+- with pytest.raises(DataError):
+- r.pubsub(port=16379)
+-
+- def test_get_redis_connection(self, r):
+- """
+- Test that get_redis_connection() returns the redis connection of the
+- set pubsub node
+- """
+- node = r.get_default_node()
+- p = r.pubsub(node=node)
+- assert p.get_redis_connection() == node.redis_connection
+-
+-
+-@pytest.mark.onlycluster
+-class TestClusterPipeline:
+- """
+- Tests for the ClusterPipeline class
+- """
+-
+- def test_blocked_methods(self, r):
+- """
+- Currently some method calls on a Cluster pipeline
+- are blocked when used in cluster mode.
+- They may be implemented in the future.
+- """
+- pipe = r.pipeline()
+- with pytest.raises(RedisClusterException):
+- pipe.multi()
+-
+- with pytest.raises(RedisClusterException):
+- pipe.immediate_execute_command()
+-
+- with pytest.raises(RedisClusterException):
+- pipe._execute_transaction(None, None, None)
+-
+- with pytest.raises(RedisClusterException):
+- pipe.load_scripts()
+-
+- with pytest.raises(RedisClusterException):
+- pipe.watch()
+-
+- with pytest.raises(RedisClusterException):
+- pipe.unwatch()
+-
+- with pytest.raises(RedisClusterException):
+- pipe.script_load_for_pipeline(None)
+-
+- with pytest.raises(RedisClusterException):
+- pipe.eval()
+-
+- def test_blocked_arguments(self, r):
+- """
+- Currently some arguments are blocked when used in cluster mode.
+- They may be implemented in the future.
+- """
+- with pytest.raises(RedisClusterException) as ex:
+- r.pipeline(transaction=True)
+-
+- assert (
+- str(ex.value).startswith("transaction is deprecated in cluster mode")
+- is True
+- )
+-
+- with pytest.raises(RedisClusterException) as ex:
+- r.pipeline(shard_hint=True)
+-
+- assert (
+- str(ex.value).startswith("shard_hint is deprecated in cluster mode") is True
+- )
+-
+- def test_redis_cluster_pipeline(self, r):
+- """
+- Test that we can use a pipeline with the RedisCluster class
+- """
+- with r.pipeline() as pipe:
+- pipe.set("foo", "bar")
+- pipe.get("foo")
+- assert pipe.execute() == [True, b"bar"]
+-
+- def test_mget_disabled(self, r):
+- """
+- Test that mget is disabled for ClusterPipeline
+- """
+- with r.pipeline() as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.mget(["a"])
+-
+- def test_mset_disabled(self, r):
+- """
+- Test that mset is disabled for ClusterPipeline
+- """
+- with r.pipeline() as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.mset({"a": 1, "b": 2})
+-
+- def test_rename_disabled(self, r):
+- """
+- Test that rename is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.rename("a", "b")
+-
+- def test_renamenx_disabled(self, r):
+- """
+- Test that renamenx is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.renamenx("a", "b")
+-
+- def test_delete_single(self, r):
+- """
+- Test a single delete operation
+- """
+- r["a"] = 1
+- with r.pipeline(transaction=False) as pipe:
+- pipe.delete("a")
+- assert pipe.execute() == [1]
+-
+- def test_multi_delete_unsupported(self, r):
+- """
+- Test that multi delete operation is unsupported
+- """
+- with r.pipeline(transaction=False) as pipe:
+- r["a"] = 1
+- r["b"] = 2
+- with pytest.raises(RedisClusterException):
+- pipe.delete("a", "b")
+-
+- def test_brpoplpush_disabled(self, r):
+- """
+- Test that brpoplpush is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.brpoplpush()
+-
+- def test_rpoplpush_disabled(self, r):
+- """
+- Test that rpoplpush is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.rpoplpush()
+-
+- def test_sort_disabled(self, r):
+- """
+- Test that sort is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sort()
+-
+- def test_sdiff_disabled(self, r):
+- """
+- Test that sdiff is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sdiff()
+-
+- def test_sdiffstore_disabled(self, r):
+- """
+- Test that sdiffstore is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sdiffstore()
+-
+- def test_sinter_disabled(self, r):
+- """
+- Test that sinter is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sinter()
+-
+- def test_sinterstore_disabled(self, r):
+- """
+- Test that sinterstore is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sinterstore()
+-
+- def test_smove_disabled(self, r):
+- """
+- Test that smove is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.smove()
+-
+- def test_sunion_disabled(self, r):
+- """
+- Test that sunion is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sunion()
+-
+- def test_sunionstore_disabled(self, r):
+- """
+- Test that sunionstore is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.sunionstore()
+-
+- def test_spfmerge_disabled(self, r):
+- """
+- Test that pfmerge is disabled for ClusterPipeline
+- """
+- with r.pipeline(transaction=False) as pipe:
+- with pytest.raises(RedisClusterException):
+- pipe.pfmerge()
+-
+- def test_multi_key_operation_with_a_single_slot(self, r):
+- """
+- Test multi key operation with a single slot
+- """
+- pipe = r.pipeline(transaction=False)
+- pipe.set("a{foo}", 1)
+- pipe.set("b{foo}", 2)
+- pipe.set("c{foo}", 3)
+- pipe.get("a{foo}")
+- pipe.get("b{foo}")
+- pipe.get("c{foo}")
+-
+- res = pipe.execute()
+- assert res == [True, True, True, b"1", b"2", b"3"]
+-
+- def test_multi_key_operation_with_multi_slots(self, r):
+- """
+- Test multi key operation with more than one slot
+- """
+- pipe = r.pipeline(transaction=False)
+- pipe.set("a{foo}", 1)
+- pipe.set("b{foo}", 2)
+- pipe.set("c{foo}", 3)
+- pipe.set("bar", 4)
+- pipe.set("bazz", 5)
+- pipe.get("a{foo}")
+- pipe.get("b{foo}")
+- pipe.get("c{foo}")
+- pipe.get("bar")
+- pipe.get("bazz")
+- res = pipe.execute()
+- assert res == [True, True, True, True, True, b"1", b"2", b"3", b"4", b"5"]
+-
+- def test_connection_error_not_raised(self, r):
+- """
+- Test that the pipeline doesn't raise an error on connection error when
+- raise_on_error=False
+- """
+- key = "foo"
+- node = r.get_node_from_key(key, False)
+-
+- def raise_connection_error():
+- e = ConnectionError("error")
+- return e
+-
+- with r.pipeline() as pipe:
+- mock_node_resp_func(node, raise_connection_error)
+- res = pipe.get(key).get(key).execute(raise_on_error=False)
+- assert node.redis_connection.connection.read_response.called
+- assert isinstance(res[0], ConnectionError)
+-
+- def test_connection_error_raised(self, r):
+- """
+- Test that the pipeline raises an error on connection error when
+- raise_on_error=True
+- """
+- key = "foo"
+- node = r.get_node_from_key(key, False)
+-
+- def raise_connection_error():
+- e = ConnectionError("error")
+- return e
+-
+- with r.pipeline() as pipe:
+- mock_node_resp_func(node, raise_connection_error)
+- with pytest.raises(ConnectionError):
+- pipe.get(key).get(key).execute(raise_on_error=True)
+-
+- def test_asking_error(self, r):
+- """
+- Test redirection on ASK error
+- """
+- key = "foo"
+- first_node = r.get_node_from_key(key, False)
+- ask_node = None
+- for node in r.get_nodes():
+- if node != first_node:
+- ask_node = node
+- break
+- if ask_node is None:
+- warnings.warn("skipping this test since the cluster has only one " "node")
+- return
+- ask_msg = f"{r.keyslot(key)} {ask_node.host}:{ask_node.port}"
+-
+- def raise_ask_error():
+- raise AskError(ask_msg)
+-
+- with r.pipeline() as pipe:
+- mock_node_resp_func(first_node, raise_ask_error)
+- mock_node_resp(ask_node, "MOCK_OK")
+- res = pipe.get(key).execute()
+- assert first_node.redis_connection.connection.read_response.called
+- assert ask_node.redis_connection.connection.read_response.called
+- assert res == ["MOCK_OK"]
+-
+- def test_empty_stack(self, r):
+- """
+- If a pipeline is executed with no commands it should
+- return an empty list.
+- """
+- p = r.pipeline()
+- result = p.execute()
+- assert result == []
+-
+-
+-@pytest.mark.onlycluster
+-class TestReadOnlyPipeline:
+- """
+- Tests for ClusterPipeline class in readonly mode
+- """
+-
+- def test_pipeline_readonly(self, r):
+- """
+- In readonly mode, we support get-related commands only.
+- """
+- r.readonly(target_nodes="all")
+- r.set("foo71", "a1") # we assume this key is set on 127.0.0.1:7001
+- r.zadd("foo88", {"z1": 1}) # we assume this key is set on 127.0.0.1:7002
+- r.zadd("foo88", {"z2": 4})
+-
+- with r.pipeline() as readonly_pipe:
+- readonly_pipe.get("foo71").zrange("foo88", 0, 5, withscores=True)
+- assert readonly_pipe.execute() == [
+- b"a1",
+- [(b"z1", 1.0), (b"z2", 4)],
+- ]
+-
+- def test_moved_redirection_on_slave_with_default(self, r):
+- """
+- On a pipeline, we are redirected once and finally read from the master with a
+- readonly client when the data has been completely moved.
+- """
+- key = "bar"
+- r.set(key, "foo")
+- # set read_from_replicas to True
+- r.read_from_replicas = True
+- primary = r.get_node_from_key(key, False)
+- replica = r.get_node_from_key(key, True)
+- with r.pipeline() as readwrite_pipe:
+- mock_node_resp(primary, "MOCK_FOO")
+- if replica is not None:
+- moved_error = f"{r.keyslot(key)} {primary.host}:{primary.port}"
+-
+- def raise_moved_error():
+- raise MovedError(moved_error)
+-
+- mock_node_resp_func(replica, raise_moved_error)
+- assert readwrite_pipe.reinitialize_counter == 0
+- readwrite_pipe.get(key).get(key)
+- assert readwrite_pipe.execute() == ["MOCK_FOO", "MOCK_FOO"]
+- if replica is not None:
+- # the slot has a replica as well, so MovedError should have
+- # occurred. If MovedError occurs, we should see the
+- # reinitialize_counter increase.
+- assert readwrite_pipe.reinitialize_counter == 1
+- conn = replica.redis_connection.connection
+- assert conn.read_response.called is True
+-
+- def test_readonly_pipeline_from_readonly_client(self, request):
+- """
+- Test that the pipeline is initialized with readonly mode if the client
+- has it enabled
+- """
+- # Create a cluster with reading from replications
+- ro = _get_client(RedisCluster, request, read_from_replicas=True)
+- key = "bar"
+- ro.set(key, "foo")
+- import time
+-
+- time.sleep(0.2)
+- with ro.pipeline() as readonly_pipe:
+- mock_all_nodes_resp(ro, "MOCK_OK")
+- assert readonly_pipe.read_from_replicas is True
+- assert readonly_pipe.get(key).get(key).execute() == ["MOCK_OK", "MOCK_OK"]
+- slot_nodes = ro.nodes_manager.slots_cache[ro.keyslot(key)]
+- if len(slot_nodes) > 1:
+- executed_on_replica = False
+- for node in slot_nodes:
+- if node.server_type == REPLICA:
+- conn = node.redis_connection.connection
+- executed_on_replica = conn.read_response.called
+- if executed_on_replica:
+- break
+- assert executed_on_replica is True
+-
+-
+-@pytest.mark.onlycluster
+-class TestClusterMonitor:
+- def test_wait_command_not_found(self, r):
+- "Make sure the wait_for_command func works when command is not found"
+- key = "foo"
+- node = r.get_node_from_key(key)
+- with r.monitor(target_node=node) as m:
+- response = wait_for_command(r, m, "nothing", key=key)
+- assert response is None
+-
+- def test_response_values(self, r):
+- db = 0
+- key = "foo"
+- node = r.get_node_from_key(key)
+- with r.monitor(target_node=node) as m:
+- r.ping(target_nodes=node)
+- response = wait_for_command(r, m, "PING", key=key)
+- assert isinstance(response["time"], float)
+- assert response["db"] == db
+- assert response["client_type"] in ("tcp", "unix")
+- assert isinstance(response["client_address"], str)
+- assert isinstance(response["client_port"], str)
+- assert response["command"] == "PING"
+-
+- def test_command_with_quoted_key(self, r):
+- key = "{foo}1"
+- node = r.get_node_from_key(key)
+- with r.monitor(node) as m:
+- r.get('{foo}"bar')
+- response = wait_for_command(r, m, 'GET {foo}"bar', key=key)
+- assert response["command"] == 'GET {foo}"bar'
+-
+- def test_command_with_binary_data(self, r):
+- key = "{foo}1"
+- node = r.get_node_from_key(key)
+- with r.monitor(target_node=node) as m:
+- byte_string = b"{foo}bar\x92"
+- r.get(byte_string)
+- response = wait_for_command(r, m, "GET {foo}bar\\x92", key=key)
+- assert response["command"] == "GET {foo}bar\\x92"
+-
+- def test_command_with_escaped_data(self, r):
+- key = "{foo}1"
+- node = r.get_node_from_key(key)
+- with r.monitor(target_node=node) as m:
+- byte_string = b"{foo}bar\\x92"
+- r.get(byte_string)
+- response = wait_for_command(r, m, "GET {foo}bar\\\\x92", key=key)
+- assert response["command"] == "GET {foo}bar\\\\x92"
+diff --git a/tests/test_commands.py b/tests/test_commands.py
+index b28b63e..744697f 100644
+--- a/tests/test_commands.py
++++ b/tests/test_commands.py
+@@ -4193,18 +4193,6 @@ class TestRedisCommands:
+ assert r.replicaof("NO ONE")
+ assert r.replicaof("NO", "ONE")
+
+- @skip_if_server_version_lt("2.8.0")
+- def test_sync(self, r):
+- r2 = redis.Redis(port=6380, decode_responses=False)
+- res = r2.sync()
+- assert b"REDIS" in res
+-
+- @skip_if_server_version_lt("2.8.0")
+- def test_psync(self, r):
+- r2 = redis.Redis(port=6380, decode_responses=False)
+- res = r2.psync(r2.client_id(), 1)
+- assert b"FULLRESYNC" in res
+-
+
+ @pytest.mark.onlynoncluster
+ class TestBinarySave:
diff --git a/tests/test_connection.py b/tests/test_connection.py
-index 7c44768..0a8e9ad 100644
+index d94a815..7da8789 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
-@@ -15,24 +15,3 @@ def test_invalid_response(r):
- with pytest.raises(InvalidResponse) as cm:
- parser.read_response()
- assert str(cm.value) == 'Protocol Error: %r' % raw
--
--
--@skip_if_server_version_lt('4.0.0')
+@@ -21,28 +21,6 @@ def test_invalid_response(r):
+ assert str(cm.value) == f"Protocol Error: {raw!r}"
+
+
+-@skip_if_server_version_lt("4.0.0")
-@pytest.mark.redismod
-def test_loading_external_modules(modclient):
- def inner():
- pass
-
-- modclient.load_external_module('myfuncname', inner)
-- assert getattr(modclient, 'myfuncname') == inner
-- assert isinstance(getattr(modclient, 'myfuncname'), types.FunctionType)
+- modclient.load_external_module("myfuncname", inner)
+- assert getattr(modclient, "myfuncname") == inner
+- assert isinstance(getattr(modclient, "myfuncname"), types.FunctionType)
-
- # and call it
- from redis.commands import RedisModuleCommands
+-
- j = RedisModuleCommands.json
-- modclient.load_external_module('sometestfuncname', j)
+- modclient.load_external_module("sometestfuncname", j)
-
- # d = {'hello': 'world!'}
- # mod = j(modclient)
- # mod.set("fookey", ".", d)
- # assert mod.get('fookey') == d
+-
+-
+ class TestConnection:
+ def test_disconnect(self):
+ conn = Connection()
+diff --git a/tests/test_graph.py b/tests/test_graph.py
+deleted file mode 100644
+index c6dc9a4..0000000
+--- a/tests/test_graph.py
++++ /dev/null
+@@ -1,477 +0,0 @@
+-import pytest
+-
+-from redis.commands.graph import Edge, Node, Path
+-from redis.exceptions import ResponseError
+-
+-
+-@pytest.fixture
+-def client(modclient):
+- modclient.flushdb()
+- return modclient
+-
+-
+-@pytest.mark.redismod
+-def test_bulk(client):
+- with pytest.raises(NotImplementedError):
+- client.graph().bulk()
+- client.graph().bulk(foo="bar!")
+-
+-
+-@pytest.mark.redismod
+-def test_graph_creation(client):
+- graph = client.graph()
+-
+- john = Node(
+- label="person",
+- properties={
+- "name": "John Doe",
+- "age": 33,
+- "gender": "male",
+- "status": "single",
+- },
+- )
+- graph.add_node(john)
+- japan = Node(label="country", properties={"name": "Japan"})
+-
+- graph.add_node(japan)
+- edge = Edge(john, "visited", japan, properties={"purpose": "pleasure"})
+- graph.add_edge(edge)
+-
+- graph.commit()
+-
+- query = (
+- 'MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country) '
+- "RETURN p, v, c"
+- )
+-
+- result = graph.query(query)
+-
+- person = result.result_set[0][0]
+- visit = result.result_set[0][1]
+- country = result.result_set[0][2]
+-
+- assert person == john
+- assert visit.properties == edge.properties
+- assert country == japan
+-
+- query = """RETURN [1, 2.3, "4", true, false, null]"""
+- result = graph.query(query)
+- assert [1, 2.3, "4", True, False, None] == result.result_set[0][0]
+-
+- # All done, remove graph.
+- graph.delete()
+-
+-
+-@pytest.mark.redismod
+-def test_array_functions(client):
+- query = """CREATE (p:person{name:'a',age:32, array:[0,1,2]})"""
+- client.graph().query(query)
+-
+- query = """WITH [0,1,2] as x return x"""
+- result = client.graph().query(query)
+- assert [0, 1, 2] == result.result_set[0][0]
+-
+- query = """MATCH(n) return collect(n)"""
+- result = client.graph().query(query)
+-
+- a = Node(
+- node_id=0,
+- label="person",
+- properties={"name": "a", "age": 32, "array": [0, 1, 2]},
+- )
+-
+- assert [a] == result.result_set[0][0]
+-
+-
+-@pytest.mark.redismod
+-def test_path(client):
+- node0 = Node(node_id=0, label="L1")
+- node1 = Node(node_id=1, label="L1")
+- edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1})
+-
+- graph = client.graph()
+- graph.add_node(node0)
+- graph.add_node(node1)
+- graph.add_edge(edge01)
+- graph.flush()
+-
+- path01 = Path.new_empty_path().add_node(node0).add_edge(edge01).add_node(node1)
+- expected_results = [[path01]]
+-
+- query = "MATCH p=(:L1)-[:R1]->(:L1) RETURN p ORDER BY p"
+- result = graph.query(query)
+- assert expected_results == result.result_set
+-
+-
+-@pytest.mark.redismod
+-def test_param(client):
+- params = [1, 2.3, "str", True, False, None, [0, 1, 2]]
+- query = "RETURN $param"
+- for param in params:
+- result = client.graph().query(query, {"param": param})
+- expected_results = [[param]]
+- assert expected_results == result.result_set
+-
+-
+-@pytest.mark.redismod
+-def test_map(client):
+- query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
+-
+- actual = client.graph().query(query).result_set[0][0]
+- expected = {
+- "a": 1,
+- "b": "str",
+- "c": None,
+- "d": [1, 2, 3],
+- "e": True,
+- "f": {"x": 1, "y": 2},
+- }
+-
+- assert actual == expected
+-
+-
+-@pytest.mark.redismod
+-def test_point(client):
+- query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})"
+- expected_lat = 32.070794860
+- expected_lon = 34.820751118
+- actual = client.graph().query(query).result_set[0][0]
+- assert abs(actual["latitude"] - expected_lat) < 0.001
+- assert abs(actual["longitude"] - expected_lon) < 0.001
+-
+- query = "RETURN point({latitude: 32, longitude: 34.0})"
+- expected_lat = 32
+- expected_lon = 34
+- actual = client.graph().query(query).result_set[0][0]
+- assert abs(actual["latitude"] - expected_lat) < 0.001
+- assert abs(actual["longitude"] - expected_lon) < 0.001
+-
+-
+-@pytest.mark.redismod
+-def test_index_response(client):
+- result_set = client.graph().query("CREATE INDEX ON :person(age)")
+- assert 1 == result_set.indices_created
+-
+- result_set = client.graph().query("CREATE INDEX ON :person(age)")
+- assert 0 == result_set.indices_created
+-
+- result_set = client.graph().query("DROP INDEX ON :person(age)")
+- assert 1 == result_set.indices_deleted
+-
+- with pytest.raises(ResponseError):
+- client.graph().query("DROP INDEX ON :person(age)")
+-
+-
+-@pytest.mark.redismod
+-def test_stringify_query_result(client):
+- graph = client.graph()
+-
+- john = Node(
+- alias="a",
+- label="person",
+- properties={
+- "name": "John Doe",
+- "age": 33,
+- "gender": "male",
+- "status": "single",
+- },
+- )
+- graph.add_node(john)
+-
+- japan = Node(alias="b", label="country", properties={"name": "Japan"})
+- graph.add_node(japan)
+-
+- edge = Edge(john, "visited", japan, properties={"purpose": "pleasure"})
+- graph.add_edge(edge)
+-
+- assert (
+- str(john)
+- == """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})""" # noqa
+- )
+- assert (
+- str(edge)
+- == """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})""" # noqa
+- + """-[:visited{purpose:"pleasure"}]->"""
+- + """(b:country{name:"Japan"})"""
+- )
+- assert str(japan) == """(b:country{name:"Japan"})"""
+-
+- graph.commit()
+-
+- query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country)
+- RETURN p, v, c"""
+-
+- result = client.graph().query(query)
+- person = result.result_set[0][0]
+- visit = result.result_set[0][1]
+- country = result.result_set[0][2]
+-
+- assert (
+- str(person)
+- == """(:person{age:33,gender:"male",name:"John Doe",status:"single"})""" # noqa
+- )
+- assert str(visit) == """()-[:visited{purpose:"pleasure"}]->()"""
+- assert str(country) == """(:country{name:"Japan"})"""
+-
+- graph.delete()
+-
+-
+-@pytest.mark.redismod
+-def test_optional_match(client):
+- # Build a graph of form (a)-[R]->(b)
+- node0 = Node(node_id=0, label="L1", properties={"value": "a"})
+- node1 = Node(node_id=1, label="L1", properties={"value": "b"})
+-
+- edge01 = Edge(node0, "R", node1, edge_id=0)
+-
+- graph = client.graph()
+- graph.add_node(node0)
+- graph.add_node(node1)
+- graph.add_edge(edge01)
+- graph.flush()
+-
+- # Issue a query that collects all outgoing edges from both nodes
+- # (the second has none)
+- query = """MATCH (a) OPTIONAL MATCH (a)-[e]->(b) RETURN a, e, b ORDER BY a.value""" # noqa
+- expected_results = [[node0, edge01, node1], [node1, None, None]]
+-
+- result = client.graph().query(query)
+- assert expected_results == result.result_set
+-
+- graph.delete()
+-
+-
+-@pytest.mark.redismod
+-def test_cached_execution(client):
+- client.graph().query("CREATE ()")
+-
+- uncached_result = client.graph().query("MATCH (n) RETURN n, $param", {"param": [0]})
+- assert uncached_result.cached_execution is False
+-
+- # loop to make sure the query is cached on each thread on server
+- for x in range(0, 64):
+- cached_result = client.graph().query(
+- "MATCH (n) RETURN n, $param", {"param": [0]}
+- )
+- assert uncached_result.result_set == cached_result.result_set
+-
+- # should be cached on all threads by now
+- assert cached_result.cached_execution
+-
+-
+-@pytest.mark.redismod
+-def test_explain(client):
+- create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+- (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
+- (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
+- client.graph().query(create_query)
+-
+- result = client.graph().explain(
+- "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params", # noqa
+- {"name": "Yehuda"},
+- )
+- expected = "Results\n Project\n Conditional Traverse | (t:Team)->(r:Rider)\n Filter\n Node By Label Scan | (t:Team)" # noqa
+- assert result == expected
+-
+-
+-@pytest.mark.redismod
+-def test_slowlog(client):
+- create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+- (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
+- (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
+- client.graph().query(create_query)
+-
+- results = client.graph().slowlog()
+- assert results[0][1] == "GRAPH.QUERY"
+- assert results[0][2] == create_query
+-
+-
+-@pytest.mark.redismod
+-def test_query_timeout(client):
+- # Build a sample graph with 1000 nodes.
+- client.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
+- # Issue a long-running query with a 1-millisecond timeout.
+- with pytest.raises(ResponseError):
+- client.graph().query("MATCH (a), (b), (c), (d) RETURN *", timeout=1)
+- assert False is False
+-
+- with pytest.raises(Exception):
+- client.graph().query("RETURN 1", timeout="str")
+- assert False is False
+-
+-
+-@pytest.mark.redismod
+-def test_read_only_query(client):
+- with pytest.raises(Exception):
+- # Issue a write query, specifying read-only true,
+- # this call should fail.
+- client.graph().query("CREATE (p:person {name:'a'})", read_only=True)
+- assert False is False
+-
+-
+-@pytest.mark.redismod
+-def test_profile(client):
+- q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
+- profile = client.graph().profile(q).result_set
+- assert "Create | Records produced: 3" in profile
+- assert "Unwind | Records produced: 3" in profile
+-
+- q = "MATCH (p:Person) WHERE p.v > 1 RETURN p"
+- profile = client.graph().profile(q).result_set
+- assert "Results | Records produced: 2" in profile
+- assert "Project | Records produced: 2" in profile
+- assert "Filter | Records produced: 2" in profile
+- assert "Node By Label Scan | (p:Person) | Records produced: 3" in profile
+-
+-
+-@pytest.mark.redismod
+-def test_config(client):
+- config_name = "RESULTSET_SIZE"
+- config_value = 3
+-
+- # Set configuration
+- response = client.graph().config(config_name, config_value, set=True)
+- assert response == "OK"
+-
+- # Make sure config has been updated.
+- response = client.graph().config(config_name, set=False)
+- expected_response = [config_name, config_value]
+- assert response == expected_response
+-
+- config_name = "QUERY_MEM_CAPACITY"
+- config_value = 1 << 20 # 1MB
+-
+- # Set configuration
+- response = client.graph().config(config_name, config_value, set=True)
+- assert response == "OK"
+-
+- # Make sure config has been updated.
+- response = client.graph().config(config_name, set=False)
+- expected_response = [config_name, config_value]
+- assert response == expected_response
+-
+- # reset to default
+- client.graph().config("QUERY_MEM_CAPACITY", 0, set=True)
+- client.graph().config("RESULTSET_SIZE", -100, set=True)
+-
+-
+-@pytest.mark.redismod
+-def test_list_keys(client):
+- result = client.graph().list_keys()
+- assert result == []
+-
+- client.execute_command("GRAPH.EXPLAIN", "G", "RETURN 1")
+- result = client.graph().list_keys()
+- assert result == ["G"]
+-
+- client.execute_command("GRAPH.EXPLAIN", "X", "RETURN 1")
+- result = client.graph().list_keys()
+- assert result == ["G", "X"]
+-
+- client.delete("G")
+- client.rename("X", "Z")
+- result = client.graph().list_keys()
+- assert result == ["Z"]
+-
+- client.delete("Z")
+- result = client.graph().list_keys()
+- assert result == []
+-
+-
+-@pytest.mark.redismod
+-def test_multi_label(client):
+- redis_graph = client.graph("g")
+-
+- node = Node(label=["l", "ll"])
+- redis_graph.add_node(node)
+- redis_graph.commit()
+-
+- query = "MATCH (n) RETURN n"
+- result = redis_graph.query(query)
+- result_node = result.result_set[0][0]
+- assert result_node == node
+-
+- try:
+- Node(label=1)
+- assert False
+- except AssertionError:
+- assert True
+-
+- try:
+- Node(label=["l", 1])
+- assert False
+- except AssertionError:
+- assert True
+-
+-
+-@pytest.mark.redismod
+-def test_cache_sync(client):
+- pass
+- return
+- # This test verifies that client internal graph schema cache stays
+- # in sync with the graph schema
+- #
+- # Client B will try to get Client A out of sync by:
+- # 1. deleting the graph
+- # 2. reconstructing the graph in a different order; this will cause
+- # a difference in the current mapping between string IDs and the
+- # mapping Client A is aware of
+- #
+- # Client A should pick up on the changes by comparing graph versions
+- # and resyncing its cache.
+-
+- A = client.graph("cache-sync")
+- B = client.graph("cache-sync")
+-
+- # Build order:
+- # 1. introduce label 'L' and 'K'
+- # 2. introduce attribute 'x' and 'q'
+- # 3. introduce relationship-type 'R' and 'S'
+-
+- A.query("CREATE (:L)")
+- B.query("CREATE (:K)")
+- A.query("MATCH (n) SET n.x = 1")
+- B.query("MATCH (n) SET n.q = 1")
+- A.query("MATCH (n) CREATE (n)-[:R]->()")
+- B.query("MATCH (n) CREATE (n)-[:S]->()")
+-
+- # Cause client A to populate its cache
+- A.query("MATCH (n)-[e]->() RETURN n, e")
+-
+- assert len(A._labels) == 2
+- assert len(A._properties) == 2
+- assert len(A._relationshipTypes) == 2
+- assert A._labels[0] == "L"
+- assert A._labels[1] == "K"
+- assert A._properties[0] == "x"
+- assert A._properties[1] == "q"
+- assert A._relationshipTypes[0] == "R"
+- assert A._relationshipTypes[1] == "S"
+-
+- # Have client B reconstruct the graph in a different order.
+- B.delete()
+-
+- # Build order:
+- # 1. introduce relationship-type 'R'
+- # 2. introduce label 'L'
+- # 3. introduce attribute 'x'
+- B.query("CREATE ()-[:S]->()")
+- B.query("CREATE ()-[:R]->()")
+- B.query("CREATE (:K)")
+- B.query("CREATE (:L)")
+- B.query("MATCH (n) SET n.q = 1")
+- B.query("MATCH (n) SET n.x = 1")
+-
+- # A's internal cached mapping is now out of sync
+- # issue a query and make sure A's cache is synced.
+- A.query("MATCH (n)-[e]->() RETURN n, e")
+-
+- assert len(A._labels) == 2
+- assert len(A._properties) == 2
+- assert len(A._relationshipTypes) == 2
+- assert A._labels[0] == "K"
+- assert A._labels[1] == "L"
+- assert A._properties[0] == "q"
+- assert A._properties[1] == "x"
+- assert A._relationshipTypes[0] == "S"
+- assert A._relationshipTypes[1] == "R"
+diff --git a/tests/test_graph_utils/__init__.py b/tests/test_graph_utils/__init__.py
+deleted file mode 100644
+index e69de29..0000000
+diff --git a/tests/test_graph_utils/test_edge.py b/tests/test_graph_utils/test_edge.py
+deleted file mode 100644
+index 42358de..0000000
+--- a/tests/test_graph_utils/test_edge.py
++++ /dev/null
+@@ -1,77 +0,0 @@
+-import pytest
+-
+-from redis.commands.graph import edge, node
+-
+-
+-@pytest.mark.redismod
+-def test_init():
+-
+- with pytest.raises(AssertionError):
+- edge.Edge(None, None, None)
+- edge.Edge(node.Node(), None, None)
+- edge.Edge(None, None, node.Node())
+-
+- assert isinstance(
+- edge.Edge(node.Node(node_id=1), None, node.Node(node_id=2)), edge.Edge
+- )
+-
+-
+-@pytest.mark.redismod
+-def test_toString():
+- props_result = edge.Edge(
+- node.Node(), None, node.Node(), properties={"a": "a", "b": 10}
+- ).toString()
+- assert props_result == '{a:"a",b:10}'
+-
+- no_props_result = edge.Edge(
+- node.Node(), None, node.Node(), properties={}
+- ).toString()
+- assert no_props_result == ""
+-
+-
+-@pytest.mark.redismod
+-def test_stringify():
+- john = node.Node(
+- alias="a",
+- label="person",
+- properties={"name": "John Doe", "age": 33, "someArray": [1, 2, 3]},
+- )
+- japan = node.Node(alias="b", label="country", properties={"name": "Japan"})
+- edge_with_relation = edge.Edge(
+- john, "visited", japan, properties={"purpose": "pleasure"}
+- )
+- assert (
+- '(a:person{age:33,name:"John Doe",someArray:[1, 2, 3]})'
+- '-[:visited{purpose:"pleasure"}]->'
+- '(b:country{name:"Japan"})' == str(edge_with_relation)
+- )
+-
+- edge_no_relation_no_props = edge.Edge(japan, "", john)
+- assert (
+- '(b:country{name:"Japan"})'
+- "-[]->"
+- '(a:person{age:33,name:"John Doe",someArray:[1, 2, 3]})'
+- == str(edge_no_relation_no_props)
+- )
+-
+- edge_only_props = edge.Edge(john, "", japan, properties={"a": "b", "c": 3})
+- assert (
+- '(a:person{age:33,name:"John Doe",someArray:[1, 2, 3]})'
+- '-[{a:"b",c:3}]->'
+- '(b:country{name:"Japan"})' == str(edge_only_props)
+- )
+-
+-
+-@pytest.mark.redismod
+-def test_comparision():
+- node1 = node.Node(node_id=1)
+- node2 = node.Node(node_id=2)
+- node3 = node.Node(node_id=3)
+-
+- edge1 = edge.Edge(node1, None, node2)
+- assert edge1 == edge.Edge(node1, None, node2)
+- assert edge1 != edge.Edge(node1, "bla", node2)
+- assert edge1 != edge.Edge(node1, None, node3)
+- assert edge1 != edge.Edge(node3, None, node2)
+- assert edge1 != edge.Edge(node2, None, node1)
+- assert edge1 != edge.Edge(node1, None, node2, properties={"a": 10})
+diff --git a/tests/test_graph_utils/test_node.py b/tests/test_graph_utils/test_node.py
+deleted file mode 100644
+index faf8ab6..0000000
+--- a/tests/test_graph_utils/test_node.py
++++ /dev/null
+@@ -1,52 +0,0 @@
+-import pytest
+-
+-from redis.commands.graph import node
+-
+-
+-@pytest.fixture
+-def fixture():
+- no_args = node.Node()
+- no_props = node.Node(node_id=1, alias="alias", label="l")
+- props_only = node.Node(properties={"a": "a", "b": 10})
+- no_label = node.Node(node_id=1, alias="alias", properties={"a": "a"})
+- multi_label = node.Node(node_id=1, alias="alias", label=["l", "ll"])
+- return no_args, no_props, props_only, no_label, multi_label
+-
+-
+-@pytest.mark.redismod
+-def test_toString(fixture):
+- no_args, no_props, props_only, no_label, multi_label = fixture
+- assert no_args.toString() == ""
+- assert no_props.toString() == ""
+- assert props_only.toString() == '{a:"a",b:10}'
+- assert no_label.toString() == '{a:"a"}'
+- assert multi_label.toString() == ""
+-
+-
+-@pytest.mark.redismod
+-def test_stringify(fixture):
+- no_args, no_props, props_only, no_label, multi_label = fixture
+- assert str(no_args) == "()"
+- assert str(no_props) == "(alias:l)"
+- assert str(props_only) == '({a:"a",b:10})'
+- assert str(no_label) == '(alias{a:"a"})'
+- assert str(multi_label) == "(alias:l:ll)"
+-
+-
+-@pytest.mark.redismod
+-def test_comparision(fixture):
+- no_args, no_props, props_only, no_label, multi_label = fixture
+-
+- assert node.Node() == node.Node()
+- assert node.Node(node_id=1) == node.Node(node_id=1)
+- assert node.Node(node_id=1) != node.Node(node_id=2)
+- assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="b")
+- assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="a")
+- assert node.Node(node_id=1, label="a") == node.Node(node_id=1, label="a")
+- assert node.Node(node_id=1, label="a") != node.Node(node_id=1, label="b")
+- assert node.Node(node_id=1, alias="a", label="l") == node.Node(
+- node_id=1, alias="a", label="l"
+- )
+- assert node.Node(alias="a", label="l") != node.Node(alias="a", label="l1")
+- assert node.Node(properties={"a": 10}) == node.Node(properties={"a": 10})
+- assert node.Node() != node.Node(properties={"a": 10})
+diff --git a/tests/test_graph_utils/test_path.py b/tests/test_graph_utils/test_path.py
+deleted file mode 100644
+index d581269..0000000
+--- a/tests/test_graph_utils/test_path.py
++++ /dev/null
+@@ -1,91 +0,0 @@
+-import pytest
+-
+-from redis.commands.graph import edge, node, path
+-
+-
+-@pytest.mark.redismod
+-def test_init():
+- with pytest.raises(TypeError):
+- path.Path(None, None)
+- path.Path([], None)
+- path.Path(None, [])
+-
+- assert isinstance(path.Path([], []), path.Path)
+-
+-
+-@pytest.mark.redismod
+-def test_new_empty_path():
+- new_empty_path = path.Path.new_empty_path()
+- assert isinstance(new_empty_path, path.Path)
+- assert new_empty_path._nodes == []
+- assert new_empty_path._edges == []
+-
+-
+-@pytest.mark.redismod
+-def test_wrong_flows():
+- node_1 = node.Node(node_id=1)
+- node_2 = node.Node(node_id=2)
+- node_3 = node.Node(node_id=3)
+-
+- edge_1 = edge.Edge(node_1, None, node_2)
+- edge_2 = edge.Edge(node_1, None, node_3)
+-
+- p = path.Path.new_empty_path()
+- with pytest.raises(AssertionError):
+- p.add_edge(edge_1)
+-
+- p.add_node(node_1)
+- with pytest.raises(AssertionError):
+- p.add_node(node_2)
+-
+- p.add_edge(edge_1)
+- with pytest.raises(AssertionError):
+- p.add_edge(edge_2)
+-
+-
+-@pytest.mark.redismod
+-def test_nodes_and_edges():
+- node_1 = node.Node(node_id=1)
+- node_2 = node.Node(node_id=2)
+- edge_1 = edge.Edge(node_1, None, node_2)
+-
+- p = path.Path.new_empty_path()
+- assert p.nodes() == []
+- p.add_node(node_1)
+- assert [] == p.edges()
+- assert 0 == p.edge_count()
+- assert [node_1] == p.nodes()
+- assert node_1 == p.get_node(0)
+- assert node_1 == p.first_node()
+- assert node_1 == p.last_node()
+- assert 1 == p.nodes_count()
+- p.add_edge(edge_1)
+- assert [edge_1] == p.edges()
+- assert 1 == p.edge_count()
+- assert edge_1 == p.get_relationship(0)
+- p.add_node(node_2)
+- assert [node_1, node_2] == p.nodes()
+- assert node_1 == p.first_node()
+- assert node_2 == p.last_node()
+- assert 2 == p.nodes_count()
+-
+-
+-@pytest.mark.redismod
+-def test_compare():
+- node_1 = node.Node(node_id=1)
+- node_2 = node.Node(node_id=2)
+- edge_1 = edge.Edge(node_1, None, node_2)
+-
+- assert path.Path.new_empty_path() == path.Path.new_empty_path()
+- assert path.Path(nodes=[node_1, node_2], edges=[edge_1]) == path.Path(
+- nodes=[node_1, node_2], edges=[edge_1]
+- )
+- assert path.Path(nodes=[node_1], edges=[]) != path.Path(nodes=[], edges=[])
+- assert path.Path(nodes=[node_1], edges=[]) != path.Path(nodes=[], edges=[])
+- assert path.Path(nodes=[node_1], edges=[]) != path.Path(nodes=[node_2], edges=[])
+- assert path.Path(nodes=[node_1], edges=[edge_1]) != path.Path(
+- nodes=[node_1], edges=[]
+- )
+- assert path.Path(nodes=[node_1], edges=[edge_1]) != path.Path(
+- nodes=[node_2], edges=[edge_1]
+- )
diff --git a/tests/test_json.py b/tests/test_json.py
deleted file mode 100644
-index abc5776..0000000
+index 6980e67..0000000
--- a/tests/test_json.py
+++ /dev/null
-@@ -1,1416 +0,0 @@
+@@ -1,1432 +0,0 @@
-import pytest
+-
-import redis
--from redis.commands.json.path import Path
-from redis import exceptions
--from redis.commands.json.decoders import unstring, decode_list
+-from redis.commands.json.decoders import decode_list, unstring
+-from redis.commands.json.path import Path
+-
-from .conftest import skip_ifmodversion_lt
-
-
@@ -4112,9 +13024,7 @@ index abc5776..0000000
-@pytest.mark.redismod
-def test_nonascii_setgetdelete(client):
- assert client.json().set("notascii", Path.rootPath(), "hyvää-élève")
-- assert "hyvää-élève" == client.json().get(
-- "notascii",
-- no_escape=True)
+- assert "hyvää-élève" == client.json().get("notascii", no_escape=True)
- assert 1 == client.json().delete("notascii")
- assert client.exists("notascii") == 0
-
@@ -4198,14 +13108,14 @@ index abc5776..0000000
- assert "foobar" == client.json().get("jsonkey", Path.rootPath())
-
-
--@pytest.mark.redismod
--def test_debug(client):
-- client.json().set("str", Path.rootPath(), "foo")
-- assert 24 == client.json().debug("MEMORY", "str", Path.rootPath())
-- assert 24 == client.json().debug("MEMORY", "str")
--
-- # technically help is valid
-- assert isinstance(client.json().debug("HELP"), list)
+-# @pytest.mark.redismod
+-# def test_debug(client):
+-# client.json().set("str", Path.rootPath(), "foo")
+-# assert 24 == client.json().debug("MEMORY", "str", Path.rootPath())
+-# assert 24 == client.json().debug("MEMORY", "str")
+-#
+-# # technically help is valid
+-# assert isinstance(client.json().debug("HELP"), list)
-
-
-@pytest.mark.redismod
@@ -4243,7 +13153,7 @@ index abc5776..0000000
- 1,
- 2,
- 3,
-- ]
+- ],
- )
- assert [0, 1, 2, 3, 4] == client.json().get("arr")
-
@@ -4339,7 +13249,6 @@ index abc5776..0000000
- assert len(obj) == client.json().objlen("obj")
-
-
--@pytest.mark.pipeline
-@pytest.mark.redismod
-def test_json_commands_in_pipeline(client):
- p = client.json().pipeline()
@@ -4354,8 +13263,9 @@ index abc5776..0000000
- client.flushdb()
- p = client.json().pipeline()
- d = {"hello": "world", "oh": "snap"}
-- p.jsonset("foo", Path.rootPath(), d)
-- p.jsonget("foo")
+- with pytest.deprecated_call():
+- p.jsonset("foo", Path.rootPath(), d)
+- p.jsonget("foo")
- p.exists("notarealkey")
- p.delete("foo")
- assert [True, d, 0, 1] == p.execute()
@@ -4371,8 +13281,7 @@ index abc5776..0000000
- r = client.json().get("doc1", "$")
- assert r == [{"nested": {"b": 3}}]
-
-- doc2 = {"a": {"a": 2, "b": 3}, "b": [
-- "a", "b"], "nested": {"b": [True, "a", "b"]}}
+- doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
- assert client.json().set("doc2", "$", doc2)
- assert client.json().delete("doc2", "$..a") == 1
- res = client.json().get("doc2", "$")
@@ -4425,8 +13334,7 @@ index abc5776..0000000
- r = client.json().get("doc1", "$")
- assert r == [{"nested": {"b": 3}}]
-
-- doc2 = {"a": {"a": 2, "b": 3}, "b": [
-- "a", "b"], "nested": {"b": [True, "a", "b"]}}
+- doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
- assert client.json().set("doc2", "$", doc2)
- assert client.json().forget("doc2", "$..a") == 1
- res = client.json().get("doc2", "$")
@@ -4477,16 +13385,12 @@ index abc5776..0000000
- client.json().set(
- "doc1",
- "$",
-- {"a": 1,
-- "b": 2,
-- "nested": {"a": 3},
-- "c": None, "nested2": {"a": None}},
+- {"a": 1, "b": 2, "nested": {"a": 3}, "c": None, "nested2": {"a": None}},
- )
- client.json().set(
- "doc2",
- "$",
-- {"a": 4, "b": 5, "nested": {"a": 6},
-- "c": None, "nested2": {"a": [None]}},
+- {"a": 4, "b": 5, "nested": {"a": 6}, "c": None, "nested2": {"a": [None]}},
- )
- # Compare also to single JSON.GET
- assert client.json().get("doc1", "$..a") == [1, 3, None]
@@ -4495,8 +13399,7 @@ index abc5776..0000000
- # Test mget with single path
- client.json().mget("doc1", "$..a") == [1, 3, None]
- # Test mget with multi path
-- client.json().mget(["doc1", "doc2"], "$..a") == [
-- [1, 3, None], [4, 6, [None]]]
+- client.json().mget(["doc1", "doc2"], "$..a") == [[1, 3, None], [4, 6, [None]]]
-
- # Test missing key
- client.json().mget(["doc1", "missing_doc"], "$..a") == [[1, 3, None], None]
@@ -4508,15 +13411,11 @@ index abc5776..0000000
-def test_numby_commands_dollar(client):
-
- # Test NUMINCRBY
-- client.json().set(
-- "doc1",
-- "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
+- client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- # Test multi
-- assert client.json().numincrby("doc1", "$..a", 2) == \
-- [None, 4, 7.0, None]
+- assert client.json().numincrby("doc1", "$..a", 2) == [None, 4, 7.0, None]
-
-- assert client.json().numincrby("doc1", "$..a", 2.5) == \
-- [None, 6.5, 9.5, None]
+- assert client.json().numincrby("doc1", "$..a", 2.5) == [None, 6.5, 9.5, None]
- # Test single
- assert client.json().numincrby("doc1", "$.b[1].a", 2) == [11.5]
-
@@ -4524,17 +13423,18 @@ index abc5776..0000000
- assert client.json().numincrby("doc1", "$.b[1].a", 3.5) == [15.0]
-
- # Test NUMMULTBY
-- client.json().set("doc1", "$", {"a": "b", "b": [
-- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
+- client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
+-
+- # test list
+- with pytest.deprecated_call():
+- assert client.json().nummultby("doc1", "$..a", 2) == [None, 4, 10, None]
+- assert client.json().nummultby("doc1", "$..a", 2.5) == [None, 10.0, 25.0, None]
-
-- assert client.json().nummultby("doc1", "$..a", 2) == \
-- [None, 4, 10, None]
-- assert client.json().nummultby("doc1", "$..a", 2.5) == \
-- [None, 10.0, 25.0, None]
- # Test single
-- assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
-- assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
-- assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
+- with pytest.deprecated_call():
+- assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
+- assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
+- assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
-
- # test missing keys
- with pytest.raises(exceptions.ResponseError):
@@ -4542,22 +13442,21 @@ index abc5776..0000000
- client.json().nummultby("non_existing_doc", "$..a", 2)
-
- # Test legacy NUMINCRBY
-- client.json().set("doc1", "$", {"a": "b", "b": [
-- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
+- client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- client.json().numincrby("doc1", ".b[0].a", 3) == 5
-
- # Test legacy NUMMULTBY
-- client.json().set("doc1", "$", {"a": "b", "b": [
-- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
-- client.json().nummultby("doc1", ".b[0].a", 3) == 6
+- client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
+-
+- with pytest.deprecated_call():
+- client.json().nummultby("doc1", ".b[0].a", 3) == 6
-
-
-@pytest.mark.redismod
-def test_strappend_dollar(client):
-
- client.json().set(
-- "doc1", "$", {"a": "foo", "nested1": {
-- "a": "hello"}, "nested2": {"a": 31}}
+- "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
- )
- # Test multi
- client.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
@@ -4592,8 +13491,7 @@ index abc5776..0000000
-
- # Test multi
- client.json().set(
-- "doc1", "$", {"a": "foo", "nested1": {
-- "a": "hello"}, "nested2": {"a": 31}}
+- "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
- )
- assert client.json().strlen("doc1", "$..a") == [3, 5, None]
-
@@ -4692,8 +13590,7 @@ index abc5776..0000000
- },
- )
- # Test multi
-- assert client.json().arrinsert("doc1", "$..a", "1",
-- "bar", "racuda") == [3, 5, None]
+- assert client.json().arrinsert("doc1", "$..a", "1", "bar", "racuda") == [3, 5, None]
-
- assert client.json().get("doc1", "$") == [
- {
@@ -4732,8 +13629,11 @@ index abc5776..0000000
-
- # Test multi
- assert client.json().arrlen("doc1", "$..a") == [1, 3, None]
-- assert client.json().arrappend("doc1", "$..a", "non", "abba", "stanza") \
-- == [4, 6, None]
+- assert client.json().arrappend("doc1", "$..a", "non", "abba", "stanza") == [
+- 4,
+- 6,
+- None,
+- ]
-
- client.json().clear("doc1", "$.a")
- assert client.json().arrlen("doc1", "$..a") == [0, 6, None]
@@ -4888,9 +13788,11 @@ index abc5776..0000000
- # Test missing key
- assert client.json().objkeys("non_existing_doc", "..a") is None
-
-- # Test missing key
+- # Test non existing doc
- with pytest.raises(exceptions.ResponseError):
-- client.json().objkeys("doc1", "$.nowhere")
+- assert client.json().objkeys("non_existing_doc", "$..a") == []
+-
+- assert client.json().objkeys("doc1", "$..nowhere") == []
-
-
-@pytest.mark.redismod
@@ -4909,12 +13811,11 @@ index abc5776..0000000
- # Test single
- assert client.json().objlen("doc1", "$.nested1.a") == [2]
-
-- # Test missing key
-- assert client.json().objlen("non_existing_doc", "$..a") is None
--
-- # Test missing path
+- # Test missing key, and path
- with pytest.raises(exceptions.ResponseError):
-- client.json().objlen("doc1", "$.nowhere")
+- client.json().objlen("non_existing_doc", "$..a")
+-
+- assert client.json().objlen("doc1", "$.nowhere") == []
-
- # Test legacy
- assert client.json().objlen("doc1", ".*.a") == 2
@@ -4926,8 +13827,8 @@ index abc5776..0000000
- assert client.json().objlen("non_existing_doc", "..a") is None
-
- # Test missing path
-- with pytest.raises(exceptions.ResponseError):
-- client.json().objlen("doc1", ".nowhere")
+- # with pytest.raises(exceptions.ResponseError):
+- client.json().objlen("doc1", ".nowhere")
-
-
-@pytest.mark.redismod
@@ -4981,8 +13882,7 @@ index abc5776..0000000
- assert client.json().clear("doc1", "$..a") == 3
-
- assert client.json().get("doc1", "$") == [
-- {"nested1": {"a": {}}, "a": [], "nested2": {
-- "a": "claro"}, "nested3": {"a": {}}}
+- {"nested1": {"a": {}}, "a": [], "nested2": {"a": "claro"}, "nested3": {"a": {}}}
- ]
-
- # Test single
@@ -5043,28 +13943,27 @@ index abc5776..0000000
- client.json().toggle("non_existing_doc", "$..a")
-
-
--@pytest.mark.redismod
--def test_debug_dollar(client):
--
-- jdata, jtypes = load_types_data("a")
--
-- client.json().set("doc1", "$", jdata)
--
-- # Test multi
-- assert client.json().debug("MEMORY", "doc1", "$..a") == [
-- 72, 24, 24, 16, 16, 1, 0]
--
-- # Test single
-- assert client.json().debug("MEMORY", "doc1", "$.nested2.a") == [24]
--
-- # Test legacy
-- assert client.json().debug("MEMORY", "doc1", "..a") == 72
--
-- # Test missing path (defaults to root)
-- assert client.json().debug("MEMORY", "doc1") == 72
--
-- # Test missing key
-- assert client.json().debug("MEMORY", "non_existing_doc", "$..a") == []
+-# @pytest.mark.redismod
+-# def test_debug_dollar(client):
+-#
+-# jdata, jtypes = load_types_data("a")
+-#
+-# client.json().set("doc1", "$", jdata)
+-#
+-# # Test multi
+-# assert client.json().debug("MEMORY", "doc1", "$..a") == [72, 24, 24, 16, 16, 1, 0]
+-#
+-# # Test single
+-# assert client.json().debug("MEMORY", "doc1", "$.nested2.a") == [24]
+-#
+-# # Test legacy
+-# assert client.json().debug("MEMORY", "doc1", "..a") == 72
+-#
+-# # Test missing path (defaults to root)
+-# assert client.json().debug("MEMORY", "doc1") == 72
+-#
+-# # Test missing key
+-# assert client.json().debug("MEMORY", "non_existing_doc", "$..a") == []
-
-
-@pytest.mark.redismod
@@ -5207,11 +14106,11 @@ index abc5776..0000000
- ]
-
- # Test missing path
-- with pytest.raises(exceptions.ResponseError):
-- client.json().resp("doc1", "$.nowhere")
+- client.json().resp("doc1", "$.nowhere")
-
- # Test missing key
-- assert client.json().resp("non_existing_doc", "$..a") is None
+- # with pytest.raises(exceptions.ResponseError):
+- client.json().resp("non_existing_doc", "$..a")
-
-
-@pytest.mark.redismod
@@ -5291,12 +14190,10 @@ index abc5776..0000000
- [],
- ]
-
-- assert client.json().arrindex("test_num", "$..arr", 3) == [
-- 3, 2, -1, None, -1]
+- assert client.json().arrindex("test_num", "$..arr", 3) == [3, 2, -1, None, -1]
-
- # Test index of double scalar in multi values
-- assert client.json().arrindex("test_num", "$..arr", 3.0) == [
-- 2, 8, -1, None, -1]
+- assert client.json().arrindex("test_num", "$..arr", 3.0) == [2, 8, -1, None, -1]
-
- # Test index of string scalar in multi values
- client.json().set(
@@ -5306,10 +14203,7 @@ index abc5776..0000000
- {"arr": ["bazzz", "bar", 2, "baz", 2, "ba", "baz", 3]},
- {
- "nested1_found": {
-- "arr": [
-- None,
-- "baz2",
-- "buzz", 2, 1, 0, 1, "2", "baz", 2, 4, 5]
+- "arr": [None, "baz2", "buzz", 2, 1, 0, 1, "2", "baz", 2, 4, 5]
- }
- },
- {"nested2_not_found": {"arr": ["baz2", 4, 6]}},
@@ -5401,11 +14295,7 @@ index abc5776..0000000
- {"arr": ["bazzz", "None", 2, None, 2, "ba", "baz", 3]},
- {
- "nested1_found": {
-- "arr": [
-- "zaz",
-- "baz2",
-- "buzz",
-- 2, 1, 0, 1, "2", None, 2, 4, 5]
+- "arr": ["zaz", "baz2", "buzz", 2, 1, 0, 1, "2", None, 2, 4, 5]
- }
- },
- {"nested2_not_found": {"arr": ["None", 4, 6]}},
@@ -5426,8 +14316,7 @@ index abc5776..0000000
-
- # Fail with none-scalar value
- with pytest.raises(exceptions.ResponseError):
-- client.json().arrindex(
-- "test_None", "$..nested42_empty_arr.arr", {"arr": []})
+- client.json().arrindex("test_None", "$..nested42_empty_arr.arr", {"arr": []})
-
- # Do not fail with none-scalar value in legacy mode
- assert (
@@ -5449,12 +14338,10 @@ index abc5776..0000000
- assert client.json().arrindex("test_string", ".[0].arr", "faz") == -1
- # Test index of None scalar in single value
- assert client.json().arrindex("test_None", ".[0].arr", "None") == 1
-- assert client.json().arrindex(
-- "test_None",
-- "..nested2_not_found.arr",
-- "None") == 0
+- assert client.json().arrindex("test_None", "..nested2_not_found.arr", "None") == 0
-
-
+-@pytest.mark.redismod
-def test_decoders_and_unstring():
- assert unstring("4") == 4
- assert unstring("45.55") == 45.55
@@ -5462,14 +14349,15 @@ index abc5776..0000000
-
- assert decode_list(b"45.55") == 45.55
- assert decode_list("45.55") == 45.55
-- assert decode_list(['hello', b'world']) == ['hello', 'world']
+- assert decode_list(["hello", b"world"]) == ["hello", "world"]
-
-
-@pytest.mark.redismod
-def test_custom_decoder(client):
-- import ujson
- import json
-
+- import ujson
+-
- cj = client.json(encoder=ujson, decoder=ujson)
- assert cj.set("foo", Path.rootPath(), "bar")
- assert "bar" == cj.get("foo")
@@ -5478,61 +14366,98 @@ index abc5776..0000000
- assert client.exists("foo") == 0
- assert not isinstance(cj.__encoder__, json.JSONEncoder)
- assert not isinstance(cj.__decoder__, json.JSONDecoder)
+-
+-
+-@pytest.mark.redismod
+-def test_set_file(client):
+- import json
+- import tempfile
+-
+- obj = {"hello": "world"}
+- jsonfile = tempfile.NamedTemporaryFile(suffix=".json")
+- with open(jsonfile.name, "w+") as fp:
+- fp.write(json.dumps(obj))
+-
+- nojsonfile = tempfile.NamedTemporaryFile()
+- nojsonfile.write(b"Hello World")
+-
+- assert client.json().set_file("test", Path.rootPath(), jsonfile.name)
+- assert client.json().get("test") == obj
+- with pytest.raises(json.JSONDecodeError):
+- client.json().set_file("test2", Path.rootPath(), nojsonfile.name)
+-
+-
+-@pytest.mark.redismod
+-def test_set_path(client):
+- import json
+- import tempfile
+-
+- root = tempfile.mkdtemp()
+- sub = tempfile.mkdtemp(dir=root)
+- jsonfile = tempfile.mktemp(suffix=".json", dir=sub)
+- nojsonfile = tempfile.mktemp(dir=root)
+-
+- with open(jsonfile, "w+") as fp:
+- fp.write(json.dumps({"hello": "world"}))
+- open(nojsonfile, "a+").write("hello")
+-
+- result = {jsonfile: True, nojsonfile: False}
+- assert client.json().set_path(Path.rootPath(), root) == result
+- assert client.json().get(jsonfile.rsplit(".")[0]) == {"hello": "world"}
+diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
+index 23af461..9b983c9 100644
+--- a/tests/test_pubsub.py
++++ b/tests/test_pubsub.py
+@@ -597,14 +597,3 @@ class TestPubSubWorkerThread:
+ pubsub_thread.join(timeout=1.0)
+ assert not pubsub_thread.is_alive()
+
+-
+-class TestPubSubDeadlock:
+- @pytest.mark.timeout(30, method="thread")
+- def test_pubsub_deadlock(self, master_host):
+- pool = redis.ConnectionPool(host=master_host[0], port=master_host[1])
+- r = redis.Redis(connection_pool=pool)
+-
+- for i in range(60):
+- p = r.pubsub()
+- p.subscribe("my-channel-1", "my-channel-2")
+- pool.reset()
diff --git a/tests/test_search.py b/tests/test_search.py
deleted file mode 100644
-index d1fc75f..0000000
+index 7d666cb..0000000
--- a/tests/test_search.py
+++ /dev/null
-@@ -1,1315 +0,0 @@
--import pytest
--import redis
+@@ -1,1457 +0,0 @@
-import bz2
-import csv
--import time
-import os
--
+-import time
-from io import TextIOWrapper
--from .conftest import skip_ifmodversion_lt, default_redismod_url
--from redis import Redis
-
+-import pytest
+-
+-import redis
-import redis.commands.search
+-import redis.commands.search.aggregation as aggregations
+-import redis.commands.search.reducers as reducers
+-from redis import Redis
-from redis.commands.json.path import Path
-from redis.commands.search import Search
--from redis.commands.search.field import (
-- GeoField,
-- NumericField,
-- TagField,
-- TextField
--)
--from redis.commands.search.query import (
-- GeoFilter,
-- NumericFilter,
-- Query
--)
--from redis.commands.search.result import Result
+-from redis.commands.search.field import GeoField, NumericField, TagField, TextField
-from redis.commands.search.indexDefinition import IndexDefinition, IndexType
+-from redis.commands.search.query import GeoFilter, NumericFilter, Query
+-from redis.commands.search.result import Result
-from redis.commands.search.suggestion import Suggestion
--import redis.commands.search.aggregation as aggregations
--import redis.commands.search.reducers as reducers
-
--WILL_PLAY_TEXT = (
-- os.path.abspath(
-- os.path.join(
-- os.path.dirname(__file__),
-- "testdata",
-- "will_play_text.csv.bz2"
-- )
-- )
+-from .conftest import default_redismod_url, skip_ifmodversion_lt
+-
+-WILL_PLAY_TEXT = os.path.abspath(
+- os.path.join(os.path.dirname(__file__), "testdata", "will_play_text.csv.bz2")
-)
-
--TITLES_CSV = (
-- os.path.abspath(
-- os.path.join(
-- os.path.dirname(__file__),
-- "testdata",
-- "titles.csv"
-- )
-- )
+-TITLES_CSV = os.path.abspath(
+- os.path.join(os.path.dirname(__file__), "testdata", "titles.csv")
-)
-
-
@@ -5567,9 +14492,7 @@ index d1fc75f..0000000
-def createIndex(client, num_docs=100, definition=None):
- try:
- client.create_index(
-- (TextField("play", weight=5.0),
-- TextField("txt"),
-- NumericField("chapter")),
+- (TextField("play", weight=5.0), TextField("txt"), NumericField("chapter")),
- definition=definition,
- )
- except redis.ResponseError:
@@ -5582,10 +14505,9 @@ index d1fc75f..0000000
- r = csv.reader(bzfp, delimiter=";")
- for n, line in enumerate(r):
-
-- play, chapter, _, text = \
-- line[1], line[2], line[4], line[5]
+- play, chapter, _, text = line[1], line[2], line[4], line[5]
-
-- key = "{}:{}".format(play, chapter).lower()
+- key = f"{play}:{chapter}".lower()
- d = chapters.setdefault(key, {})
- d["play"] = play
- d["txt"] = d.get("txt", "") + " " + text
@@ -5669,12 +14591,10 @@ index d1fc75f..0000000
-
- # test in fields
- txt_total = (
-- client.ft().search(
-- Query("henry").no_content().limit_fields("txt")).total
+- client.ft().search(Query("henry").no_content().limit_fields("txt")).total
- )
- play_total = (
-- client.ft().search(
-- Query("henry").no_content().limit_fields("play")).total
+- client.ft().search(Query("henry").no_content().limit_fields("play")).total
- )
- both_total = (
- client.ft()
@@ -5703,10 +14623,8 @@ index d1fc75f..0000000
-
- # test slop and in order
- assert 193 == client.ft().search(Query("henry king")).total
-- assert 3 == client.ft().search(
-- Query("henry king").slop(0).in_order()).total
-- assert 52 == client.ft().search(
-- Query("king henry").slop(0).in_order()).total
+- assert 3 == client.ft().search(Query("henry king").slop(0).in_order()).total
+- assert 52 == client.ft().search(Query("king henry").slop(0).in_order()).total
- assert 53 == client.ft().search(Query("henry king").slop(0)).total
- assert 167 == client.ft().search(Query("henry king").slop(100)).total
-
@@ -5770,11 +14688,7 @@ index d1fc75f..0000000
-
- res = client.ft().search("foo bar")
- assert 2 == res.total
-- client.ft().add_document(
-- "doc1",
-- replace=True,
-- txt="this is a replaced doc"
-- )
+- client.ft().add_document("doc1", replace=True, txt="this is a replaced doc")
-
- res = client.ft().search("foo bar")
- assert 1 == res.total
@@ -5787,10 +14701,7 @@ index d1fc75f..0000000
-
-@pytest.mark.redismod
-def test_stopwords(client):
-- client.ft().create_index(
-- (TextField("txt"),),
-- stopwords=["foo", "bar", "baz"]
-- )
+- client.ft().create_index((TextField("txt"),), stopwords=["foo", "bar", "baz"])
- client.ft().add_document("doc1", txt="foo bar")
- client.ft().add_document("doc2", txt="hello world")
- waitForIndex(client, "idx")
@@ -5804,17 +14715,8 @@ index d1fc75f..0000000
-
-@pytest.mark.redismod
-def test_filters(client):
-- client.ft().create_index(
-- (TextField("txt"),
-- NumericField("num"),
-- GeoField("loc"))
-- )
-- client.ft().add_document(
-- "doc1",
-- txt="foo bar",
-- num=3.141,
-- loc="-0.441,51.458"
-- )
+- client.ft().create_index((TextField("txt"), NumericField("num"), GeoField("loc")))
+- client.ft().add_document("doc1", txt="foo bar", num=3.141, loc="-0.441,51.458")
- client.ft().add_document("doc2", txt="foo baz", num=2, loc="-0.1,51.2")
-
- waitForIndex(client, "idx")
@@ -5822,8 +14724,7 @@ index d1fc75f..0000000
- q1 = Query("foo").add_filter(NumericFilter("num", 0, 2)).no_content()
- q2 = (
- Query("foo")
-- .add_filter(
-- NumericFilter("num", 2, NumericFilter.INF, minExclusive=True))
+- .add_filter(NumericFilter("num", 2, NumericFilter.INF, minExclusive=True))
- .no_content()
- )
- res1, res2 = client.ft().search(q1), client.ft().search(q2)
@@ -5834,10 +14735,8 @@ index d1fc75f..0000000
- assert "doc1" == res2.docs[0].id
-
- # Test geo filter
-- q1 = Query("foo").add_filter(
-- GeoFilter("loc", -0.44, 51.45, 10)).no_content()
-- q2 = Query("foo").add_filter(
-- GeoFilter("loc", -0.44, 51.45, 100)).no_content()
+- q1 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 10)).no_content()
+- q2 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 100)).no_content()
- res1, res2 = client.ft().search(q1), client.ft().search(q2)
-
- assert 1 == res1.total
@@ -5863,10 +14762,7 @@ index d1fc75f..0000000
-
-@pytest.mark.redismod
-def test_sort_by(client):
-- client.ft().create_index(
-- (TextField("txt"),
-- NumericField("num", sortable=True))
-- )
+- client.ft().create_index((TextField("txt"), NumericField("num", sortable=True)))
- client.ft().add_document("doc1", txt="foo bar", num=1)
- client.ft().add_document("doc2", txt="foo baz", num=2)
- client.ft().add_document("doc3", txt="foo qux", num=3)
@@ -5908,10 +14804,7 @@ index d1fc75f..0000000
-@pytest.mark.redismod
-def test_example(client):
- # Creating the index definition and schema
-- client.ft().create_index(
-- (TextField("title", weight=5.0),
-- TextField("body"))
-- )
+- client.ft().create_index((TextField("title", weight=5.0), TextField("body")))
-
- # Indexing a document
- client.ft().add_document(
@@ -5969,12 +14862,7 @@ index d1fc75f..0000000
- client.ft().sugadd("ac", Suggestion("pay2", payload="pl2"))
- client.ft().sugadd("ac", Suggestion("pay3", payload="pl3"))
-
-- sugs = client.ft().sugget(
-- "ac",
-- "pay",
-- with_payloads=True,
-- with_scores=True
-- )
+- sugs = client.ft().sugget("ac", "pay", with_payloads=True, with_scores=True)
- assert 3 == len(sugs)
- for sug in sugs:
- assert sug.payload
@@ -6036,11 +14924,7 @@ index d1fc75f..0000000
-
-@pytest.mark.redismod
-def test_partial(client):
-- client.ft().create_index(
-- (TextField("f1"),
-- TextField("f2"),
-- TextField("f3"))
-- )
+- client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
- client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
- client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
- client.ft().add_document("doc1", f3="f3_val", partial=True)
@@ -6058,11 +14942,7 @@ index d1fc75f..0000000
-
-@pytest.mark.redismod
-def test_no_create(client):
-- client.ft().create_index(
-- (TextField("f1"),
-- TextField("f2"),
-- TextField("f3"))
-- )
+- client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
- client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
- client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
- client.ft().add_document("doc1", f3="f3_val", no_create=True)
@@ -6078,21 +14958,12 @@ index d1fc75f..0000000
- assert 1 == res.total
-
- with pytest.raises(redis.ResponseError):
-- client.ft().add_document(
-- "doc3",
-- f2="f2_val",
-- f3="f3_val",
-- no_create=True
-- )
+- client.ft().add_document("doc3", f2="f2_val", f3="f3_val", no_create=True)
-
-
-@pytest.mark.redismod
-def test_explain(client):
-- client.ft().create_index(
-- (TextField("f1"),
-- TextField("f2"),
-- TextField("f3"))
-- )
+- client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
- res = client.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
- assert res
-
@@ -6272,11 +15143,7 @@ index d1fc75f..0000000
-def test_spell_check(client):
- client.ft().create_index((TextField("f1"), TextField("f2")))
-
-- client.ft().add_document(
-- "doc1",
-- f1="some valid content",
-- f2="this is sample text"
-- )
+- client.ft().add_document("doc1", f1="some valid content", f2="this is sample text")
- client.ft().add_document("doc2", f1="very important", f2="lorem ipsum")
- waitForIndex(client, "idx")
-
@@ -6347,7 +15214,7 @@ index d1fc75f..0000000
-
- res = client.ft().search(Query("Jon"))
- assert 2 == len(res.docs)
-- assert ["John", "Jon"] == sorted([d.name for d in res.docs])
+- assert ["John", "Jon"] == sorted(d.name for d in res.docs)
-
-
-@pytest.mark.redismod
@@ -6367,8 +15234,7 @@ index d1fc75f..0000000
- assert 1.0 == res.docs[0].score
- res = client.ft().search(Query("quick").scorer("TFIDF").with_scores())
- assert 1.0 == res.docs[0].score
-- res = client.ft().search(
-- Query("quick").scorer("TFIDF.DOCNORM").with_scores())
+- res = client.ft().search(Query("quick").scorer("TFIDF.DOCNORM").with_scores())
- assert 0.1111111111111111 == res.docs[0].score
- res = client.ft().search(Query("quick").scorer("BM25").with_scores())
- assert 0.17699114465425977 == res.docs[0].score
@@ -6416,7 +15282,7 @@ index d1fc75f..0000000
-
-
-@pytest.mark.redismod
--def test_aggregations(client):
+-def test_aggregations_groupby(client):
- # Creating the index definition and schema
- client.ft().create_index(
- (
@@ -6453,36 +15319,226 @@ index d1fc75f..0000000
- req = aggregations.AggregateRequest("redis").group_by(
- "@parent",
- reducers.count(),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "3"
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.count_distinct("@title"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "3"
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.count_distinctish("@title"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "3"
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.sum("@random_num"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "21" # 10+8+3
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.min("@random_num"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "3" # min(10,8,3)
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.max("@random_num"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "10" # max(10,8,3)
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.avg("@random_num"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "7" # (10+3+8)/3
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.stddev("random_num"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "3.60555127546"
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.quantile("@random_num", 0.5),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == "10"
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
- reducers.tolist("@title"),
-- reducers.first_value("@title"),
-- reducers.random_sample("@title", 2),
- )
-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
+- reducers.first_value("@title").alias("first"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res == ["parent", "redis", "first", "RediSearch"]
+-
+- req = aggregations.AggregateRequest("redis").group_by(
+- "@parent",
+- reducers.random_sample("@title", 2).alias("random"),
+- )
+-
+- res = client.ft().aggregate(req).rows[0]
+- assert res[1] == "redis"
+- assert res[2] == "random"
+- assert len(res[3]) == 2
+- assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"]
+-
+-
+-@pytest.mark.redismod
+-def test_aggregations_sort_by_and_limit(client):
+- client.ft().create_index(
+- (
+- TextField("t1"),
+- TextField("t2"),
+- )
+- )
+-
+- client.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
+- client.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
+-
+- # test sort_by using SortDirection
+- req = aggregations.AggregateRequest("*").sort_by(
+- aggregations.Asc("@t2"), aggregations.Desc("@t1")
+- )
+- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["t2", "a", "t1", "b"]
+- assert res.rows[1] == ["t2", "b", "t1", "a"]
+-
+- # test sort_by without SortDirection
+- req = aggregations.AggregateRequest("*").sort_by("@t1")
+- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["t1", "a"]
+- assert res.rows[1] == ["t1", "b"]
+-
+- # test sort_by with max
+- req = aggregations.AggregateRequest("*").sort_by("@t1", max=1)
+- res = client.ft().aggregate(req)
+- assert len(res.rows) == 1
+-
+- # test limit
+- req = aggregations.AggregateRequest("*").sort_by("@t1").limit(1, 1)
+- res = client.ft().aggregate(req)
+- assert len(res.rows) == 1
+- assert res.rows[0] == ["t1", "b"]
+-
+-
+-@pytest.mark.redismod
+-def test_aggregations_load(client):
+- client.ft().create_index(
+- (
+- TextField("t1"),
+- TextField("t2"),
+- )
+- )
+-
+- client.ft().client.hset("doc1", mapping={"t1": "hello", "t2": "world"})
+-
+- # load t1
+- req = aggregations.AggregateRequest("*").load("t1")
- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["t1", "hello"]
-
-- res = res.rows[0]
-- assert len(res) == 26
-- assert "redis" == res[1]
-- assert "3" == res[3]
-- assert "3" == res[5]
-- assert "3" == res[7]
-- assert "21" == res[9]
-- assert "3" == res[11]
-- assert "10" == res[13]
-- assert "7" == res[15]
-- assert "3.60555127546" == res[17]
-- assert "10" == res[19]
-- assert ["RediSearch", "RedisAI", "RedisJson"] == res[21]
-- assert "RediSearch" == res[23]
-- assert 2 == len(res[25])
+- # load t2
+- req = aggregations.AggregateRequest("*").load("t2")
+- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["t2", "world"]
+-
+- # load all
+- req = aggregations.AggregateRequest("*").load()
+- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["t1", "hello", "t2", "world"]
+-
+-
+-@pytest.mark.redismod
+-def test_aggregations_apply(client):
+- client.ft().create_index(
+- (
+- TextField("PrimaryKey", sortable=True),
+- NumericField("CreatedDateTimeUTC", sortable=True),
+- )
+- )
+-
+- client.ft().client.hset(
+- "doc1",
+- mapping={"PrimaryKey": "9::362330", "CreatedDateTimeUTC": "637387878524969984"},
+- )
+- client.ft().client.hset(
+- "doc2",
+- mapping={"PrimaryKey": "9::362329", "CreatedDateTimeUTC": "637387875859270016"},
+- )
+-
+- req = aggregations.AggregateRequest("*").apply(
+- CreatedDateTimeUTC="@CreatedDateTimeUTC * 10"
+- )
+- res = client.ft().aggregate(req)
+- assert res.rows[0] == ["CreatedDateTimeUTC", "6373878785249699840"]
+- assert res.rows[1] == ["CreatedDateTimeUTC", "6373878758592700416"]
+-
+-
+-@pytest.mark.redismod
+-def test_aggregations_filter(client):
+- client.ft().create_index(
+- (
+- TextField("name", sortable=True),
+- NumericField("age", sortable=True),
+- )
+- )
+-
+- client.ft().client.hset("doc1", mapping={"name": "bar", "age": "25"})
+- client.ft().client.hset("doc2", mapping={"name": "foo", "age": "19"})
+-
+- req = aggregations.AggregateRequest("*").filter("@name=='foo' && @age < 20")
+- res = client.ft().aggregate(req)
+- assert len(res.rows) == 1
+- assert res.rows[0] == ["name", "foo", "age", "19"]
+-
+- req = aggregations.AggregateRequest("*").filter("@age > 15").sort_by("@age")
+- res = client.ft().aggregate(req)
+- assert len(res.rows) == 2
+- assert res.rows[0] == ["age", "19"]
+- assert res.rows[1] == ["age", "25"]
-
-
-@pytest.mark.redismod
@@ -6554,10 +15610,7 @@ index d1fc75f..0000000
- Create definition with IndexType.HASH as index type (ON HASH),
- and use hset to test the client definition.
- """
-- definition = IndexDefinition(
-- prefix=["hset:", "henry"],
-- index_type=IndexType.HASH
-- )
+- definition = IndexDefinition(prefix=["hset:", "henry"], index_type=IndexType.HASH)
- createIndex(client.ft(), num_docs=500, definition=definition)
-
- info = client.ft().info()
@@ -6600,15 +15653,10 @@ index d1fc75f..0000000
- client.ft().create_index(SCHEMA, definition=definition)
-
- # insert json data
-- res = client.json().set(
-- "doc:1",
-- Path.rootPath(),
-- {"name": "Jon", "age": 25}
-- )
+- res = client.json().set("doc:1", Path.rootPath(), {"name": "Jon", "age": 25})
- assert res
-
-- total = client.ft().search(
-- Query("Jon").return_fields("name", "just_a_number")).docs
+- total = client.ft().search(Query("Jon").return_fields("name", "just_a_number")).docs
- assert 1 == len(total)
- assert "doc:1" == total[0].id
- assert "Jon" == total[0].name
@@ -6634,14 +15682,12 @@ index d1fc75f..0000000
- client.ft().create_index(SCHEMA, definition=definition)
- waitForIndex(client, "idx")
-
-- total = client.ft().search(
-- Query("*").return_field("$.t", as_field="txt")).docs
+- total = client.ft().search(Query("*").return_field("$.t", as_field="txt")).docs
- assert 1 == len(total)
- assert "doc:1" == total[0].id
- assert "riceratops" == total[0].txt
-
-- total = client.ft().search(
-- Query("*").return_field("$.t2", as_field="txt")).docs
+- total = client.ft().search(Query("*").return_field("$.t2", as_field="txt")).docs
- assert 1 == len(total)
- assert "doc:1" == total[0].id
- assert "telmatosaurus" == total[0].txt
@@ -6659,17 +15705,10 @@ index d1fc75f..0000000
- )
-
- client.ft().synupdate("id1", True, "boy", "child", "offspring")
-- client.ft().add_document(
-- "doc1",
-- title="he is a baby",
-- body="this is a test")
+- client.ft().add_document("doc1", title="he is a baby", body="this is a test")
-
- client.ft().synupdate("id1", True, "baby")
-- client.ft().add_document(
-- "doc2",
-- title="he is another baby",
-- body="another test"
-- )
+- client.ft().add_document("doc2", title="he is another baby", body="another test")
-
- res = client.ft().search(Query("child").expander("SYNONYM"))
- assert res.docs[0].id == "doc2"
@@ -6711,15 +15750,12 @@ index d1fc75f..0000000
- """
- definition = IndexDefinition(prefix=["king:"], index_type=IndexType.JSON)
- client.ft().create_index(
-- (TextField("$.name", as_name="name"),
-- NumericField("$.num", as_name="num")),
-- definition=definition
+- (TextField("$.name", as_name="name"), NumericField("$.num", as_name="num")),
+- definition=definition,
- )
-
-- client.json().set("king:1", Path.rootPath(), {"name": "henry",
-- "num": 42})
-- client.json().set("king:2", Path.rootPath(), {"name": "james",
-- "num": 3.14})
+- client.json().set("king:1", Path.rootPath(), {"name": "henry", "num": 42})
+- client.json().set("king:2", Path.rootPath(), {"name": "james", "num": 3.14})
-
- res = client.ft().search("@name:henry")
- assert res.docs[0].id == "king:1"
@@ -6746,12 +15782,12 @@ index d1fc75f..0000000
- """
- definition = IndexDefinition(prefix=["king:"], index_type=IndexType.JSON)
- client.ft().create_index(
-- (TagField("$..name", as_name="name")),
-- definition=definition
+- (TagField("$..name", as_name="name")), definition=definition
- )
-
-- client.json().set("king:1", Path.rootPath(),
-- {"name": "henry", "country": {"name": "england"}})
+- client.json().set(
+- "king:1", Path.rootPath(), {"name": "henry", "country": {"name": "england"}}
+- )
-
- res = client.ft().search("@name:{henry}")
- assert res.docs[0].id == "king:1"
@@ -6769,9 +15805,11 @@ index d1fc75f..0000000
-def test_json_with_jsonpath(client):
- definition = IndexDefinition(index_type=IndexType.JSON)
- client.ft().create_index(
-- (TextField('$["prod:name"]', as_name="name"),
-- TextField('$.prod:name', as_name="name_unsupported")),
-- definition=definition
+- (
+- TextField('$["prod:name"]', as_name="name"),
+- TextField("$.prod:name", as_name="name_unsupported"),
+- ),
+- definition=definition,
- )
-
- client.json().set("doc:1", Path.rootPath(), {"prod:name": "RediSearch"})
@@ -6790,24 +15828,315 @@ index d1fc75f..0000000
- res = client.ft().search(Query("@name:RediSearch").return_field("name"))
- assert res.total == 1
- assert res.docs[0].id == "doc:1"
-- assert res.docs[0].name == 'RediSearch'
+- assert res.docs[0].name == "RediSearch"
-
- # return of an unsupported field fails
-- res = client.ft().search(Query("@name:RediSearch")
-- .return_field("name_unsupported"))
+- res = client.ft().search(Query("@name:RediSearch").return_field("name_unsupported"))
- assert res.total == 1
- assert res.docs[0].id == "doc:1"
- with pytest.raises(Exception):
- res.docs[0].name_unsupported
+-
+-
+-@pytest.mark.redismod
+-def test_profile(client):
+- client.ft().create_index((TextField("t"),))
+- client.ft().client.hset("1", "t", "hello")
+- client.ft().client.hset("2", "t", "world")
+-
+- # check using Query
+- q = Query("hello|world").no_content()
+- res, det = client.ft().profile(q)
+- assert det["Iterators profile"]["Counter"] == 2.0
+- assert len(det["Iterators profile"]["Child iterators"]) == 2
+- assert det["Iterators profile"]["Type"] == "UNION"
+- assert det["Parsing time"] < 0.5
+- assert len(res.docs) == 2 # check also the search result
+-
+- # check using AggregateRequest
+- req = (
+- aggregations.AggregateRequest("*")
+- .load("t")
+- .apply(prefix="startswith(@t, 'hel')")
+- )
+- res, det = client.ft().profile(req)
+- assert det["Iterators profile"]["Counter"] == 2.0
+- assert det["Iterators profile"]["Type"] == "WILDCARD"
+- assert det["Parsing time"] < 0.5
+- assert len(res.rows) == 2 # check also the search result
+-
+-
+-@pytest.mark.redismod
+-def test_profile_limited(client):
+- client.ft().create_index((TextField("t"),))
+- client.ft().client.hset("1", "t", "hello")
+- client.ft().client.hset("2", "t", "hell")
+- client.ft().client.hset("3", "t", "help")
+- client.ft().client.hset("4", "t", "helowa")
+-
+- q = Query("%hell% hel*")
+- res, det = client.ft().profile(q, limited=True)
+- assert (
+- det["Iterators profile"]["Child iterators"][0]["Child iterators"]
+- == "The number of iterators in the union is 3"
+- )
+- assert (
+- det["Iterators profile"]["Child iterators"][1]["Child iterators"]
+- == "The number of iterators in the union is 4"
+- )
+- assert det["Iterators profile"]["Type"] == "INTERSECT"
+- assert len(res.docs) == 3 # check also the search result
+diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
+deleted file mode 100644
+index 0357443..0000000
+--- a/tests/test_sentinel.py
++++ /dev/null
+@@ -1,234 +0,0 @@
+-import socket
+-
+-import pytest
+-
+-import redis.sentinel
+-from redis import exceptions
+-from redis.sentinel import (
+- MasterNotFoundError,
+- Sentinel,
+- SentinelConnectionPool,
+- SlaveNotFoundError,
+-)
+-
+-
+-@pytest.fixture(scope="module")
+-def master_ip(master_host):
+- yield socket.gethostbyname(master_host[0])
+-
+-
+-class SentinelTestClient:
+- def __init__(self, cluster, id):
+- self.cluster = cluster
+- self.id = id
+-
+- def sentinel_masters(self):
+- self.cluster.connection_error_if_down(self)
+- self.cluster.timeout_if_down(self)
+- return {self.cluster.service_name: self.cluster.master}
+-
+- def sentinel_slaves(self, master_name):
+- self.cluster.connection_error_if_down(self)
+- self.cluster.timeout_if_down(self)
+- if master_name != self.cluster.service_name:
+- return []
+- return self.cluster.slaves
+-
+- def execute_command(self, *args, **kwargs):
+- # wrapper purely to validate the calls don't explode
+- from redis.client import bool_ok
+-
+- return bool_ok
+-
+-
+-class SentinelTestCluster:
+- def __init__(self, servisentinel_ce_name="mymaster", ip="127.0.0.1", port=6379):
+- self.clients = {}
+- self.master = {
+- "ip": ip,
+- "port": port,
+- "is_master": True,
+- "is_sdown": False,
+- "is_odown": False,
+- "num-other-sentinels": 0,
+- }
+- self.service_name = servisentinel_ce_name
+- self.slaves = []
+- self.nodes_down = set()
+- self.nodes_timeout = set()
+-
+- def connection_error_if_down(self, node):
+- if node.id in self.nodes_down:
+- raise exceptions.ConnectionError
+-
+- def timeout_if_down(self, node):
+- if node.id in self.nodes_timeout:
+- raise exceptions.TimeoutError
+-
+- def client(self, host, port, **kwargs):
+- return SentinelTestClient(self, (host, port))
+-
+-
+-@pytest.fixture()
+-def cluster(request, master_ip):
+- def teardown():
+- redis.sentinel.Redis = saved_Redis
+-
+- cluster = SentinelTestCluster(ip=master_ip)
+- saved_Redis = redis.sentinel.Redis
+- redis.sentinel.Redis = cluster.client
+- request.addfinalizer(teardown)
+- return cluster
+-
+-
+-@pytest.fixture()
+-def sentinel(request, cluster):
+- return Sentinel([("foo", 26379), ("bar", 26379)])
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_discover_master(sentinel, master_ip):
+- address = sentinel.discover_master("mymaster")
+- assert address == (master_ip, 6379)
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_discover_master_error(sentinel):
+- with pytest.raises(MasterNotFoundError):
+- sentinel.discover_master("xxx")
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
+- # Put first sentinel 'foo' down
+- cluster.nodes_down.add(("foo", 26379))
+- address = sentinel.discover_master("mymaster")
+- assert address == (master_ip, 6379)
+- # 'bar' is now first sentinel
+- assert sentinel.sentinels[0].id == ("bar", 26379)
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
+- # Put first sentinel 'foo' down
+- cluster.nodes_timeout.add(("foo", 26379))
+- address = sentinel.discover_master("mymaster")
+- assert address == (master_ip, 6379)
+- # 'bar' is now first sentinel
+- assert sentinel.sentinels[0].id == ("bar", 26379)
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_master_min_other_sentinels(cluster, master_ip):
+- sentinel = Sentinel([("foo", 26379)], min_other_sentinels=1)
+- # min_other_sentinels
+- with pytest.raises(MasterNotFoundError):
+- sentinel.discover_master("mymaster")
+- cluster.master["num-other-sentinels"] = 2
+- address = sentinel.discover_master("mymaster")
+- assert address == (master_ip, 6379)
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_master_odown(cluster, sentinel):
+- cluster.master["is_odown"] = True
+- with pytest.raises(MasterNotFoundError):
+- sentinel.discover_master("mymaster")
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_master_sdown(cluster, sentinel):
+- cluster.master["is_sdown"] = True
+- with pytest.raises(MasterNotFoundError):
+- sentinel.discover_master("mymaster")
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_discover_slaves(cluster, sentinel):
+- assert sentinel.discover_slaves("mymaster") == []
+-
+- cluster.slaves = [
+- {"ip": "slave0", "port": 1234, "is_odown": False, "is_sdown": False},
+- {"ip": "slave1", "port": 1234, "is_odown": False, "is_sdown": False},
+- ]
+- assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
+-
+- # slave0 -> ODOWN
+- cluster.slaves[0]["is_odown"] = True
+- assert sentinel.discover_slaves("mymaster") == [("slave1", 1234)]
+-
+- # slave1 -> SDOWN
+- cluster.slaves[1]["is_sdown"] = True
+- assert sentinel.discover_slaves("mymaster") == []
+-
+- cluster.slaves[0]["is_odown"] = False
+- cluster.slaves[1]["is_sdown"] = False
+-
+- # node0 -> DOWN
+- cluster.nodes_down.add(("foo", 26379))
+- assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
+- cluster.nodes_down.clear()
+-
+- # node0 -> TIMEOUT
+- cluster.nodes_timeout.add(("foo", 26379))
+- assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_master_for(cluster, sentinel, master_ip):
+- master = sentinel.master_for("mymaster", db=9)
+- assert master.ping()
+- assert master.connection_pool.master_address == (master_ip, 6379)
+-
+- # Use internal connection check
+- master = sentinel.master_for("mymaster", db=9, check_connection=True)
+- assert master.ping()
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_slave_for(cluster, sentinel):
+- cluster.slaves = [
+- {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
+- ]
+- slave = sentinel.slave_for("mymaster", db=9)
+- assert slave.ping()
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_slave_for_slave_not_found_error(cluster, sentinel):
+- cluster.master["is_odown"] = True
+- slave = sentinel.slave_for("mymaster", db=9)
+- with pytest.raises(SlaveNotFoundError):
+- slave.ping()
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_slave_round_robin(cluster, sentinel, master_ip):
+- cluster.slaves = [
+- {"ip": "slave0", "port": 6379, "is_odown": False, "is_sdown": False},
+- {"ip": "slave1", "port": 6379, "is_odown": False, "is_sdown": False},
+- ]
+- pool = SentinelConnectionPool("mymaster", sentinel)
+- rotator = pool.rotate_slaves()
+- assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
+- assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
+- # Fallback to master
+- assert next(rotator) == (master_ip, 6379)
+- with pytest.raises(SlaveNotFoundError):
+- next(rotator)
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_ckquorum(cluster, sentinel):
+- assert sentinel.sentinel_ckquorum("mymaster")
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_flushconfig(cluster, sentinel):
+- assert sentinel.sentinel_flushconfig()
+-
+-
+-@pytest.mark.onlynoncluster
+-def test_reset(cluster, sentinel):
+- cluster.master["is_odown"] = True
+- assert sentinel.sentinel_reset("mymaster")
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
deleted file mode 100644
-index 99c6083..0000000
+index 8c97ab8..0000000
--- a/tests/test_timeseries.py
+++ /dev/null
-@@ -1,588 +0,0 @@
--import pytest
+@@ -1,514 +0,0 @@
-import time
-from time import sleep
+-
+-import pytest
+-
-from .conftest import skip_ifmodversion_lt
-
-
@@ -6838,7 +16167,7 @@ index 99c6083..0000000
-def test_create_duplicate_policy(client):
- # Test for duplicate policy
- for duplicate_policy in ["block", "last", "first", "min", "max"]:
-- ts_name = "time-serie-ooo-{0}".format(duplicate_policy)
+- ts_name = f"time-serie-ooo-{duplicate_policy}"
- assert client.ts().create(ts_name, duplicate_policy=duplicate_policy)
- info = client.ts().info(ts_name)
- assert duplicate_policy == info.duplicate_policy
@@ -6875,8 +16204,7 @@ index 99c6083..0000000
- assert 4 == client.ts().add(
- 4, 4, 2, retention_msecs=10, labels={"Redis": "Labs", "Time": "Series"}
- )
-- assert round(time.time()) == \
-- round(float(client.ts().add(5, "*", 1)) / 1000)
+- assert round(time.time()) == round(float(client.ts().add(5, "*", 1)) / 1000)
-
- info = client.ts().info(4)
- assert 10 == info.retention_msecs
@@ -6895,12 +16223,7 @@ index 99c6083..0000000
- # Test for duplicate policy BLOCK
- assert 1 == client.ts().add("time-serie-add-ooo-block", 1, 5.0)
- with pytest.raises(Exception):
-- client.ts().add(
-- "time-serie-add-ooo-block",
-- 1,
-- 5.0,
-- duplicate_policy="block"
-- )
+- client.ts().add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block")
-
- # Test for duplicate policy LAST
- assert 1 == client.ts().add("time-serie-add-ooo-last", 1, 5.0)
@@ -6934,8 +16257,7 @@ index 99c6083..0000000
-@pytest.mark.redismod
-def test_madd(client):
- client.ts().create("a")
-- assert [1, 2, 3] == \
-- client.ts().madd([("a", 1, 5), ("a", 2, 10), ("a", 3, 15)])
+- assert [1, 2, 3] == client.ts().madd([("a", 1, 5), ("a", 2, 10), ("a", 3, 15)])
-
-
-@pytest.mark.redismod
@@ -7013,13 +16335,7 @@ index 99c6083..0000000
- assert 200 == len(client.ts().range(1, 0, 500))
- # last sample isn't returned
- assert 20 == len(
-- client.ts().range(
-- 1,
-- 0,
-- 500,
-- aggregation_type="avg",
-- bucket_size_msec=10
-- )
+- client.ts().range(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
- )
- assert 10 == len(client.ts().range(1, 0, 500, count=10))
-
@@ -7060,13 +16376,7 @@ index 99c6083..0000000
- assert 200 == len(client.ts().range(1, 0, 500))
- # first sample isn't returned
- assert 20 == len(
-- client.ts().revrange(
-- 1,
-- 0,
-- 500,
-- aggregation_type="avg",
-- bucket_size_msec=10
-- )
+- client.ts().revrange(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
- )
- assert 10 == len(client.ts().revrange(1, 0, 500, count=10))
- assert 2 == len(
@@ -7090,10 +16400,7 @@ index 99c6083..0000000
-@pytest.mark.redismod
-def testMultiRange(client):
- client.ts().create(1, labels={"Test": "This", "team": "ny"})
-- client.ts().create(
-- 2,
-- labels={"Test": "This", "Taste": "That", "team": "sf"}
-- )
+- client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
- for i in range(100):
- client.ts().add(1, i, i % 7)
- client.ts().add(2, i, i % 11)
@@ -7108,11 +16415,7 @@ index 99c6083..0000000
- for i in range(100):
- client.ts().add(1, i + 200, i % 7)
- res = client.ts().mrange(
-- 0,
-- 500,
-- filters=["Test=This"],
-- aggregation_type="avg",
-- bucket_size_msec=10
+- 0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
- )
- assert 2 == len(res)
- assert 20 == len(res[0]["1"][1])
@@ -7127,21 +16430,13 @@ index 99c6083..0000000
-@skip_ifmodversion_lt("99.99.99", "timeseries")
-def test_multi_range_advanced(client):
- client.ts().create(1, labels={"Test": "This", "team": "ny"})
-- client.ts().create(
-- 2,
-- labels={"Test": "This", "Taste": "That", "team": "sf"}
-- )
+- client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
- for i in range(100):
- client.ts().add(1, i, i % 7)
- client.ts().add(2, i, i % 11)
-
- # test with selected labels
-- res = client.ts().mrange(
-- 0,
-- 200,
-- filters=["Test=This"],
-- select_labels=["team"]
-- )
+- res = client.ts().mrange(0, 200, filters=["Test=This"], select_labels=["team"])
- assert {"team": "ny"} == res[0]["1"][0]
- assert {"team": "sf"} == res[1]["2"][0]
-
@@ -7157,28 +16452,11 @@ index 99c6083..0000000
- assert [(15, 1.0), (16, 2.0)] == res[0]["1"][1]
-
- # test groupby
-- res = client.ts().mrange(
-- 0,
-- 3,
-- filters=["Test=This"],
-- groupby="Test",
-- reduce="sum"
-- )
+- res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="Test", reduce="sum")
- assert [(0, 0.0), (1, 2.0), (2, 4.0), (3, 6.0)] == res[0]["Test=This"][1]
-- res = client.ts().mrange(
-- 0,
-- 3,
-- filters=["Test=This"],
-- groupby="Test",
-- reduce="max"
-- )
+- res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="Test", reduce="max")
- assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[0]["Test=This"][1]
-- res = client.ts().mrange(
-- 0,
-- 3,
-- filters=["Test=This"],
-- groupby="team",
-- reduce="min")
+- res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="team", reduce="min")
- assert 2 == len(res)
- assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[0]["team=ny"][1]
- assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[1]["team=sf"][1]
@@ -7208,10 +16486,7 @@ index 99c6083..0000000
-@skip_ifmodversion_lt("99.99.99", "timeseries")
-def test_multi_reverse_range(client):
- client.ts().create(1, labels={"Test": "This", "team": "ny"})
-- client.ts().create(
-- 2,
-- labels={"Test": "This", "Taste": "That", "team": "sf"}
-- )
+- client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
- for i in range(100):
- client.ts().add(1, i, i % 7)
- client.ts().add(2, i, i % 11)
@@ -7226,31 +16501,18 @@ index 99c6083..0000000
- for i in range(100):
- client.ts().add(1, i + 200, i % 7)
- res = client.ts().mrevrange(
-- 0,
-- 500,
-- filters=["Test=This"],
-- aggregation_type="avg",
-- bucket_size_msec=10
+- 0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
- )
- assert 2 == len(res)
- assert 20 == len(res[0]["1"][1])
- assert {} == res[0]["1"][0]
-
- # test withlabels
-- res = client.ts().mrevrange(
-- 0,
-- 200,
-- filters=["Test=This"],
-- with_labels=True
-- )
+- res = client.ts().mrevrange(0, 200, filters=["Test=This"], with_labels=True)
- assert {"Test": "This", "team": "ny"} == res[0]["1"][0]
-
- # test with selected labels
-- res = client.ts().mrevrange(
-- 0,
-- 200,
-- filters=["Test=This"], select_labels=["team"]
-- )
+- res = client.ts().mrevrange(0, 200, filters=["Test=This"], select_labels=["team"])
- assert {"team": "ny"} == res[0]["1"][0]
- assert {"team": "sf"} == res[1]["2"][0]
-
@@ -7336,11 +16598,7 @@ index 99c6083..0000000
-
-@pytest.mark.redismod
-def test_info(client):
-- client.ts().create(
-- 1,
-- retention_msecs=5,
-- labels={"currentLabel": "currentData"}
-- )
+- client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
- info = client.ts().info(1)
- assert 5 == info.retention_msecs
- assert info.labels["currentLabel"] == "currentData"
@@ -7349,11 +16607,7 @@ index 99c6083..0000000
-@pytest.mark.redismod
-@skip_ifmodversion_lt("1.4.0", "timeseries")
-def testInfoDuplicatePolicy(client):
-- client.ts().create(
-- 1,
-- retention_msecs=5,
-- labels={"currentLabel": "currentData"}
-- )
+- client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
- info = client.ts().info(1)
- assert info.duplicate_policy is None
-
@@ -7372,7 +16626,6 @@ index 99c6083..0000000
-
-
-@pytest.mark.redismod
--@pytest.mark.pipeline
-def test_pipeline(client):
- pipeline = client.ts().pipeline()
- pipeline.create("with_pipeline")
@@ -7394,5 +16647,5 @@ index 99c6083..0000000
- uncompressed_info = client.ts().info("uncompressed")
- assert compressed_info.memory_usage != uncompressed_info.memory_usage
--
-2.34.0
+2.34.1
diff --git a/community/py3-redis/0002-Drop-tests-test_ssl.py.patch b/community/py3-redis/0002-Drop-tests-test_ssl.py.patch
new file mode 100644
index 0000000000..9a952ef9dd
--- /dev/null
+++ b/community/py3-redis/0002-Drop-tests-test_ssl.py.patch
@@ -0,0 +1,183 @@
+From 9bf9f40f41141942be166966ec434720da5b85bd Mon Sep 17 00:00:00 2001
+From: Drew DeVault <sir@cmpwn.com>
+Date: Wed, 29 Dec 2021 10:16:53 +0100
+Subject: [PATCH 2/2] Drop tests/test_ssl.py
+
+This test expects to be run in the upstream project's CI environment.
+
+Ref https://github.com/redis/redis-py/issues/1838
+---
+ tests/test_ssl.py | 161 ----------------------------------------------
+ 1 file changed, 161 deletions(-)
+ delete mode 100644 tests/test_ssl.py
+
+diff --git a/tests/test_ssl.py b/tests/test_ssl.py
+deleted file mode 100644
+index a2f66b2..0000000
+--- a/tests/test_ssl.py
++++ /dev/null
+@@ -1,161 +0,0 @@
+-import os
+-import socket
+-import ssl
+-from urllib.parse import urlparse
+-
+-import pytest
+-
+-import redis
+-from redis.exceptions import ConnectionError, RedisError
+-
+-from .conftest import skip_if_cryptography, skip_if_nocryptography
+-
+-
+-@pytest.mark.ssl
+-class TestSSL:
+- """Tests for SSL connections
+-
+- This relies on the --redis-ssl-url purely for rebuilding the client
+- and connecting to the appropriate port.
+- """
+-
+- ROOT = os.path.join(os.path.dirname(__file__), "..")
+- CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
+- if not os.path.isdir(CERT_DIR): # github actions package validation case
+- CERT_DIR = os.path.abspath(
+- os.path.join(ROOT, "..", "docker", "stunnel", "keys")
+- )
+- if not os.path.isdir(CERT_DIR):
+- raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
+-
+- def test_ssl_with_invalid_cert(self, request):
+- ssl_url = request.config.option.redis_ssl_url
+- sslclient = redis.from_url(ssl_url)
+- with pytest.raises(ConnectionError) as e:
+- sslclient.ping()
+- assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
+-
+- def test_ssl_connection(self, request):
+- ssl_url = request.config.option.redis_ssl_url
+- p = urlparse(ssl_url)[1].split(":")
+- r = redis.Redis(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none")
+- assert r.ping()
+-
+- def test_ssl_connection_without_ssl(self, request):
+- ssl_url = request.config.option.redis_ssl_url
+- p = urlparse(ssl_url)[1].split(":")
+- r = redis.Redis(host=p[0], port=p[1], ssl=False)
+-
+- with pytest.raises(ConnectionError) as e:
+- r.ping()
+- assert "Connection closed by server" in str(e)
+-
+- def test_validating_self_signed_certificate(self, request):
+- ssl_url = request.config.option.redis_ssl_url
+- p = urlparse(ssl_url)[1].split(":")
+- r = redis.Redis(
+- host=p[0],
+- port=p[1],
+- ssl=True,
+- ssl_certfile=os.path.join(self.CERT_DIR, "server-cert.pem"),
+- ssl_keyfile=os.path.join(self.CERT_DIR, "server-key.pem"),
+- ssl_cert_reqs="required",
+- ssl_ca_certs=os.path.join(self.CERT_DIR, "server-cert.pem"),
+- )
+- assert r.ping()
+-
+- def _create_oscp_conn(self, request):
+- ssl_url = request.config.option.redis_ssl_url
+- p = urlparse(ssl_url)[1].split(":")
+- r = redis.Redis(
+- host=p[0],
+- port=p[1],
+- ssl=True,
+- ssl_certfile=os.path.join(self.CERT_DIR, "server-cert.pem"),
+- ssl_keyfile=os.path.join(self.CERT_DIR, "server-key.pem"),
+- ssl_cert_reqs="required",
+- ssl_ca_certs=os.path.join(self.CERT_DIR, "server-cert.pem"),
+- ssl_validate_ocsp=True,
+- )
+- return r
+-
+- @skip_if_cryptography()
+- def test_ssl_ocsp_called(self, request):
+- r = self._create_oscp_conn(request)
+- with pytest.raises(RedisError) as e:
+- assert r.ping()
+- assert "cryptography not installed" in str(e)
+-
+- @skip_if_nocryptography()
+- def test_ssl_ocsp_called_withcrypto(self, request):
+- r = self._create_oscp_conn(request)
+- with pytest.raises(ConnectionError) as e:
+- assert r.ping()
+- assert "No AIA information present in ssl certificate" in str(e)
+-
+- # rediss://, url based
+- ssl_url = request.config.option.redis_ssl_url
+- sslclient = redis.from_url(ssl_url)
+- with pytest.raises(ConnectionError) as e:
+- sslclient.ping()
+- assert "No AIA information present in ssl certificate" in str(e)
+-
+- @skip_if_nocryptography()
+- def test_valid_ocsp_cert_http(self):
+- from redis.ocsp import OCSPVerifier
+-
+- hostnames = ["github.com", "aws.amazon.com", "ynet.co.il", "microsoft.com"]
+- for hostname in hostnames:
+- context = ssl.create_default_context()
+- with socket.create_connection((hostname, 443)) as sock:
+- with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
+- ocsp = OCSPVerifier(wrapped, hostname, 443)
+- assert ocsp.is_valid()
+-
+- @skip_if_nocryptography()
+- def test_revoked_ocsp_certificate(self):
+- from redis.ocsp import OCSPVerifier
+-
+- context = ssl.create_default_context()
+- hostname = "revoked.badssl.com"
+- with socket.create_connection((hostname, 443)) as sock:
+- with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
+- ocsp = OCSPVerifier(wrapped, hostname, 443)
+- assert ocsp.is_valid() is False
+-
+- @skip_if_nocryptography()
+- def test_unauthorized_ocsp(self):
+- from redis.ocsp import OCSPVerifier
+-
+- context = ssl.create_default_context()
+- hostname = "stackoverflow.com"
+- with socket.create_connection((hostname, 443)) as sock:
+- with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
+- ocsp = OCSPVerifier(wrapped, hostname, 443)
+- with pytest.raises(ConnectionError):
+- ocsp.is_valid()
+-
+- @skip_if_nocryptography()
+- def test_ocsp_not_present_in_response(self):
+- from redis.ocsp import OCSPVerifier
+-
+- context = ssl.create_default_context()
+- hostname = "google.co.il"
+- with socket.create_connection((hostname, 443)) as sock:
+- with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
+- ocsp = OCSPVerifier(wrapped, hostname, 443)
+- assert ocsp.is_valid() is False
+-
+- @skip_if_nocryptography()
+- def test_unauthorized_then_direct(self):
+- from redis.ocsp import OCSPVerifier
+-
+- # these certificates on the socket end return unauthorized
+- # then the second call succeeds
+- hostnames = ["wikipedia.org", "squarespace.com"]
+- for hostname in hostnames:
+- context = ssl.create_default_context()
+- with socket.create_connection((hostname, 443)) as sock:
+- with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
+- ocsp = OCSPVerifier(wrapped, hostname, 443)
+- assert ocsp.is_valid()
+--
+2.34.1
+
diff --git a/community/py3-redis/APKBUILD b/community/py3-redis/APKBUILD
index 5f682f9383..e125bd3939 100644
--- a/community/py3-redis/APKBUILD
+++ b/community/py3-redis/APKBUILD
@@ -1,8 +1,8 @@
# Maintainer: Eivind Uggedal <eu@eju.no>
pkgname=py3-redis
_pkgname=redis
-pkgver=4.0.2
-pkgrel=1
+pkgver=4.1.0
+pkgrel=0
pkgdesc="Python3 client for Redis key-value store"
url="https://github.com/andymccurdy/redis-py"
arch="noarch"
@@ -13,6 +13,7 @@ checkdepends="py3-pytest py3-mock redis"
source="
https://files.pythonhosted.org/packages/source/${_pkgname:0:1}/$_pkgname/$_pkgname-$pkgver.tar.gz
0001-all-remove-support-for-nonfree-Redis-modules.patch
+ 0002-Drop-tests-test_ssl.py.patch
"
builddir="$srcdir"/$_pkgname-$pkgver
@@ -34,6 +35,7 @@ package() {
}
sha512sums="
-3f73ddd2ceb551fa447cfb4ecdc1d393957bbfd8c184ae3d357f9f47fff860f1c4684ec0fc3cb85fea530454456c17a98a2a56e592bef4cd9ad17bb405c1a0f7 redis-4.0.2.tar.gz
-f2f890e147bd76311b6aa9d0bbdab64f44c46a52e450c7c6ca2c9eddcf30e0a6ed8e962950dc98d0160a45eb68580399707ff6a12a17c56a768382814deab626 0001-all-remove-support-for-nonfree-Redis-modules.patch
+85cd09570f4faf34a735befd0677aa8ca2cb0d62b0285c4c040380c2440f2774e47762ec4219381294465343353a15804b96f06b4d6eefa7159a224eb9e72001 redis-4.1.0.tar.gz
+b1dd96aeb6129f121108fac3c1ad033b1b657287fb0f959bc7fcab997b26c4b91cc7c0df6f86d6d2ac283951956a4a38826647f0e744514ce5031cf3917d1746 0001-all-remove-support-for-nonfree-Redis-modules.patch
+5184efc472ad16020240e57222f906656b1f6db5139d37de22b34298c7a15c9b91f5c2d976f6c8455071459d2ff273f75f6bf76f3f46990bacec6673a83a2872 0002-Drop-tests-test_ssl.py.patch
"