|
3 | 3 | from pyredis import commands |
4 | 4 | from pyredis.connection import Connection |
5 | 5 | from pyredis.exceptions import PyRedisError, PyRedisConnError, PyRedisConnReadTimeout, ReplyError |
6 | | -from pyredis.helper import dict_from_list, ClusterMap |
| 6 | +from pyredis.helper import dict_from_list, ClusterMap, slot_from_key |
7 | 7 |
|
8 | 8 |
|
9 | 9 | class Client( |
@@ -217,7 +217,7 @@ def __init__( |
217 | 217 | if not bool(seeds) != bool(cluster_map): |
218 | 218 | raise PyRedisError('Ether seeds or cluster_map has to be provided') |
219 | 219 | self._cluster = True |
220 | | - self._conns = {} |
| 220 | + self._conns = dict() |
221 | 221 | self._conn_timeout = conn_timeout |
222 | 222 | self._read_timeout = read_timeout |
223 | 223 | self._encoding = encoding |
@@ -321,6 +321,212 @@ def execute(self, *args, shard_key=None, sock=None, asking=False, retries=3): |
321 | 321 | raise err |
322 | 322 |
|
323 | 323 |
|
class HashClient(
    commands.Connection,
    commands.Hash,
    commands.HyperLogLog,
    commands.Key,
    commands.List,
    commands.Publish,
    commands.Scripting,
    commands.Set,
    commands.SSet,
    commands.String,
    commands.Transaction,
):
    """ Client for talking to a statically hashed Redis cluster.

    The client calculates a crc16 hash using the shard_key,
    which is by default the first key, in case the command supports multiple keys.
    If the key is using the TAG annotation "bla{tag}blarg",
    then only the tag portion is used, in this case "tag".
    The key space is split into 16384 buckets, so in theory you could provide
    a list with 16384 ('host', port) pairs to the "buckets" parameter.
    If you have fewer than 16384 ('host', port) pairs, the client will try to
    distribute the key space evenly between the available pairs.

    --- Warning ---
    Since this is static hashing, the order of pairs has to match on each client you use!
    Also, changing the number of pairs will change the mapping between buckets and pairs,
    rendering your data inaccessible!

    Inherits the following Command classes:
      - commands.Connection,
      - commands.Hash,
      - commands.HyperLogLog,
      - commands.Key,
      - commands.List,
      - commands.Publish,
      - commands.Scripting,
      - commands.Set,
      - commands.SSet,
      - commands.String,
      - commands.Transaction
    """
    def __init__(self, buckets, database=0, password=None, encoding=None, conn_timeout=2, read_timeout=2):
        """ Create a statically hashed client.

        :param buckets:
            Ordered iterable of ('host', port) pairs. Order matters: the
            slot -> bucket mapping is derived from the position in this list.
        :type buckets: list, tuple

        :param database: (optional) Select database on connect.
        :type database: int

        :param password: (optional) Password used to authenticate.
        :type password: str

        :param encoding: (optional) Convert result strings with this encoding. If None, no conversion is done.
        :type encoding: str

        :param conn_timeout: (optional) Connect timeout in seconds.
        :type conn_timeout: float

        :param read_timeout: (optional) Read timeout in seconds.
        :type read_timeout: float
        """
        super().__init__()
        self._conns = dict()
        self._conn_names = list()
        self._bulk = False
        self._bulk_keep = False
        self._bulk_results = None
        self._bulk_size = None
        self._bulk_size_current = None
        self._bulk_bucket_order = list()
        self._closed = False
        self._cluster = True
        self._map = dict()
        self._init_conns(buckets, database, password, encoding, conn_timeout, read_timeout)
        self._init_map()

    def _bulk_fetch(self):
        # Drain all pending replies in the exact order the commands were
        # written, so results line up with the issued commands.
        for conn in self._bulk_bucket_order:
            result = conn.read(raise_on_result_err=False)
            if self._bulk_keep:
                self._bulk_results.append(result)
        self._bulk_bucket_order = list()
        self._bulk_size_current = 0

    @staticmethod
    def _execute_basic(*args, conn):
        # Synchronous round trip: write the command, block for the reply.
        conn.write(*args)
        return conn.read()

    def _execute_bulk(self, *args, conn):
        # Pipelined write: remember which connection the reply will arrive on,
        # and fetch outstanding replies once the bulk window is full.
        conn.write(*args)
        self._bulk_size_current += 1
        self._bulk_bucket_order.append(conn)
        if self._bulk_size_current == self._bulk_size:
            self._bulk_fetch()

    def _init_conns(self, buckets, database, password, encoding, conn_timeout, read_timeout):
        # One Connection per ('host', port) pair, addressable by "host_port".
        for host, port in buckets:
            bucketname = '{0}_{1}'.format(host, port)
            self._conn_names.append(bucketname)
            self._conns[bucketname] = Connection(
                host=host, port=port, database=database, password=password,
                encoding=encoding, conn_timeout=conn_timeout, read_timeout=read_timeout
            )

    def _init_map(self):
        # Spread all 16384 slots round-robin over the configured buckets.
        # The mapping depends only on bucket count and order, hence the
        # class-level warning about keeping them identical on every client.
        if not self._conn_names:
            raise PyRedisError('At least one bucket has to be provided')
        num_buckets = len(self._conn_names)
        for slot in range(16384):
            self._map[slot] = self._conn_names[slot % num_buckets]

    @property
    def bulk(self):
        """ True if bulk mode is enabled.

        :return: bool
        """
        return self._bulk

    def bulk_start(self, bulk_size=5000, keep_results=True):
        """ Enable bulk mode.

        Put the client into bulk mode. Instead of executing a command & waiting for
        the reply, all commands are sent to Redis without fetching the result.
        The results get fetched whenever $bulk_size commands have been executed,
        which will also reset the counter, or when bulk_stop() is called.

        :param bulk_size:
            Number of commands to execute, before fetching results.
        :type bulk_size: int

        :param keep_results:
            If True, keep the results. The results will be returned when calling bulk_stop.
        :type keep_results: bool

        :raise PyRedisError: if the client is already in bulk mode.

        :return: None
        """
        if self.bulk:
            raise PyRedisError("Already in bulk mode")
        self._bulk = True
        self._bulk_size = bulk_size
        self._bulk_size_current = 0
        if keep_results:
            self._bulk_results = []
            self._bulk_keep = True

    def bulk_stop(self):
        """ Stop bulk mode.

        All outstanding results from previous commands get fetched.
        If bulk_start was called with keep_results=True, return a list with all
        results from the executed commands in order. The list of results can also
        contain Exceptions, that you should check for.

        :raise PyRedisError: if the client is not in bulk mode.

        :return: None, list
        """
        if not self.bulk:
            raise PyRedisError("Not in bulk mode")
        self._bulk_fetch()
        results = self._bulk_results
        self._bulk = False
        self._bulk_keep = False
        self._bulk_results = None
        self._bulk_size = None
        self._bulk_size_current = None
        return results

    def close(self):
        """ Close client.

        Closes every underlying bucket connection and marks the client closed.

        :return: None
        """
        for conn in self._conns.values():
            conn.close()
        self._closed = True

    @property
    def closed(self):
        """ Check if client is closed.

        :return: bool
        """
        return self._closed

    def execute(self, *args, shard_key=None, sock=None):
        """ Execute arbitrary redis command.

        :param args:
        :type args: list, int, float

        :param shard_key: (optional)
            Should be set to the key name you try to work with.
            Can not be used if sock is set.
        :type shard_key: string

        :param sock: (optional)
            The string representation of a socket, the command should be executed against.
            For example: "testhost_6379"
            Can not be used if shard_key is set.
        :type sock: string

        :raise PyRedisError: if neither or both of shard_key and sock are given.

        :return: result, exception
        """
        # Exactly one of shard_key / sock has to be set.
        if not bool(shard_key) != bool(sock):
            raise PyRedisError('Either shard_key or sock has to be provided')
        if not sock:
            sock = self._map[slot_from_key(shard_key)]
        conn = self._conns[sock]
        try:
            if not self._bulk:
                return self._execute_basic(*args, conn=conn)
            else:
                self._execute_bulk(*args, conn=conn)
        except PyRedisConnError as err:
            # A broken connection poisons the whole static mapping; close
            # everything so the caller cannot keep using a half-dead client.
            self.close()
            raise err
| 528 | + |
| 529 | + |
324 | 530 | class PubSubClient(commands.Subscribe): |
325 | 531 | """ Pub/Sub Client. |
326 | 532 |
|
|
0 commit comments