Skip to content

Commit

Permalink
revert dictRehashingStartedByKeyType cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
enjoy-binbin committed Dec 8, 2023
1 parent 722af53 commit bc941c9
Showing 1 changed file with 19 additions and 16 deletions.
35 changes: 19 additions & 16 deletions src/server.c
Original file line number Diff line number Diff line change
Expand Up @@ -426,42 +426,45 @@ int dictExpandAllowed(size_t moreMem, double usedRatio) {
*
* In non-cluster mode, bucket count can be retrieved directly from single dict bucket and
* we don't need this list as there is only one dictionary per DB. */
/* Rehashing-started callback for the main (DB_MAIN) keyspace dict.
 *
 * Adds the new hash-table size to the per-DB bucket count, and — when
 * active rehashing is enabled and there are entries to move — records
 * the dict in the DB_MAIN rehashing list so it can be incrementally
 * rehashed later. In non-cluster mode this bookkeeping is skipped (see
 * the comment above: a DB has a single dict there).
 *
 * NOTE(review): bucket counts and the rehashing list live on db[0];
 * presumably cluster mode uses a single logical DB — confirm upstream. */
void dictRehashingStarted(dict *d) {
    if (!server.cluster_enabled) return;

    unsigned long long from, to;
    dictRehashingInfo(d, &from, &to);
    server.db[0].sub_dict[DB_MAIN].bucket_count += to; /* Started rehashing (Add the new ht size) */
    if (from == 0) return; /* No entries are to be moved. */
    if (server.activerehashing) {
        listAddNodeTail(server.db[0].sub_dict[DB_MAIN].rehashing, d);
    }
}

/* Updates the bucket count for the given dictionary in a DB. It removes
 * the old ht size of the dictionary from the total sum of buckets for a DB.
 *
 * Rehashing-completed callback for the main (DB_MAIN) keyspace dict;
 * counterpart of dictRehashingStarted. No-op in non-cluster mode. */
void dictRehashingCompleted(dict *d) {
    if (!server.cluster_enabled) return;

    unsigned long long from, to;
    dictRehashingInfo(d, &from, &to);
    server.db[0].sub_dict[DB_MAIN].bucket_count -= from; /* Finished rehashing (Remove the old ht size) */
}

/* Rehashing-started callback for the expires (DB_EXPIRES) sub-dict.
 * Mirrors dictRehashingStarted: adds the new ht size to the DB_EXPIRES
 * bucket count and, when active rehashing is enabled and there are
 * entries to move, queues the dict on the DB_EXPIRES rehashing list.
 * No-op in non-cluster mode. */
void dictRehashingStartedForExpires(dict *d) {
    if (!server.cluster_enabled) return;

    unsigned long long from, to;
    dictRehashingInfo(d, &from, &to);
    server.db[0].sub_dict[DB_EXPIRES].bucket_count += to; /* Started rehashing (Add the new ht size) */
    if (from == 0) return; /* No entries are to be moved. */
    if (server.activerehashing) {
        listAddNodeTail(server.db[0].sub_dict[DB_EXPIRES].rehashing, d);
    }
}

/* Rehashing-completed callback for the expires (DB_EXPIRES) sub-dict.
 * Mirrors dictRehashingCompleted: subtracts the old ht size from the
 * DB_EXPIRES bucket count. No-op in non-cluster mode. */
void dictRehashingCompletedForExpires(dict *d) {
    if (!server.cluster_enabled) return;

    unsigned long long from, to;
    dictRehashingInfo(d, &from, &to);
    server.db[0].sub_dict[DB_EXPIRES].bucket_count -= from; /* Finished rehashing (Remove the old ht size) */
}

/* Generic hash table type where keys are Redis Objects, Values
Expand Down

0 comments on commit bc941c9

Please sign in to comment.