Limit the main db and expires dictionaries to expand (#7954)
Redis may reject a user's request or evict some keys when used memory exceeds maxmemory. Dictionary expansion can make this worse: a big dictionary, such as the main db dict or the expires dict, may eat a huge chunk of memory at once to allocate its new hash table, pushing usage far beyond maxmemory. Related issues: #4213 #4583

In more detail, when a dict in redis expands, we allocate a new ht[1] that is generally double the size of ht[0], so ht[1] will be very big if ht[0] is already big. For the db dict, holding more than 64 million keys means ht[1] costs 1GB when the dict expands. If used memory plus the memory needed for the new hash table would exceed maxmemory, we shouldn't allow the dict to expand: with key eviction enabled, we still couldn't add many more keys after eviction and rehashing, and worse, redis ends up keeping fewer keys because part of the little remaining memory is spent on the new hash table instead of users' data. With key eviction disabled, users can't write to redis at all.

What this commit changes:

- Add a new dictType member function, expandAllowed, which lets the caller decide whether an expand is allowed. It receives two parameters: the extra memory needed for the expand and the dict's current load factor, so users can implement a decision function based on them. (See the sketch below.)
- For the main db dict type and the expires dict type, which may grow very big and cost huge memory to expand, implement such a judgement function: provisionally stop the dict from expanding if used memory would exceed maxmemory after the expand, but, to preserve redis performance, still allow the expand once the dict load factor exceeds the safe load factor.
- Add test cases verifying that the main db is not allowed to expand when the remaining memory is insufficient, so that key eviction is avoided.

Other changes:

- New hash table size on expand. Before this commit the size passed to _dictNextPower was double the dict's used count. Since we aim to keep the dict load factor between 0.5 and 1.0, we replace used*2 with used+1; because expansion is only triggered when used >= size, the outcome is usually the same as _dictNextPower(used*2). The only case where it differs is when dict_can_resize is false during a fork: then _dictNextPower(used*2) would make the dict jump to 4x its size (e.g. _dictNextPower(1025*2) returns 4096).
- Fix the rehash test cases affected by the new sizing algorithm.
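To illustrate the gating logic outside the Redis source tree, here is a minimal standalone sketch. dictExpandAllowed and overMaxmemoryAfterAlloc mirror the functions added by this commit (minus the replication-buffer overhead accounting); the fake_used_memory/fake_maxmemory globals and the numbers in main are hypothetical stand-ins for server state (zmalloc_used_memory(), server.maxmemory) chosen only for the example:

#include <stddef.h>
#include <stdio.h>

#define HASHTABLE_MAX_LOAD_FACTOR 1.618

/* Hypothetical stand-ins for server state, for illustration only. */
static size_t fake_used_memory = (size_t)900 * 1024 * 1024;  /* 900MB used */
static size_t fake_maxmemory   = (size_t)1024 * 1024 * 1024; /* 1GB limit  */

/* Sketch of overMaxmemoryAfterAlloc(): would the extra allocation
 * push used memory over maxmemory? */
static int overMaxmemoryAfterAlloc(size_t moremem) {
    if (!fake_maxmemory) return 0; /* No limit. */
    return fake_used_memory + moremem > fake_maxmemory;
}

/* Sketch of dictExpandAllowed(): refuse the expand when it would
 * breach maxmemory, unless the table is already so overloaded that
 * lookup performance degrades. */
static int dictExpandAllowed(size_t moreMem, double usedRatio) {
    if (usedRatio <= HASHTABLE_MAX_LOAD_FACTOR)
        return !overMaxmemoryAfterAlloc(moreMem);
    return 1;
}

int main(void) {
    /* A 64M-entry table growing to 128M buckets needs 1GB of pointers,
     * matching the commit message's example. */
    size_t newTableBytes = (size_t)128 * 1024 * 1024 * sizeof(void*);
    printf("expand allowed at load 0.9: %d\n",
           dictExpandAllowed(newTableBytes, 0.9)); /* 0: blocked */
    printf("expand allowed at load 2.0: %d\n",
           dictExpandAllowed(newTableBytes, 2.0)); /* 1: forced  */
    return 0;
}

Note how the load-factor escape hatch reflects the commit's trade-off: memory safety wins while the table is still reasonably loaded, but once chains grow past HASHTABLE_MAX_LOAD_FACTOR, lookup performance wins and the expand goes ahead anyway.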
parent 2f41a38568
commit 75f9dec644
src/config.c
@@ -1085,7 +1085,8 @@ dictType optionToLineDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictListDestructor         /* val destructor */
+    dictListDestructor,        /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 dictType optionSetDictType = {
@@ -1094,7 +1095,8 @@ dictType optionSetDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* The config rewrite state. */
src/db.c
@@ -464,7 +464,7 @@ dbBackup *backupDb(void) {
     for (int i=0; i<server.dbnum; i++) {
         backup->dbarray[i] = server.db[i];
         server.db[i].dict = dictCreate(&dbDictType,NULL);
-        server.db[i].expires = dictCreate(&keyptrDictType,NULL);
+        server.db[i].expires = dictCreate(&dbExpiresDictType,NULL);
     }
 
     /* Backup cluster slots to keys map if enable cluster. */
src/dict.c
@@ -951,6 +951,16 @@ unsigned long dictScan(dict *d,
 
 /* ------------------------- private functions ------------------------------ */
 
+/* Because we may need to allocate huge memory chunk at once when dict
+ * expands, we will check this allocation is allowed or not if the dict
+ * type has expandAllowed member function. */
+static int dictTypeExpandAllowed(dict *d) {
+    if (d->type->expandAllowed == NULL) return 1;
+    return d->type->expandAllowed(
+                    _dictNextPower(d->ht[0].used + 1) * sizeof(dictEntry*),
+                    (double)d->ht[0].used / d->ht[0].size);
+}
+
 /* Expand the hash table if needed */
 static int _dictExpandIfNeeded(dict *d)
 {
@@ -966,9 +976,10 @@ static int _dictExpandIfNeeded(dict *d)
      * the number of buckets. */
     if (d->ht[0].used >= d->ht[0].size &&
         (dict_can_resize ||
-         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
+         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio) &&
+        dictTypeExpandAllowed(d))
     {
-        return dictExpand(d, d->ht[0].used*2);
+        return dictExpand(d, d->ht[0].used + 1);
     }
     return DICT_OK;
 }
@@ -1173,6 +1184,7 @@ dictType BenchmarkDictType = {
     NULL,
     compareCallback,
     freeCallback,
-    NULL
+    NULL,
+    NULL
 };
 
src/dict.h
@@ -62,6 +62,7 @@ typedef struct dictType {
     int (*keyCompare)(void *privdata, const void *key1, const void *key2);
     void (*keyDestructor)(void *privdata, void *key);
     void (*valDestructor)(void *privdata, void *obj);
+    int (*expandAllowed)(size_t moreMem, double usedRatio);
 } dictType;
 
 /* This is our hash table structure. Every dictionary has two of this as we
src/evict.c
@@ -412,6 +412,20 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *level
     return C_ERR;
 }
 
+/* Return 1 if used memory is more than maxmemory after allocating more memory,
+ * return 0 if not. Redis may reject user's requests or evict some keys if used
+ * memory exceeds maxmemory, especially, when we allocate huge memory at once. */
+int overMaxmemoryAfterAlloc(size_t moremem) {
+    if (!server.maxmemory) return 0; /* No limit. */
+
+    /* Check quickly. */
+    size_t mem_used = zmalloc_used_memory();
+    if (mem_used + moremem <= server.maxmemory) return 0;
+
+    size_t overhead = freeMemoryGetNotCountedMemory();
+    mem_used = (mem_used > overhead) ? mem_used - overhead : 0;
+    return mem_used + moremem > server.maxmemory;
+}
+
 /* The evictionTimeProc is started when "maxmemory" has been breached and
  * could not immediately be resolved. This will spin the event loop with short
src/expire.c
@@ -433,7 +433,8 @@ void rememberSlaveKeyWithExpire(redisDb *db, robj *key) {
             NULL,                   /* val dup */
             dictSdsKeyCompare,      /* key compare */
             dictSdsDestructor,      /* key destructor */
-            NULL                    /* val destructor */
+            NULL,                   /* val destructor */
+            NULL                    /* allow to expand */
         };
         slaveKeysWithExpire = dictCreate(&dt,NULL);
     }
src/latency.c
@@ -53,7 +53,8 @@ dictType latencyTimeSeriesDictType = {
     NULL,                      /* val dup */
     dictStringKeyCompare,      /* key compare */
     dictVanillaFree,           /* key destructor */
-    dictVanillaFree            /* val destructor */
+    dictVanillaFree,           /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* ------------------------- Utility functions ------------------------------ */
src/lazyfree.c
@@ -153,7 +153,7 @@ void freeObjAsync(robj *key, robj *obj) {
 void emptyDbAsync(redisDb *db) {
     dict *oldht1 = db->dict, *oldht2 = db->expires;
     db->dict = dictCreate(&dbDictType,NULL);
-    db->expires = dictCreate(&keyptrDictType,NULL);
+    db->expires = dictCreate(&dbExpiresDictType,NULL);
     atomicIncr(lazyfree_objects,dictSize(oldht1));
     bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,oldht1,oldht2);
 }
src/module.c
@@ -7552,7 +7552,8 @@ dictType moduleAPIDictType = {
     NULL,                      /* val dup */
     dictCStringKeyCompare,     /* key compare */
     NULL,                      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 int moduleRegisterApi(const char *funcname, void *funcptr) {
src/redis-benchmark.c
@@ -1299,7 +1299,8 @@ static int fetchClusterSlotsConfiguration(client c) {
         NULL,               /* val dup */
         dictSdsKeyCompare,  /* key compare */
         NULL,               /* key destructor */
-        NULL                /* val destructor */
+        NULL,               /* val destructor */
+        NULL                /* allow to expand */
     };
     /* printf("[%d] fetchClusterSlotsConfiguration\n", c->thread_id); */
     dict *masters = dictCreate(&dtype, NULL);
src/redis-cli.c
@@ -2294,7 +2294,8 @@ static dictType clusterManagerDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor */
-    dictSdsDestructor          /* val destructor */
+    dictSdsDestructor,         /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 static dictType clusterManagerLinkDictType = {
@@ -2303,7 +2304,8 @@ static dictType clusterManagerLinkDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictListDestructor         /* val destructor */
+    dictListDestructor,        /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 typedef int clusterManagerCommandProc(int argc, char **argv);
@@ -7360,7 +7362,8 @@ static dictType typeinfoDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor (owned by the value)*/
-    type_free                  /* val destructor */
+    type_free,                 /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 static void getKeyTypes(dict *types_dict, redisReply *keys, typeinfo **types) {
src/sentinel.c
@@ -418,7 +418,8 @@ dictType instancesDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor */
-    dictInstancesValDestructor /* val destructor */
+    dictInstancesValDestructor,/* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Instance runid (sds) -> votes (long casted to void*)
@@ -431,7 +432,8 @@ dictType leaderVotesDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Instance renamed commands table. */
@@ -441,7 +443,8 @@ dictType renamedCommandsDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictSdsDestructor          /* val destructor */
+    dictSdsDestructor,         /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* =========================== Initialization =============================== */
src/server.c
@@ -1298,6 +1298,20 @@ uint64_t dictEncObjHash(const void *key) {
     }
 }
 
+/* Return 1 if currently we allow dict to expand. Dict may allocate huge
+ * memory to contain hash buckets when dict expands, that may lead redis
+ * rejects user's requests or evicts some keys, we can stop dict to expand
+ * provisionally if used memory will be over maxmemory after dict expands,
+ * but to guarantee the performance of redis, we still allow dict to expand
+ * if dict load factor exceeds HASHTABLE_MAX_LOAD_FACTOR. */
+int dictExpandAllowed(size_t moreMem, double usedRatio) {
+    if (usedRatio <= HASHTABLE_MAX_LOAD_FACTOR) {
+        return !overMaxmemoryAfterAlloc(moreMem);
+    } else {
+        return 1;
+    }
+}
+
 /* Generic hash table type where keys are Redis Objects, Values
  * dummy pointers. */
 dictType objectKeyPointerValueDictType = {
@@ -1306,7 +1320,8 @@ dictType objectKeyPointerValueDictType = {
     NULL,                      /* val dup */
     dictEncObjKeyCompare,      /* key compare */
     dictObjectDestructor,      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Like objectKeyPointerValueDictType(), but values can be destroyed, if
@@ -1317,7 +1332,8 @@ dictType objectKeyHeapPointerValueDictType = {
     NULL,                      /* val dup */
     dictEncObjKeyCompare,      /* key compare */
     dictObjectDestructor,      /* key destructor */
-    dictVanillaFree            /* val destructor */
+    dictVanillaFree,           /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Set dictionary type. Keys are SDS strings, values are not used. */
@@ -1337,7 +1353,8 @@ dictType zsetDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* Note: SDS string shared & freed by skiplist */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Db->dict, keys are sds strings, vals are Redis objects. */
@@ -1347,7 +1364,8 @@ dictType dbDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictObjectDestructor       /* val destructor */
+    dictObjectDestructor,      /* val destructor */
+    dictExpandAllowed          /* allow to expand */
 };
 
 /* server.lua_scripts sha (as sds string) -> scripts (as robj) cache. */
@@ -1357,17 +1375,19 @@ dictType shaScriptObjectDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictObjectDestructor       /* val destructor */
+    dictObjectDestructor,      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Db->expires */
-dictType keyptrDictType = {
+dictType dbExpiresDictType = {
     dictSdsHash,               /* hash function */
     NULL,                      /* key dup */
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    dictExpandAllowed          /* allow to expand */
 };
 
 /* Command table. sds string -> command struct pointer. */
@@ -1377,7 +1397,8 @@ dictType commandTableDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Hash type hash table (note that small hashes are represented with ziplists) */
@@ -1387,7 +1408,8 @@ dictType hashDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     dictSdsDestructor,         /* key destructor */
-    dictSdsDestructor          /* val destructor */
+    dictSdsDestructor,         /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Keylist hash table type has unencoded redis objects as keys and
@@ -1399,7 +1421,8 @@ dictType keylistDictType = {
     NULL,                      /* val dup */
     dictObjKeyCompare,         /* key compare */
     dictObjectDestructor,      /* key destructor */
-    dictListDestructor         /* val destructor */
+    dictListDestructor,        /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Cluster nodes hash table, mapping nodes addresses 1.2.3.4:6379 to
@@ -1410,7 +1433,8 @@ dictType clusterNodesDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Cluster re-addition blacklist. This maps node IDs to the time
@@ -1422,7 +1446,8 @@ dictType clusterNodesBlackListDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Modules system dictionary type. Keys are module name,
@@ -1433,7 +1458,8 @@ dictType modulesDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Migrate cache dict type. */
@@ -1443,7 +1469,8 @@ dictType migrateCacheDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* Replication cached script dict (server.repl_scriptcache_dict).
@@ -1455,7 +1482,8 @@ dictType replScriptCacheDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCaseCompare,     /* key compare */
     dictSdsDestructor,         /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 int htNeedsResize(dict *dict) {
@@ -3004,7 +3032,7 @@ void initServer(void) {
     /* Create the Redis databases, and initialize other internal state. */
    for (j = 0; j < server.dbnum; j++) {
         server.db[j].dict = dictCreate(&dbDictType,NULL);
-        server.db[j].expires = dictCreate(&keyptrDictType,NULL);
+        server.db[j].expires = dictCreate(&dbExpiresDictType,NULL);
         server.db[j].expires_cursor = 0;
         server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL);
         server.db[j].ready_keys = dictCreate(&objectKeyPointerValueDictType,NULL);
src/server.h
@@ -161,6 +161,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT];
 
 /* Hash table parameters */
 #define HASHTABLE_MIN_FILL        10      /* Minimal hash table fill 10% */
+#define HASHTABLE_MAX_LOAD_FACTOR 1.618   /* Maximum hash table load factor. */
 
 /* Command flags. Please check the command table defined in the server.c file
  * for more information about the meaning of every flag. */
@@ -1638,7 +1639,7 @@ extern dictType shaScriptObjectDictType;
 extern double R_Zero, R_PosInf, R_NegInf, R_Nan;
 extern dictType hashDictType;
 extern dictType replScriptCacheDictType;
-extern dictType keyptrDictType;
+extern dictType dbExpiresDictType;
 extern dictType modulesDictType;
 
 /*-----------------------------------------------------------------------------
@@ -2070,6 +2071,7 @@ int zslLexValueLteMax(sds value, zlexrangespec *spec);
 /* Core functions */
 int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *level);
 size_t freeMemoryGetNotCountedMemory();
+int overMaxmemoryAfterAlloc(size_t moremem);
 int processCommand(client *c);
 void setupSignalHandlers(void);
 void removeSignalHandlers(void);
src/t_zset.c
@@ -2439,7 +2439,8 @@ dictType setAccumulatorDictType = {
     NULL,                      /* val dup */
     dictSdsKeyCompare,         /* key compare */
     NULL,                      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 /* The zunionInterDiffGenericCommand() function is called in order to implement the
tests/unit/maxmemory.tcl
@@ -241,3 +241,22 @@ test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1
 # test again with fewer (and bigger) commands without pipeline, but with eviction
 test_slave_buffers "replica buffer don't induce eviction" 100000 100 1 0
 
+start_server {tags {"maxmemory"}} {
+    test {Don't rehash if used memory exceeds maxmemory after rehash} {
+        r config set maxmemory 0
+        r config set maxmemory-policy allkeys-random
+
+        # Next rehash size is 8192, that will eat 64k memory
+        populate 4096 "" 1
+
+        set used [s used_memory]
+        set limit [expr {$used + 10*1024}]
+        r config set maxmemory $limit
+        r set k1 v1
+        # Next writing command will trigger evicting some keys if last
+        # command trigger DB dict rehash
+        r set k2 v2
+        # There must be 4098 keys because redis doesn't evict keys.
+        r dbsize
+    } {4098}
+}
tests/unit/other.tcl
@@ -316,8 +316,8 @@ start_server {tags {"other"}} {
         after 200
 
         # Hash table should rehash since there is no child process,
-        # size is power of two and over 4098, so it is 16384
+        # size is power of two and over 4098, so it is 8192
         r set k3 v3
-        assert_match "*table size: 16384*" [r debug HTSTATS 9]
+        assert_match "*table size: 8192*" [r debug HTSTATS 9]
     }
 }
utils/hashtable/rehashing.c
@@ -30,7 +30,8 @@ dictType dictTypeTest = {
     NULL,                      /* val dup */
     dictKeyCompare,            /* key compare */
     NULL,                      /* key destructor */
-    NULL                       /* val destructor */
+    NULL,                      /* val destructor */
+    NULL                       /* allow to expand */
 };
 
 void showBuckets(dictht ht) {