Two important fixes to the append only file: zero length values and expires. A pretty neat new test to check consistency of randomly built datasets against snapshotting and AOF.

antirez 2009-12-15 13:06:41 -05:00
parent 71c2b467b0
commit e96e4fbf15
3 changed files with 63 additions and 16 deletions

View File

@@ -5868,7 +5868,8 @@ static int fwriteBulk(FILE *fp, robj *obj) {
     obj = getDecodedObject(obj);
     snprintf(buf,sizeof(buf),"$%ld\r\n",(long)sdslen(obj->ptr));
     if (fwrite(buf,strlen(buf),1,fp) == 0) goto err;
-    if (fwrite(obj->ptr,sdslen(obj->ptr),1,fp) == 0) goto err;
+    if (sdslen(obj->ptr) && fwrite(obj->ptr,sdslen(obj->ptr),1,fp) == 0)
+        goto err;
     if (fwrite("\r\n",2,1,fp) == 0) goto err;
     decrRefCount(obj);
     return 1;
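
The zero length value fix in the hunk above works around how fwrite() reports success: it returns the number of complete items written, and when the requested size is zero it returns 0 even though nothing failed, so the old "== 0" error check aborted the rewrite on every empty string value. A minimal standalone illustration of the libc behaviour (demo only, not Redis code):

#include <stdio.h>

int main(void) {
    FILE *fp = tmpfile();
    if (fp == NULL) return 1;
    /* size == 0: fwrite is defined to return 0, with no error involved */
    size_t written = fwrite("", 0, 1, fp);
    printf("items written for an empty value: %zu\n", written); /* prints 0 */
    fclose(fp);
    return 0;
}

Skipping the fwrite() entirely when sdslen(obj->ptr) is 0, as the new condition does, keeps the error check meaningful for non empty values.
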
@@ -5997,7 +5998,7 @@ static int rewriteAppendOnlyFile(char *filename) {
             }
             /* Save the expire time */
             if (expiretime != -1) {
-                char cmd[]="*3\r\n$6\r\nEXPIRE\r\n";
+                char cmd[]="*3\r\n$8\r\nEXPIREAT\r\n";
                 /* If this key is already expired skip it */
                 if (expiretime < now) continue;
                 if (fwrite(cmd,sizeof(cmd)-1,1,fp) == 0) goto werr;
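
The expires fix switches the rewritten command from EXPIRE to EXPIREAT: the value stored with the key is an absolute unix timestamp, and EXPIREAT takes exactly that, so the key keeps its original deadline no matter when or how often the AOF is reloaded, whereas EXPIRE would reinterpret the timestamp as a relative TTL in seconds. A rough sketch of the idea (hypothetical emitExpireAt() helper, not the actual Redis function; the real code writes the key and the time with its own bulk helpers):

#include <stdio.h>
#include <string.h>
#include <time.h>

/* Append "EXPIREAT <key> <when>" to fp in the Redis multi bulk format. */
static int emitExpireAt(FILE *fp, const char *key, time_t when) {
    char num[32];
    int numlen = snprintf(num, sizeof(num), "%ld", (long)when);
    return fprintf(fp, "*3\r\n$8\r\nEXPIREAT\r\n$%zu\r\n%s\r\n$%d\r\n%s\r\n",
                   strlen(key), key, numlen, num) > 0;
}

int main(void) {
    /* Write the command for a key that expires one hour from now. */
    return emitExpireAt(stdout, "mykey", time(NULL) + 3600) ? 0 : 1;
}
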
@@ -6026,7 +6027,7 @@ static int rewriteAppendOnlyFile(char *filename) {
 werr:
     fclose(fp);
     unlink(tmpfile);
-    redisLog(REDIS_WARNING,"Write error writing append only fileon disk: %s", strerror(errno));
+    redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno));
     if (di) dictReleaseIterator(di);
     return REDIS_ERR;
 }

View File

@@ -65,6 +65,19 @@ proc waitForBgsave r {
     }
 }
+proc waitForBgrewriteaof r {
+    while 1 {
+        set i [$r info]
+        if {[string match {*bgrewriteaof_in_progress:1*} $i]} {
+            puts -nonewline "\nWaiting for background AOF rewrite to finish... "
+            flush stdout
+            after 1000
+        } else {
+            break
+        }
+    }
+}
 proc randomInt {max} {
     expr {int(rand()*$max)}
 }
@@ -154,17 +167,36 @@ proc createComplexDataset {r ops} {
 proc datasetDigest r {
     set keys [lsort [split [$r keys *] " "]]
-    set digest [::sha1::sha1 -hex $keys]
+    set digest {}
     foreach k $keys {
         set t [$r type $k]
-        switch t {
-            {string} {set aux [::sha1::sha1 -hex [$r get $k]]} \
-            {list} {set aux [::sha1::sha1 -hex [$r lrange $k 0 -1]]} \
-            {set} {set aux [::sha1::sha1 -hex [$r smembers $k]]} \
-            {zset} {set aux [::sha1::sha1 -hex [$r zrange $k 0 -1]]}
+        switch $t {
+            {string} {
+                set aux [::sha1::sha1 -hex [$r get $k]]
+            } {list} {
+                if {[$r llen $k] == 0} {
+                    set aux {}
+                } else {
+                    set aux [::sha1::sha1 -hex [$r lrange $k 0 -1]]
+                }
+            } {set} {
+                if {[$r scard $k] == 0} {
+                    set aux {}
+                } else {
+                    set aux [::sha1::sha1 -hex [lsort [$r smembers $k]]]
+                }
+            } {zset} {
+                if {[$r zcard $k] == 0} {
+                    set aux {}
+                } else {
+                    set aux [::sha1::sha1 -hex [$r zrange $k 0 -1]]
+                }
+            } default {
+                error "Type not supported"
+            }
         }
-        append aux $digest
-        set digest [::sha1::sha1 -hex $aux]
+        if {$aux eq {}} continue
+        set digest [::sha1::sha1 -hex [join [list $aux $digest $k] "\n"]]
     }
     return $digest
 }
@@ -1392,17 +1424,30 @@ proc main {server port} {
             set sha1_after [datasetDigest $r]
             expr {$sha1 eq $sha1_after}
         } {1}
+        test {Same dataset digest if saving/reloading as AOF?} {
+            $r bgrewriteaof
+            waitForBgrewriteaof $r
+            $r debug loadaof
+            set sha1_after [datasetDigest $r]
+            expr {$sha1 eq $sha1_after}
+        } {1}
     }
-    test {EXPIRES after a reload} {
+    test {EXPIRES after a reload (snapshot + append only file)} {
         $r flushdb
         $r set x 10
         $r expire x 1000
         $r save
         $r debug reload
         set ttl [$r ttl x]
-        expr {$ttl > 900 && $ttl <= 1000}
-    } {1}
+        set e1 [expr {$ttl > 900 && $ttl <= 1000}]
+        $r bgrewriteaof
+        waitForBgrewriteaof $r
+        set ttl [$r ttl x]
+        set e2 [expr {$ttl > 900 && $ttl <= 1000}]
+        list $e1 $e2
+    } {1 1}
     # Leave the user with a clean DB before to exit
     test {FLUSHDB} {

View File

@@ -47,5 +47,6 @@ end
 host = ARGV[0] || "127.0.0.1"
 port = ARGV[1] || "6379"
-puts "Performing SHA1 of Redis server #{host} #{port}"
-p "Dataset SHA1: #{redisSha1(:host => host, :port => port.to_i)}"
+db = ARGV[2] || "0"
+puts "Performing SHA1 of Redis server #{host} #{port} DB: #{db}"
+p "Dataset SHA1: #{redisSha1(:host => host, :port => port.to_i, :db => db)}"