Fix for flush_zone

git-svn-id: file:///svn/unbound/trunk@1580 be551aaa-1e26-0410-a405-d3ace91eadb9
Wouter Wijngaards 2009-04-02 11:56:01 +00:00
parent a5c428bb8d
commit af102d24ce
6 changed files with 77 additions and 37 deletions

View File

@@ -1082,6 +1082,8 @@ struct del_info {
 	uint32_t expired;
 	/** number of rrsets removed */
 	size_t num_rrsets;
+	/** number of msgs removed */
+	size_t num_msgs;
 	/** number of key entries removed */
 	size_t num_keys;
 };
@@ -1101,6 +1103,20 @@ zone_del_rrset(struct lruhash_entry* e, void* arg)
 	}
 }
+/** callback to delete messages in a zone */
+static void
+zone_del_msg(struct lruhash_entry* e, void* arg)
+{
+	/* entry is locked */
+	struct del_info* inf = (struct del_info*)arg;
+	struct msgreply_entry* k = (struct msgreply_entry*)e->key;
+	if(dname_subdomain_c(k->key.qname, inf->name)) {
+		struct reply_info* d = (struct reply_info*)e->data;
+		d->ttl = inf->expired;
+		inf->num_msgs++;
+	}
+}
 /** callback to delete keys in zone */
 static void
 zone_del_kcache(struct lruhash_entry* e, void* arg)
@@ -1115,41 +1131,6 @@ zone_del_kcache(struct lruhash_entry* e, void* arg)
 	}
 }
-/** traverse a lruhash */
-static void
-lruhash_traverse(struct lruhash* h, int wr,
-	void (*func)(struct lruhash_entry*, void*), void* arg)
-{
-	size_t i;
-	struct lruhash_entry* e;
-	lock_quick_lock(&h->lock);
-	for(i=0; i<h->size; i++) {
-		lock_quick_lock(&h->array[i].lock);
-		for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
-			if(wr) {
-				lock_rw_wrlock(&e->lock);
-			} else {
-				lock_rw_rdlock(&e->lock);
-			}
-			(*func)(e, arg);
-			lock_rw_unlock(&e->lock);
-		}
-		lock_quick_unlock(&h->array[i].lock);
-	}
-	lock_quick_unlock(&h->lock);
-}
-/** traverse a slabhash */
-static void
-slabhash_traverse(struct slabhash* sh, int wr,
-	void (*func)(struct lruhash_entry*, void*), void* arg)
-{
-	size_t i;
-	for(i=0; i<sh->size; i++)
-		lruhash_traverse(sh->array[i], wr, func, arg);
-}
 /** remove all rrsets and keys from zone from cache */
 static void
 do_flush_zone(SSL* ssl, struct worker* worker, char* arg)
@@ -1170,10 +1151,13 @@ do_flush_zone(SSL* ssl, struct worker* worker, char* arg)
 	inf.expired = *worker->env.now;
 	inf.expired -= 3; /* handle 3 seconds skew between threads */
 	inf.num_rrsets = 0;
+	inf.num_msgs = 0;
 	inf.num_keys = 0;
 	slabhash_traverse(&worker->env.rrset_cache->table, 1,
 		&zone_del_rrset, &inf);
+	slabhash_traverse(worker->env.msg_cache, 1, &zone_del_msg, &inf);
 	/* and validator cache */
 	idx = modstack_find(&worker->daemon->mods, "validator");
 	if(idx != -1) {
@@ -1183,8 +1167,9 @@ do_flush_zone(SSL* ssl, struct worker* worker, char* arg)
 	free(nm);
-	(void)ssl_printf(ssl, "ok removed %u rrsets and %u key entries\n",
-		(unsigned)inf.num_rrsets, (unsigned)inf.num_keys);
+	(void)ssl_printf(ssl, "ok removed %u rrsets, %u messages "
+		"and %u key entries\n", (unsigned)inf.num_rrsets,
+		(unsigned)inf.num_msgs, (unsigned)inf.num_keys);
 }
 /** remove name rrset from cache */
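
The new zone_del_msg callback flushes by expiry rather than deletion: each matching cached reply gets its absolute TTL rewritten to a timestamp just before "now", so the next lookup treats the entry as stale instead of serving it. The standalone toy program below only sketches that idea; struct toy_reply, toy_usable and the timestamps are illustrative stand-ins, not Unbound types.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a cached reply: only the absolute expiry
 * time matters for this sketch. */
struct toy_reply {
	uint32_t ttl;	/* absolute time at which the entry expires */
};

/* A lookup would only use the entry while now <= ttl. */
static int
toy_usable(const struct toy_reply* r, uint32_t now)
{
	return now <= r->ttl;
}

int
main(void)
{
	uint32_t now = 1000;
	struct toy_reply r = { now + 300 };	/* reply with 300 seconds left */
	printf("before flush: usable=%d\n", toy_usable(&r, now));
	r.ttl = now - 3;	/* what zone_del_msg does via inf->expired */
	printf("after flush:  usable=%d\n", toy_usable(&r, now));
	return 0;
}

With this in place, a flush such as "unbound-control flush_zone example.com" should also report the expired messages, using the extended ssl_printf format shown above.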

View File

@@ -2,6 +2,9 @@
 	- pyunbound (libunbound python plugin) compiles using libtool.
 	- documentation for pythonmod and pyunbound is generated in doc/html.
 	- iana portlist updated.
+	- fixed bug in unbound-control flush_zone where it would not flush
+	  every message in the target domain. This especially impacted
+	  NXDOMAIN messages which could remain in the cache regardless.
 1 April 2009: Wouter
 	- suppress errors when trying to contact authority servers that gave

View File

@@ -511,3 +511,27 @@ lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md)
 	table->markdelfunc = md;
 	lock_quick_unlock(&table->lock);
 }
+void
+lruhash_traverse(struct lruhash* h, int wr,
+	void (*func)(struct lruhash_entry*, void*), void* arg)
+{
+	size_t i;
+	struct lruhash_entry* e;
+	lock_quick_lock(&h->lock);
+	for(i=0; i<h->size; i++) {
+		lock_quick_lock(&h->array[i].lock);
+		for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
+			if(wr) {
+				lock_rw_wrlock(&e->lock);
+			} else {
+				lock_rw_rdlock(&e->lock);
+			}
+			(*func)(e, arg);
+			lock_rw_unlock(&e->lock);
+		}
+		lock_quick_unlock(&h->array[i].lock);
+	}
+	lock_quick_unlock(&h->lock);
+}
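
A minimal usage sketch for the traversal added above, assuming the lruhash header that declares lruhash_traverse and struct lruhash_entry is included; count_cb, struct count_state and count_table are hypothetical names, not part of the patch.

#include <stddef.h>
#include "lruhash.h"	/* assumed header providing lruhash_traverse and struct lruhash_entry */

/* Hypothetical user state passed through the void* argument. */
struct count_state {
	size_t entries;
};

/* Called once per element; the traversal already holds the entry lock,
 * so the callback must not lock or unlock the entry itself. */
static void
count_cb(struct lruhash_entry* e, void* arg)
{
	struct count_state* s = (struct count_state*)arg;
	(void)e;
	s->entries++;
}

/* Count the elements of an existing table; wr=0 takes read locks since
 * the callback does not modify entry data. */
static size_t
count_table(struct lruhash* table)
{
	struct count_state s;
	s.entries = 0;
	lruhash_traverse(table, 0, &count_cb, &s);
	return s.entries;
}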

View File

@@ -401,4 +401,14 @@ void lruhash_status(struct lruhash* table, const char* id, int extended);
  */
 size_t lruhash_get_mem(struct lruhash* table);
+/**
+ * Traverse a lruhash. Call back for every element in the table.
+ * @param h: hash table. Locked before use.
+ * @param wr: if true writelock is obtained on element, otherwise readlock.
+ * @param func: function for every element. Do not lock or unlock elements.
+ * @param arg: user argument to func.
+ */
+void lruhash_traverse(struct lruhash* h, int wr,
+	void (*func)(struct lruhash_entry*, void*), void* arg);
 #endif /* UTIL_STORAGE_LRUHASH_H */

View File

@@ -209,3 +209,11 @@ void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_t md)
 		lruhash_setmarkdel(sl->array[i], md);
 	}
 }
+void slabhash_traverse(struct slabhash* sh, int wr,
+	void (*func)(struct lruhash_entry*, void*), void* arg)
+{
+	size_t i;
+	for(i=0; i<sh->size; i++)
+		lruhash_traverse(sh->array[i], wr, func, arg);
+}
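
The slabbed variant simply applies the lruhash traversal to each slab, so it is called the same way. The sketch below mirrors how do_flush_zone drives its zone_del_* callbacks, but with hypothetical names (struct demo_data, struct demo_expire, demo_expire_cb, demo_expire_all) and assumed header includes; real callers pass Unbound's own cache data types.

#include <stddef.h>
#include <stdint.h>
#include "lruhash.h"	/* assumed header for struct lruhash_entry */
#include "slabhash.h"	/* assumed header for slabhash_traverse */

/* Hypothetical cached-data type standing in for whatever the table
 * stores; only an absolute ttl field is needed for this sketch. */
struct demo_data {
	uint32_t ttl;
};

/* Hypothetical per-traversal state, in the style of del_info above. */
struct demo_expire {
	uint32_t expired;	/* timestamp that counts as already expired */
	size_t touched;		/* number of entries rewritten */
};

/* Invoked with the entry write-locked (wr=1 below), because it
 * rewrites the cached data's ttl, like zone_del_msg does. */
static void
demo_expire_cb(struct lruhash_entry* e, void* arg)
{
	struct demo_expire* inf = (struct demo_expire*)arg;
	struct demo_data* d = (struct demo_data*)e->data;
	d->ttl = inf->expired;
	inf->touched++;
}

/* Expire every entry of a slabbed table relative to time 'now'. */
static size_t
demo_expire_all(struct slabhash* sh, uint32_t now)
{
	struct demo_expire inf;
	inf.expired = now - 3;	/* same 3 second skew margin as the patch */
	inf.touched = 0;
	slabhash_traverse(sh, 1, &demo_expire_cb, &inf);
	return inf.touched;
}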

View File

@@ -174,6 +174,16 @@ struct lruhash* slabhash_gettable(struct slabhash* table, hashvalue_t hash);
  */
 void slabhash_setmarkdel(struct slabhash* table, lruhash_markdelfunc_t md);
+/**
+ * Traverse a slabhash.
+ * @param table: slabbed hash table.
+ * @param wr: if true, writelock is obtained, otherwise readlock.
+ * @param func: function to call for every element.
+ * @param arg: user argument to function.
+ */
+void slabhash_traverse(struct slabhash* table, int wr,
+	void (*func)(struct lruhash_entry*, void*), void* arg);
 /* --- test representation --- */
 /** test structure contains test key */
 struct slabhash_testkey {