Update locking management for iter_fwd and iter_hints methods. (#1054)

For fast reload, move most of the locking management into the iter_fwd
and iter_hints methods. The caller still has the ability to handle its
own locking, if desired, for atomic operations on sets of different
structs.

Co-authored-by: Wouter Wijngaards <wcawijngaards@users.noreply.github.com>
This commit is contained in:
Yorgos Thessalonikefs 2024-04-25 11:05:58 +02:00 committed by GitHub
parent d7353e6e99
commit 9b9bba9f02
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 296 additions and 152 deletions

View File

@ -839,6 +839,7 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
char b[260];
struct query_info qinfo;
struct iter_hints_stub* stub;
int nolock = 0;
regional_free_all(region);
qinfo.qname = nm;
qinfo.qname_len = nmlen;
@ -851,8 +852,7 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
"of %s\n", b))
return 0;
lock_rw_rdlock(&worker->env.fwds->lock);
dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass);
dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass, nolock);
if(dp) {
if(!ssl_printf(ssl, "forwarding request:\n")) {
lock_rw_unlock(&worker->env.fwds->lock);
@ -863,7 +863,6 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
lock_rw_unlock(&worker->env.fwds->lock);
return 1;
}
lock_rw_unlock(&worker->env.fwds->lock);
while(1) {
dp = dns_cache_find_delegation(&worker->env, nm, nmlen,
@ -898,9 +897,8 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
continue;
}
}
lock_rw_rdlock(&worker->env.hints->lock);
stub = hints_lookup_stub(worker->env.hints, nm, qinfo.qclass,
dp);
dp, nolock);
if(stub) {
if(stub->noprime) {
if(!ssl_printf(ssl, "The noprime stub servers "
@ -919,7 +917,6 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
print_dp_details(ssl, worker, stub->dp);
lock_rw_unlock(&worker->env.hints->lock);
} else {
lock_rw_unlock(&worker->env.hints->lock);
print_dp_main(ssl, dp, msg);
print_dp_details(ssl, worker, dp);
}

View File

@ -1992,10 +1992,9 @@ static int
print_root_fwds(RES* ssl, struct iter_forwards* fwds, uint8_t* root)
{
struct delegpt* dp;
lock_rw_rdlock(&fwds->lock);
dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN);
int nolock = 0;
dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN, nolock);
if(!dp) {
lock_rw_unlock(&fwds->lock);
return ssl_printf(ssl, "off (using root hints)\n");
}
/* if dp is returned it must be the root */
@ -2077,6 +2076,7 @@ do_forward(RES* ssl, struct worker* worker, char* args)
{
struct iter_forwards* fwd = worker->env.fwds;
uint8_t* root = (uint8_t*)"\000";
int nolock = 0;
if(!fwd) {
(void)ssl_printf(ssl, "error: structure not allocated\n");
return;
@ -2090,20 +2090,15 @@ do_forward(RES* ssl, struct worker* worker, char* args)
/* delete all the existing queries first */
mesh_delete_all(worker->env.mesh);
if(strcmp(args, "off") == 0) {
lock_rw_wrlock(&fwd->lock);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root);
lock_rw_unlock(&fwd->lock);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root, nolock);
} else {
struct delegpt* dp;
if(!(dp = parse_delegpt(ssl, args, root)))
return;
lock_rw_wrlock(&fwd->lock);
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
lock_rw_unlock(&fwd->lock);
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
return;
}
lock_rw_unlock(&fwd->lock);
}
send_ok(ssl);
}
@ -2162,10 +2157,12 @@ do_forward_add(RES* ssl, struct worker* worker, char* args)
int insecure = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, NULL, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
/* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors) {
if(!anchors_add_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
@ -2177,7 +2174,7 @@ do_forward_add(RES* ssl, struct worker* worker, char* args)
return;
}
}
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
lock_rw_unlock(&fwd->lock);
(void)ssl_printf(ssl, "error out of memory\n");
free(nm);
@ -2195,13 +2192,15 @@ do_forward_remove(RES* ssl, struct worker* worker, char* args)
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
/* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
free(nm);
send_ok(ssl);
@ -2215,10 +2214,12 @@ do_stub_add(RES* ssl, struct worker* worker, char* args)
int insecure = 0, prime = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, &prime, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
/* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors) {
@ -2232,7 +2233,7 @@ do_stub_add(RES* ssl, struct worker* worker, char* args)
return;
}
}
if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm)) {
if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock)) {
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
@ -2243,9 +2244,10 @@ do_stub_add(RES* ssl, struct worker* worker, char* args)
free(nm);
return;
}
if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime)) {
if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime,
nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
@ -2267,15 +2269,17 @@ do_stub_remove(RES* ssl, struct worker* worker, char* args)
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
/* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
free(nm);

View File

@ -359,30 +359,39 @@ forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
}
struct delegpt*
forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
int nolock)
{
rbnode_type* res = NULL;
struct iter_forward_zone* res;
struct iter_forward_zone key;
int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
res = rbtree_search(fwd->tree, &key);
if(res) return ((struct iter_forward_zone*)res)->dp;
return NULL;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
res = (struct iter_forward_zone*)rbtree_search(fwd->tree, &key);
has_dp = res && res->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
return has_dp?res->dp:NULL;
}
struct delegpt*
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
int nolock)
{
/* lookup the forward zone in the tree */
rbnode_type* res = NULL;
struct iter_forward_zone *result;
struct iter_forward_zone key;
int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
/* exact */
result = (struct iter_forward_zone*)res;
@ -390,8 +399,10 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
/* smaller element (or no element) */
int m;
result = (struct iter_forward_zone*)res;
if(!result || result->dclass != qclass)
if(!result || result->dclass != qclass) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return NULL;
}
/* count number of labels matched */
(void)dname_lab_cmp(result->name, result->namelabs, key.name,
key.namelabs, &m);
@ -401,20 +412,22 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
result = result->parent;
}
}
if(result)
return result->dp;
return NULL;
has_dp = result && result->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
return has_dp?result->dp:NULL;
}
struct delegpt*
forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass)
forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass, int nolock)
{
uint8_t root = 0;
return forwards_lookup(fwd, &root, qclass);
return forwards_lookup(fwd, &root, qclass, nolock);
}
int
forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
/* Finds next root item in forwards lookup tree.
* Caller needs to handle locking of the forwards structure. */
static int
next_root_locked(struct iter_forwards* fwd, uint16_t* dclass)
{
struct iter_forward_zone key;
rbnode_type* n;
@ -431,7 +444,7 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* root not first item? search for higher items */
*dclass = p->dclass + 1;
return forwards_next_root(fwd, dclass);
return next_root_locked(fwd, dclass);
}
/* find class n in tree, we may get a direct hit, or if we don't
* this is the last item of the previous class so rbtree_next() takes
@ -459,10 +472,21 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* not a root node, return next higher item */
*dclass = p->dclass+1;
return forwards_next_root(fwd, dclass);
return next_root_locked(fwd, dclass);
}
}
int
forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass, int nolock)
{
int ret;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
ret = next_root_locked(fwd, dclass);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return ret;
}
size_t
forwards_get_mem(struct iter_forwards* fwd)
{
@ -491,51 +515,78 @@ fwd_zone_find(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
}
int
forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp,
int nolock)
{
struct iter_forward_zone *z;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if((z=fwd_zone_find(fwd, c, dp->name)) != NULL) {
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
}
if(!forwards_insert(fwd, c, dp))
if(!forwards_insert(fwd, c, dp)) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
int nolock)
{
struct iter_forward_zone *z;
if(!(z=fwd_zone_find(fwd, c, nm)))
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(!(z=fwd_zone_find(fwd, c, nm))) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
}
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
int
forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
int nolock)
{
if(fwd_zone_find(fwd, c, nm) != NULL)
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(fwd_zone_find(fwd, c, nm) != NULL) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1; /* already a stub zone there */
}
if(!fwd_add_stub_hole(fwd, c, nm)) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock)
{
struct iter_forward_zone *z;
if(!(z=fwd_zone_find(fwd, c, nm)))
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(!(z=fwd_zone_find(fwd, c, nm))) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
if(z->dp != NULL)
}
if(z->dp != NULL) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* not a stub hole */
}
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
}

View File

@ -112,48 +112,61 @@ int forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg);
/**
* Find forward zone exactly by name
* The return value is contents of the forwards structure, caller should
* lock and unlock a readlock on the forwards structure.
* The return value is contents of the forwards structure.
* Caller should lock and unlock a readlock on the forwards structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the forwards structure if a
* value was returned.
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point or null.
*/
struct delegpt* forwards_find(struct iter_forwards* fwd, uint8_t* qname,
uint16_t qclass);
uint16_t qclass, int nolock);
/**
* Find forward zone information
* For this qname/qclass find forward zone information, returns delegation
* point with server names and addresses, or NULL if no forwarding is needed.
* The return value is contents of the forwards structure, caller should
* lock and unlock a readlock on the forwards structure.
* The return value is contents of the forwards structure.
* Caller should lock and unlock a readlock on the forwards structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the forwards structure if a
* value was returned.
*
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if the query has to be forwarded to that list,
* otherwise null.
*/
struct delegpt* forwards_lookup(struct iter_forwards* fwd,
uint8_t* qname, uint16_t qclass);
struct delegpt* forwards_lookup(struct iter_forwards* fwd,
uint8_t* qname, uint16_t qclass, int nolock);
/**
* Same as forwards_lookup, but for the root only
* @param fwd: forward storage.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if root forward exists, otherwise null.
*/
struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
uint16_t qclass);
struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
uint16_t qclass, int nolock);
/**
* Find next root item in forwards lookup tree.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a readlock on the forwards structure.
* @param fwd: the forward storage
* @param qclass: class to look at next, or higher.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false if none found, or if true stored in qclass.
*/
int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass);
int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass,
int nolock);
/**
* Get memory in use by forward storage
@ -169,42 +182,56 @@ int fwd_cmp(const void* k1, const void* k2);
/**
* Add zone to forward structure. For external use since it recalcs
* the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* forward zone. malloced.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
struct delegpt* dp);
int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
struct delegpt* dp, int nolock);
/**
* Remove zone from forward structure. For external use since it
* recalcs the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock);
/**
* Add stub hole (empty entry in forward table, that makes resolution skip
* a forward-zone because the stub zone should override the forward zone).
* Does not add one if not necessary.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock);
/**
* Remove stub hole, if one exists.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm);
uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_FWD_H */

View File

@ -441,6 +441,7 @@ read_root_hints_list(struct iter_hints* hints, struct config_file* cfg)
int
hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
{
int nolock = 1;
lock_rw_wrlock(&hints->lock);
hints_del_tree(hints);
name_tree_init(&hints->tree);
@ -458,7 +459,7 @@ hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
}
/* use fallback compiletime root hints */
if(!hints_lookup_root(hints, LDNS_RR_CLASS_IN)) {
if(!hints_find_root(hints, LDNS_RR_CLASS_IN, nolock)) {
struct delegpt* dp = compile_time_root_prime(cfg->do_ip4,
cfg->do_ip6);
verbose(VERB_ALGO, "no config, using builtin root hints.");
@ -477,21 +478,33 @@ hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
return 1;
}
struct delegpt*
hints_lookup_root(struct iter_hints* hints, uint16_t qclass)
struct delegpt*
hints_find(struct iter_hints* hints, uint8_t* qname, uint16_t qclass,
int nolock)
{
struct iter_hints_stub *stub;
size_t len;
int has_dp;
int labs = dname_count_size_labels(qname, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
qname, len, labs, qclass);
has_dp = stub && stub->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&hints->lock); }
return has_dp?stub->dp:NULL;
}
struct delegpt*
hints_find_root(struct iter_hints* hints, uint16_t qclass, int nolock)
{
uint8_t rootlab = 0;
struct iter_hints_stub *stub;
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
&rootlab, 1, 1, qclass);
if(!stub)
return NULL;
return stub->dp;
return hints_find(hints, &rootlab, qclass, nolock);
}
struct iter_hints_stub*
hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, struct delegpt* cache_dp)
hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, struct delegpt* cache_dp, int nolock)
{
size_t len;
int labs;
@ -499,14 +512,20 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
/* first lookup the stub */
labs = dname_count_size_labels(qname, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
len, labs, qclass);
if(!r) return NULL;
if(!r) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
/* If there is no cache (root prime situation) */
if(cache_dp == NULL) {
if(r->dp->namelabs != 1)
return r; /* no cache dp, use any non-root stub */
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
@ -523,12 +542,18 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
cache_dp->name, cache_dp->namelabs))
return r; /* need to prime this stub */
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
int hints_next_root(struct iter_hints* hints, uint16_t* qclass)
int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock)
{
return name_tree_next_root(&hints->tree, qclass);
int ret;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
ret = name_tree_next_root(&hints->tree, qclass);
if(!nolock) { lock_rw_unlock(&hints->lock); }
return ret;
}
size_t
@ -548,30 +573,41 @@ hints_get_mem(struct iter_hints* hints)
int
hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
int noprime)
int noprime, int nolock)
{
struct iter_hints_stub *z;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&hints->lock); }
if((z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
dp->name, dp->namelen, dp->namelabs, c)) != NULL) {
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
}
if(!hints_insert(hints, c, dp, noprime))
if(!hints_insert(hints, c, dp, noprime)) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return 0;
}
name_tree_init_parents(&hints->tree);
if(!nolock) { lock_rw_unlock(&hints->lock); }
return 1;
}
void
hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm)
hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm,
int nolock)
{
struct iter_hints_stub *z;
size_t len;
int labs = dname_count_size_labels(nm, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&hints->lock); }
if(!(z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
nm, len, labs, c)))
nm, len, labs, c))) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return; /* nothing to do */
}
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
name_tree_init_parents(&hints->tree);
if(!nolock) { lock_rw_unlock(&hints->lock); }
}

View File

@ -100,43 +100,66 @@ void hints_delete(struct iter_hints* hints);
int hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg);
/**
* Find root hints for the given class.
* The return value is contents of the hints structure, caller should
* lock and unlock a readlock on the hints structure.
* Find hints for the given class.
* The return value is contents of the hints structure.
* Caller should lock and unlock a readlock on the hints structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the hints structure if a
* value was returned.
* @param hints: hint storage.
* @param qname: the qname that generated the delegation point.
* @param qclass: class for which root hints are requested. host order.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
struct delegpt* hints_lookup_root(struct iter_hints* hints, uint16_t qclass);
struct delegpt* hints_find(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, int nolock);
/**
* Same as hints_find, but for the root only.
* @param hints: hint storage.
* @param qclass: class for which root hints are requested. host order.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
struct delegpt* hints_find_root(struct iter_hints* hints,
uint16_t qclass, int nolock);
/**
* Find next root hints (to cycle through all root hints).
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a readlock on the hints structure.
* @param hints: hint storage
* @param qclass: class for which root hints are sought.
* 0 means give the first available root hints class.
* x means, give class x or a higher class if any.
* returns the found class in this variable.
* @param nolock: Skip locking, locking is handled by the caller.
* @return true if a root hint class is found.
* false if no root hint class is found (qclass may have been changed).
*/
int hints_next_root(struct iter_hints* hints, uint16_t* qclass);
int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock);
/**
* Given a qname/qclass combination, and the delegation point from the cache
* for this qname/qclass, determine if this combination indicates that a
* stub hint exists and must be primed.
* The return value is contents of the hints structure, caller should
* lock and unlock a readlock on the hints structure.
* The return value is contents of the hints structure.
* Caller should lock and unlock a readlock on the hints structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the hints structure if a
* value was returned.
*
* @param hints: hint storage.
* @param qname: The qname that generated the delegation point.
* @param qclass: The qclass that generated the delegation point.
* @param dp: The cache generated delegation point.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A priming delegation point if there is a stub hint that must
* be primed, otherwise null.
*/
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp);
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/**
* Get memory in use by hints
@ -149,23 +172,30 @@ size_t hints_get_mem(struct iter_hints* hints);
/**
* Add stub to hints structure. For external use since it recalcs
* the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* hints stub. malloced.
* @param noprime: set noprime option to true or false on new hint stub.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
int noprime);
int noprime, int nolock);
/**
* Remove stub from hints structure. For external use since it
* recalcs the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of stub zone
* @param nm: name of stub zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm);
void hints_delete_stub(struct iter_hints* hints, uint16_t c,
uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_HINTS_H */

View File

@ -1285,11 +1285,13 @@ iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
{
uint16_t c1 = *c, c2 = *c;
int r1, r2;
int nolock = 1;
/* prelock both forwards and hints for atomic read. */
lock_rw_rdlock(&fwd->lock);
lock_rw_rdlock(&hints->lock);
r1 = hints_next_root(hints, &c1);
r2 = forwards_next_root(fwd, &c2);
r1 = hints_next_root(hints, &c1, nolock);
r2 = forwards_next_root(fwd, &c2, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&hints->lock);
@ -1462,13 +1464,16 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
{
struct iter_hints_stub *stub;
struct delegpt *dp;
int nolock = 1;
/* Check for stub. */
/* Lock both forwards and hints for atomic read. */
lock_rw_rdlock(&qstate->env->fwds->lock);
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
qinf->qclass, NULL);
dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
qinf->qclass, NULL, nolock);
dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass,
nolock);
/* see if forward or stub is more pertinent */
if(stub && stub->dp && dp) {

View File

@ -678,39 +678,40 @@ errinf_reply(struct module_qstate* qstate, struct iter_qstate* iq)
/** see if last resort is possible - does config allow queries to parent */
static int
can_have_last_resort(struct module_env* env, uint8_t* nm, size_t nmlen,
can_have_last_resort(struct module_env* env, uint8_t* nm, size_t ATTR_UNUSED(nmlen),
uint16_t qclass, int* have_dp, struct delegpt** retdp,
struct regional* region)
{
struct delegpt* fwddp;
struct iter_hints_stub* stub;
int labs = dname_count_labels(nm);
struct delegpt* dp = NULL;
int nolock = 0;
/* do not process a last resort (the parent side) if a stub
* or forward is configured, because we do not want to go 'above'
* the configured servers */
lock_rw_rdlock(&env->hints->lock);
if(!dname_is_root(nm) && (stub = (struct iter_hints_stub*)
name_tree_find(&env->hints->tree, nm, nmlen, labs, qclass)) &&
if(!dname_is_root(nm) &&
(dp = hints_find(env->hints, nm, qclass, nolock)) &&
/* has_parent side is turned off for stub_first, where we
* are allowed to go to the parent */
stub->dp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(stub->dp, region);
dp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->hints->lock);
if(have_dp) *have_dp = 1;
return 0;
}
lock_rw_unlock(&env->hints->lock);
lock_rw_rdlock(&env->fwds->lock);
if((fwddp = forwards_find(env->fwds, nm, qclass)) &&
if(dp) {
lock_rw_unlock(&env->hints->lock);
dp = NULL;
}
if((dp = forwards_find(env->fwds, nm, qclass, nolock)) &&
/* has_parent_side is turned off for forward_first, where
* we are allowed to go to the parent */
fwddp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(fwddp, region);
dp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->fwds->lock);
if(have_dp) *have_dp = 1;
return 0;
}
lock_rw_unlock(&env->fwds->lock);
/* lock_() calls are macros that could be nothing, surround in {} */
if(dp) { lock_rw_unlock(&env->fwds->lock); }
return 1;
}
@ -886,13 +887,12 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
{
struct delegpt* dp;
struct module_qstate* subq;
int nolock = 0;
verbose(VERB_DETAIL, "priming . %s NS",
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
lock_rw_rdlock(&qstate->env->hints->lock);
dp = hints_lookup_root(qstate->env->hints, qclass);
dp = hints_find_root(qstate->env->hints, qclass, nolock);
if(!dp) {
lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "Cannot prime due to lack of hints");
return 0;
}
@ -956,15 +956,13 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
struct iter_hints_stub* stub;
struct delegpt* stub_dp;
struct module_qstate* subq;
int nolock = 0;
if(!qname) return 0;
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp,
nolock);
/* The stub (if there is one) does not need priming. */
if(!stub) {
lock_rw_unlock(&qstate->env->hints->lock);
return 0;
}
if(!stub) return 0;
stub_dp = stub->dp;
/* if we have an auth_zone dp, and stub is equal, don't prime stub
* yet, unless we want to fallback and avoid the auth_zone */
@ -1319,6 +1317,7 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
struct delegpt* dp;
uint8_t* delname = iq->qchase.qname;
size_t delnamelen = iq->qchase.qname_len;
int nolock = 0;
if(iq->refetch_glue && iq->dp) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
@ -1327,12 +1326,9 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
&& !dname_is_root(iq->qchase.qname))
dname_remove_label(&delname, &delnamelen);
lock_rw_rdlock(&qstate->env->fwds->lock);
dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
if(!dp) {
lock_rw_unlock(&qstate->env->fwds->lock);
return 0;
}
dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass,
nolock);
if(!dp) return 0;
/* send recursion desired to forward addr */
iq->chase_flags |= BIT_RD;
iq->dp = delegpt_copy(dp, qstate->region);
@ -1633,6 +1629,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
* root priming situation. */
if(iq->dp == NULL) {
int r;
int nolock = 0;
/* if under auth zone, no prime needed */
if(!auth_zone_delegpt(qstate, iq, delname, delnamelen))
return error_response(qstate, id,
@ -1646,17 +1643,14 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
break; /* got noprime-stub-zone, continue */
else if(r)
return 0; /* stub prime request made */
lock_rw_rdlock(&qstate->env->fwds->lock);
if(forwards_lookup_root(qstate->env->fwds,
iq->qchase.qclass)) {
if(forwards_lookup_root(qstate->env->fwds,
iq->qchase.qclass, nolock)) {
lock_rw_unlock(&qstate->env->fwds->lock);
/* forward zone root, no root prime needed */
/* fill in some dp - safety belt */
lock_rw_rdlock(&qstate->env->hints->lock);
iq->dp = hints_lookup_root(qstate->env->hints,
iq->qchase.qclass);
iq->dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
if(!iq->dp) {
lock_rw_unlock(&qstate->env->hints->lock);
log_err("internal error: no hints dp");
errinf(qstate, "no hints for this class");
return error_response(qstate, id,
@ -1672,7 +1666,6 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
return next_state(iq, INIT_REQUEST_2_STATE);
}
lock_rw_unlock(&qstate->env->fwds->lock);
/* Note that the result of this will set a new
* DelegationPoint based on the result of priming. */
if(!prime_root(qstate, iq, id, iq->qchase.qclass))
@ -1730,15 +1723,14 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(dname_is_root(iq->dp->name)) {
/* use safety belt */
int nolock = 0;
verbose(VERB_QUERY, "Cache has root NS but "
"no addresses. Fallback to the safety belt.");
lock_rw_rdlock(&qstate->env->hints->lock);
iq->dp = hints_lookup_root(qstate->env->hints,
iq->qchase.qclass);
iq->dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
/* note deleg_msg is from previous lookup,
* but RD is on, so it is not used */
if(!iq->dp) {
lock_rw_unlock(&qstate->env->hints->lock);
log_err("internal error: no hints dp");
return error_response(qstate, id,
LDNS_RCODE_REFUSED);
@ -1800,6 +1792,7 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
delnamelen = iq->qchase.qname_len;
if(iq->refetch_glue) {
struct iter_hints_stub* stub;
int nolock = 0;
if(!iq->dp) {
log_err("internal or malloc fail: no dp for refetch");
errinf(qstate, "malloc failure, no delegation info");
@ -1807,16 +1800,16 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
}
/* Do not send queries above stub, do not set delname to dp if
* this is above stub without stub-first. */
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(
qstate->env->hints, iq->qchase.qname, iq->qchase.qclass,
iq->dp);
iq->dp, nolock);
if(!stub || !stub->dp->has_parent_side_NS ||
dname_subdomain_c(iq->dp->name, stub->dp->name)) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
}
lock_rw_unlock(&qstate->env->hints->lock);
/* lock_() calls are macros that could be nothing, surround in {} */
if(stub) { lock_rw_unlock(&qstate->env->hints->lock); }
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue) {
if(!dname_is_root(delname))
@ -2130,24 +2123,25 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(!iq->dp->has_parent_side_NS && dname_is_root(iq->dp->name)) {
struct delegpt* p;
lock_rw_rdlock(&qstate->env->hints->lock);
p = hints_lookup_root(qstate->env->hints, iq->qchase.qclass);
if(p) {
struct delegpt* dp;
int nolock = 0;
dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
if(dp) {
struct delegpt_addr* a;
iq->chase_flags &= ~BIT_RD; /* go to authorities */
for(ns = p->nslist; ns; ns=ns->next) {
for(ns = dp->nslist; ns; ns=ns->next) {
(void)delegpt_add_ns(iq->dp, qstate->region,
ns->name, ns->lame, ns->tls_auth_name,
ns->port);
}
for(a = p->target_list; a; a=a->next_target) {
for(a = dp->target_list; a; a=a->next_target) {
(void)delegpt_add_addr(iq->dp, qstate->region,
&a->addr, a->addrlen, a->bogus,
a->lame, a->tls_auth_name, -1, NULL);
}
lock_rw_unlock(&qstate->env->hints->lock);
}
lock_rw_unlock(&qstate->env->hints->lock);
iq->dp->has_parent_side_NS = 1;
} else if(!iq->dp->has_parent_side_NS) {
if(!iter_lookup_parent_NS_from_cache(qstate->env, iq->dp,

View File

@ -1416,7 +1416,7 @@ struct delegpt* dns_cache_find_delegation(struct module_env* env,
int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
struct delegpt* dp, int supports_ipv4, int supports_ipv6, int use_nat64);
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp);
uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/* Custom function to perform logic similar to the one in daemon/cachedump.c */
struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t nmlen);
@ -1433,6 +1433,7 @@ struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t n
struct query_info qinfo;
struct iter_hints_stub* stub;
uint32_t timenow = *qstate->env->now;
int nolock = 0;
regional_free_all(region);
qinfo.qname = (uint8_t*)nm;
@ -1455,14 +1456,13 @@ struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t n
dname_str((uint8_t*)nm, b);
continue;
}
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qinfo.qname, qinfo.qclass, dp);
stub = hints_lookup_stub(qstate->env->hints, qinfo.qname,
qinfo.qclass, dp, nolock);
if (stub) {
struct delegpt* stubdp = delegpt_copy(stub->dp, region);
lock_rw_unlock(&qstate->env->hints->lock);
return stubdp;
} else {
lock_rw_unlock(&qstate->env->hints->lock);
return dp;
}
}