- Fix to rename internally used types from _t to _type, because _t
  type names are reserved by POSIX.
- iana portlist update


git-svn-id: file:///svn/unbound/trunk@3989 be551aaa-1e26-0410-a405-d3ace91eadb9
Wouter Wijngaards 2017-01-19 10:25:41 +00:00
parent 5d522b0017
commit c010e93d4a
92 changed files with 654 additions and 642 deletions
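
For context, a minimal sketch of the rename pattern applied throughout this commit (the struct and typedef names here are illustrative, not taken from the diff): POSIX reserves identifiers ending in _t for the implementation once standard headers are included, so internal typedefs move to a _type suffix while the underlying types stay the same.

/* before: the _t suffix sits in the POSIX-reserved namespace */
typedef struct my_widget my_widget_t;

/* after: same typedef, renamed out of the reserved namespace */
typedef struct my_widget my_widget_type;

/* call sites change mechanically */
my_widget_type* w;   /* was: my_widget_t* w; */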


@ -48,7 +48,7 @@ void _ARC4_UNLOCK(void)
}
#else /* !THREADS_DISABLED */
static lock_quick_t arc4lock;
static lock_quick_type arc4lock;
static int arc4lockinit = 0;
void _ARC4_LOCK(void)


@ -76,7 +76,7 @@ struct acl_list {
* Tree of the addresses that are allowed/blocked.
* contents of type acl_addr.
*/
rbtree_t tree;
rbtree_type tree;
};
/**


@ -1418,7 +1418,7 @@ static void
do_cache_remove(struct worker* worker, uint8_t* nm, size_t nmlen,
uint16_t t, uint16_t c)
{
hashvalue_t h;
hashvalue_type h;
struct query_info k;
rrset_cache_remove(worker->env.rrset_cache, nm, nmlen, t, c, 0);
if(t == LDNS_RR_TYPE_SOA)


@ -787,7 +787,7 @@ worker_handle_request(struct comm_point* c, void* arg, int error,
{
struct worker* worker = (struct worker*)arg;
int ret;
hashvalue_t h;
hashvalue_type h;
struct lruhash_entry* e;
struct query_info qinfo;
struct edns_data edns;


@ -85,7 +85,7 @@ struct worker {
/** global shared daemon structure */
struct daemon* daemon;
/** thread id */
ub_thread_t thr_id;
ub_thread_type thr_id;
/** pipe, for commands for this worker */
struct tube* cmd;
/** the event base this worker works with */


@ -1,6 +1,9 @@
19 January 2017: Wouter
- Fix to Rename ub_callback_t to ub_callback_type, because POSIX
reserves _t typedefs.
- Fix to rename internally used types from _t to _type, because _t
type names are reserved by POSIX.
- iana portlist update
12 January 2017: Wouter
- Fix to also block meta types 128 through to 248 with formerr.


@ -58,7 +58,7 @@ struct iter_donotq {
* contents of type addr_tree_node. Each node is an address span
* that must not be used to send queries to.
*/
rbtree_t tree;
rbtree_type tree;
};
/**


@ -82,7 +82,7 @@ static void fwd_zone_free(struct iter_forward_zone* n)
free(n);
}
static void delfwdnode(rbnode_t* n, void* ATTR_UNUSED(arg))
static void delfwdnode(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct iter_forward_zone* node = (struct iter_forward_zone*)n;
fwd_zone_free(node);
@ -332,7 +332,7 @@ forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
struct delegpt*
forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
{
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct iter_forward_zone key;
key.node.key = &key;
key.dclass = qclass;
@ -347,7 +347,7 @@ struct delegpt*
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
{
/* lookup the forward zone in the tree */
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct iter_forward_zone *result;
struct iter_forward_zone key;
key.node.key = &key;
@ -388,7 +388,7 @@ int
forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
{
struct iter_forward_zone key;
rbnode_t* n;
rbnode_type* n;
struct iter_forward_zone* p;
if(*dclass == 0) {
/* first root item is first item in tree */
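
The forwards.c hunks above show the lookup pattern that recurs across the renamed rbtree users: a key struct whose first member is an rbnode_type, filled in on the stack and handed to rbtree_search. A hedged sketch of that pattern with an illustrative record type (the tree is assumed to have been created with a compare function over these fields):

/* hypothetical record type; the real trees in this commit (forwards,
 * hints, local zones, ...) follow the same shape */
struct example_zone {
	rbnode_type node;   /* rbtree node, must be the first member */
	uint16_t dclass;
	uint8_t* name;
};

static struct example_zone*
example_find(rbtree_type* tree, uint8_t* name, uint16_t dclass)
{
	struct example_zone key;
	key.node.key = &key;    /* node key points back at the lookup struct */
	key.dclass = dclass;
	key.name = name;
	return (struct example_zone*)rbtree_search(tree, &key);
}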


@ -57,7 +57,7 @@ struct iter_forwards {
* match which gives the ancestor needed.
* contents of type iter_forward_zone.
*/
rbtree_t* tree;
rbtree_type* tree;
};
/**
@ -65,7 +65,7 @@ struct iter_forwards {
*/
struct iter_forward_zone {
/** redblacktree node, key is this structure: class and name */
rbnode_t node;
rbnode_type node;
/** name */
uint8_t* name;
/** length of name */


@ -67,7 +67,7 @@ static void hints_stub_free(struct iter_hints_stub* s)
free(s);
}
static void delhintnode(rbnode_t* n, void* ATTR_UNUSED(arg))
static void delhintnode(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct iter_hints_stub* node = (struct iter_hints_stub*)n;
hints_stub_free(node);


@ -59,7 +59,7 @@ struct iter_hints {
* contents of type iter_hints_stub. The class IN root is in here.
* uses name_tree_node from dnstree.h.
*/
rbtree_t tree;
rbtree_type tree;
};
/**


@ -60,14 +60,14 @@ struct iter_priv {
* contents of type addr_tree_node.
* No further data need, only presence or absence.
*/
rbtree_t a;
rbtree_type a;
/**
* Tree of the domains spans that are allowed to contain
* the blocked address spans.
* contents of type name_tree_node.
* No further data need, only presence or absence.
*/
rbtree_t n;
rbtree_type n;
};
/**


@ -161,8 +161,8 @@ mark_additional_rrset(sldns_buffer* pkt, struct msg_parse* msg,
for(rr = rrset->rr_first; rr; rr = rr->next) {
if(get_additional_name(rrset, rr, &nm, &nmlen, pkt)) {
/* mark A */
hashvalue_t h = pkt_hash_rrset(pkt, nm, LDNS_RR_TYPE_A,
rrset->rrset_class, 0);
hashvalue_type h = pkt_hash_rrset(pkt, nm,
LDNS_RR_TYPE_A, rrset->rrset_class, 0);
struct rrset_parse* r = msgparse_hashtable_lookup(
msg, pkt, h, 0, nm, nmlen,
LDNS_RR_TYPE_A, rrset->rrset_class);


@ -108,7 +108,7 @@ read_fetch_policy(struct iter_env* ie, const char* str)
/** apply config caps whitelist items to name tree */
static int
caps_white_apply_cfg(rbtree_t* ntree, struct config_file* cfg)
caps_white_apply_cfg(rbtree_type* ntree, struct config_file* cfg)
{
struct config_strlist* p;
for(p=cfg->caps_whitelist; p; p=p->next) {


@ -88,7 +88,7 @@ iter_init(struct module_env* env, int id)
/** delete caps_whitelist element */
static void
caps_free(struct rbnode_t* n, void* ATTR_UNUSED(d))
caps_free(struct rbnode_type* n, void* ATTR_UNUSED(d))
{
if(n) {
free(((struct name_tree_node*)n)->name);


@ -51,7 +51,7 @@ struct iter_forwards;
struct iter_donotq;
struct iter_prep_list;
struct iter_priv;
struct rbtree_t;
struct rbtree_type;
/** max number of targets spawned for a query and its subqueries */
#define MAX_TARGET_COUNT 64
@ -115,7 +115,7 @@ struct iter_env {
struct iter_priv* priv;
/** whitelist for capsforid names */
struct rbtree_t* caps_white;
struct rbtree_type* caps_white;
/** The maximum dependency depth that this resolver will pursue. */
int max_dependency_depth;


@ -61,17 +61,17 @@ struct ub_event_base;
struct ub_ctx {
/* --- pipes --- */
/** mutex on query write pipe */
lock_basic_t qqpipe_lock;
lock_basic_type qqpipe_lock;
/** the query write pipe */
struct tube* qq_pipe;
/** mutex on result read pipe */
lock_basic_t rrpipe_lock;
lock_basic_type rrpipe_lock;
/** the result read pipe */
struct tube* rr_pipe;
/* --- shared data --- */
/** mutex for access to env.cfg, finalized and dothread */
lock_basic_t cfglock;
lock_basic_type cfglock;
/**
* The context has been finalized
* This is after config when the first resolve is done.
@ -84,7 +84,7 @@ struct ub_ctx {
/** pid of bg worker process */
pid_t bg_pid;
/** tid of bg worker thread */
ub_thread_t bg_tid;
ub_thread_type bg_tid;
/** do threading (instead of forking) for async resolution */
int dothread;
@ -129,7 +129,7 @@ struct ub_ctx {
* Used to see if querynum is free for use.
* Content of type ctx_query.
*/
rbtree_t queries;
rbtree_type queries;
};
/**
@ -140,7 +140,7 @@ struct ub_ctx {
*/
struct ctx_query {
/** node in rbtree, must be first entry, key is ptr to the querynum */
struct rbnode_t node;
struct rbnode_type node;
/** query id number, key for node */
int querynum;
/** was this an async query? */


@ -215,7 +215,7 @@ ub_ctx_create_event(struct event_base* eb)
/** delete q */
static void
delq(rbnode_t* n, void* ATTR_UNUSED(arg))
delq(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct ctx_query* q = (struct ctx_query*)n;
context_query_delete(q);
@ -706,7 +706,7 @@ ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
int
ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
int rrclass, void* mydata, ub_event_callback_t callback, int* async_id)
int rrclass, void* mydata, ub_event_callback_type callback, int* async_id)
{
struct ctx_query* q;
int r;


@ -639,7 +639,7 @@ libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
enum sec_status s, char* why_bogus)
{
struct ctx_query* q = (struct ctx_query*)arg;
ub_event_callback_t cb = (ub_event_callback_t)q->cb;
ub_event_callback_type cb = (ub_event_callback_type)q->cb;
void* cb_arg = q->cb_arg;
int cancelled = q->cancelled;


@ -170,7 +170,7 @@ struct ub_event {
struct ub_event_vmt* vmt;
};
typedef void (*ub_event_callback_t)(void*, int, void*, int, int, char*);
typedef void (*ub_event_callback_type)(void*, int, void*, int, int, char*);
/**
* Create a resolving and validation context.
@ -254,7 +254,7 @@ int ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base);
* @return 0 if OK, else error.
*/
int ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
int rrclass, void* mydata, ub_event_callback_t callback, int* async_id);
int rrclass, void* mydata, ub_event_callback_type callback, int* async_id);
#ifdef __cplusplus
}
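
As a usage-level illustration of the renamed callback type, a hedged sketch built only from the declarations above: a callback matching ub_event_callback_type and a call to ub_resolve_event(). The parameter meanings in the callback (result code, wire packet and length, security status, bogus reason) and the header name are assumptions, not shown in this diff.

#include <stdio.h>
#include "unbound-event.h"  /* assumed header name for the declarations above */

/* hypothetical callback; parameter meanings are assumed, see above */
static void my_lookup_done(void* mydata, int rcode, void* packet,
	int packet_len, int sec, char* why_bogus)
{
	(void)mydata; (void)packet; (void)packet_len; (void)why_bogus;
	printf("lookup done: rcode %d, security %d\n", rcode, sec);
}

/* later, with a ctx from ub_ctx_create_event() and async_id an int:
 *   ub_resolve_event(ctx, "www.example.com", 1, 1, NULL,
 *       my_lookup_done, &async_id);
 */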


@ -106,7 +106,7 @@ store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
void
dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside,
hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
struct reply_info* qrep, struct regional* region)
{
struct msgreply_entry* e;
@ -188,7 +188,7 @@ msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
{
struct lruhash_entry* e;
struct query_info k;
hashvalue_t h;
hashvalue_type h;
k.qname = qname;
k.qname_len = qnamelen;
@ -709,7 +709,7 @@ dns_cache_lookup(struct module_env* env,
{
struct lruhash_entry* e;
struct query_info k;
hashvalue_t h;
hashvalue_type h;
time_t now = *env->now;
struct ub_packed_rrset_key* rrset;
@ -865,7 +865,7 @@ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
} else {
/* store msg, and rrsets */
struct query_info qinf;
hashvalue_t h;
hashvalue_type h;
qinf = *msgqinf;
qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len);


@ -106,7 +106,7 @@ int dns_cache_store(struct module_env* env, struct query_info* qinf,
* @param region: to allocate into for qmsg.
*/
void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside,
hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
struct reply_info* qrep, struct regional* region);
/**


@ -260,7 +260,7 @@ infra_create(struct config_file* cfg)
}
/** delete domain_limit entries */
static void domain_limit_free(rbnode_t* n, void* ATTR_UNUSED(arg))
static void domain_limit_free(rbnode_type* n, void* ATTR_UNUSED(arg))
{
if(n) {
free(((struct domain_limit_data*)n)->node.name);
@ -300,11 +300,11 @@ infra_adjust(struct infra_cache* infra, struct config_file* cfg)
/** calculate the hash value for a host key
* set use_port to a non-0 number to use the port in
* the hash calculation; 0 to ignore the port.*/
static hashvalue_t
static hashvalue_type
hash_addr(struct sockaddr_storage* addr, socklen_t addrlen,
int use_port)
{
hashvalue_t h = 0xab;
hashvalue_type h = 0xab;
/* select the pieces to hash, some OS have changing data inside */
if(addr_is_ip6(addr, addrlen)) {
struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr;
@ -325,7 +325,7 @@ hash_addr(struct sockaddr_storage* addr, socklen_t addrlen,
}
/** calculate infra hash for a key */
static hashvalue_t
static hashvalue_type
hash_infra(struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* name)
{
return dname_query_hash(name, hash_addr(addr, addrlen, 1));
@ -775,7 +775,7 @@ static struct lruhash_entry* infra_find_ratedata(struct infra_cache* infra,
uint8_t* name, size_t namelen, int wr)
{
struct rate_key key;
hashvalue_t h = dname_query_hash(name, 0xab);
hashvalue_type h = dname_query_hash(name, 0xab);
memset(&key, 0, sizeof(key));
key.name = name;
key.namelen = namelen;
@ -788,7 +788,7 @@ struct lruhash_entry* infra_find_ip_ratedata(struct infra_cache* infra,
struct comm_reply* repinfo, int wr)
{
struct ip_rate_key key;
hashvalue_t h = hash_addr(&(repinfo->addr),
hashvalue_type h = hash_addr(&(repinfo->addr),
repinfo->addrlen, 0);
memset(&key, 0, sizeof(key));
key.addr = repinfo->addr;
@ -801,7 +801,7 @@ struct lruhash_entry* infra_find_ip_ratedata(struct infra_cache* infra,
static void infra_create_ratedata(struct infra_cache* infra,
uint8_t* name, size_t namelen, time_t timenow)
{
hashvalue_t h = dname_query_hash(name, 0xab);
hashvalue_type h = dname_query_hash(name, 0xab);
struct rate_key* k = (struct rate_key*)calloc(1, sizeof(*k));
struct rate_data* d = (struct rate_data*)calloc(1, sizeof(*d));
if(!k || !d) {
@ -829,7 +829,7 @@ static void infra_create_ratedata(struct infra_cache* infra,
static void infra_ip_create_ratedata(struct infra_cache* infra,
struct comm_reply* repinfo, time_t timenow)
{
hashvalue_t h = hash_addr(&(repinfo->addr),
hashvalue_type h = hash_addr(&(repinfo->addr),
repinfo->addrlen, 0);
struct ip_rate_key* k = (struct ip_rate_key*)calloc(1, sizeof(*k));
struct ip_rate_data* d = (struct ip_rate_data*)calloc(1, sizeof(*d));


@ -117,7 +117,7 @@ struct infra_cache {
/** hash table with query rates per name: rate_key, rate_data */
struct slabhash* domain_rates;
/** ratelimit settings for domains, struct domain_limit_data */
rbtree_t domain_limits;
rbtree_type domain_limits;
/** hash table with query rates per client ip: ip_rate_key, ip_rate_data */
struct slabhash* client_ip_rates;
};


@ -91,7 +91,7 @@ struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r,
void
rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
hashvalue_t hash, rrset_id_t id)
hashvalue_type hash, rrset_id_type id)
{
struct lruhash* table = slabhash_gettable(&r->table, hash);
/*
@ -186,7 +186,7 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
{
struct lruhash_entry* e;
struct ub_packed_rrset_key* k = ref->key;
hashvalue_t h = k->entry.hash;
hashvalue_type h = k->entry.hash;
uint16_t rrset_type = ntohs(k->rk.type);
int equal = 0;
log_assert(ref->id != 0 && k->id != 0);
@ -303,10 +303,10 @@ void
rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch,
struct rrset_ref* ref, size_t count)
{
hashvalue_t* h;
hashvalue_type* h;
size_t i;
if(count > RR_COUNT_MAX || !(h = (hashvalue_t*)regional_alloc(scratch,
sizeof(hashvalue_t)*count))) {
if(count > RR_COUNT_MAX || !(h = (hashvalue_type*)regional_alloc(
scratch, sizeof(hashvalue_type)*count))) {
log_warn("rrset LRU: memory allocation failed");
h = NULL;
} else /* store hash values */


@ -102,7 +102,7 @@ struct rrset_cache* rrset_cache_adjust(struct rrset_cache* r,
* @param id: used to check that the item is unchanged and not deleted.
*/
void rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
hashvalue_t hash, rrset_id_t id);
hashvalue_type hash, rrset_id_type id);
/**
* Update an rrset in the rrset cache. Stores the information for later use.


@ -1163,7 +1163,7 @@ listen_cp_insert(struct comm_point* c, struct listen_dnsport* front)
struct listen_dnsport*
listen_create(struct comm_base* base, struct listen_port* ports,
size_t bufsize, int tcp_accept_count, void* sslctx,
struct dt_env* dtenv, comm_point_callback_t* cb, void *cb_arg)
struct dt_env* dtenv, comm_point_callback_type* cb, void *cb_arg)
{
struct listen_dnsport* front = (struct listen_dnsport*)
malloc(sizeof(struct listen_dnsport));


@ -137,7 +137,7 @@ void listening_ports_free(struct listen_port* list);
*/
struct listen_dnsport* listen_create(struct comm_base* base,
struct listen_port* ports, size_t bufsize, int tcp_accept_count,
void* sslctx, struct dt_env *dtenv, comm_point_callback_t* cb,
void* sslctx, struct dt_env *dtenv, comm_point_callback_type* cb,
void* cb_arg);
/**


@ -74,7 +74,7 @@ local_zones_create(void)
/** helper traverse to delete zones */
static void
lzdel(rbnode_t* n, void* ATTR_UNUSED(arg))
lzdel(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct local_zone* z = (struct local_zone*)n->key;
local_zone_delete(z);
@ -165,7 +165,7 @@ local_zone_create(uint8_t* nm, size_t len, int labs,
return NULL;
}
rbtree_init(&z->data, &local_data_cmp);
lock_protect(&z->lock, &z->parent, sizeof(*z)-sizeof(rbnode_t));
lock_protect(&z->lock, &z->parent, sizeof(*z)-sizeof(rbnode_type));
/* also the zones->lock protects node, parent, name*, class */
return z;
}
@ -629,7 +629,7 @@ lz_enter_override(struct local_zones* zones, char* zname, char* netblock,
/* create netblock addr_tree if not present yet */
if(!z->override_tree) {
z->override_tree = (struct rbtree_t*)regional_alloc_zero(
z->override_tree = (struct rbtree_type*)regional_alloc_zero(
z->region, sizeof(*z->override_tree));
if(!z->override_tree) {
lock_rw_unlock(&z->lock);
@ -1060,7 +1060,7 @@ local_zones_tags_lookup(struct local_zones* zones,
uint8_t* name, size_t len, int labs, uint16_t dclass,
uint8_t* taglist, size_t taglen, int ignoretags)
{
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct local_zone *result;
struct local_zone key;
int m;
@ -1494,8 +1494,8 @@ lz_inform_print(struct local_zone* z, struct query_info* qinfo,
static enum localzone_type
lz_type(uint8_t *taglist, size_t taglen, uint8_t *taglist2, size_t taglen2,
uint8_t *tagactions, size_t tagactionssize, enum localzone_type lzt,
struct comm_reply* repinfo, struct rbtree_t* override_tree, int* tag,
char** tagname, int num_tags)
struct comm_reply* repinfo, struct rbtree_type* override_tree,
int* tag, char** tagname, int num_tags)
{
size_t i, j;
uint8_t tagmatch;


@ -95,9 +95,9 @@ enum localzone_type {
*/
struct local_zones {
/** lock on the localzone tree */
lock_rw_t lock;
lock_rw_type lock;
/** rbtree of struct local_zone */
rbtree_t ztree;
rbtree_type ztree;
};
/**
@ -105,7 +105,7 @@ struct local_zones {
*/
struct local_zone {
/** rbtree node, key is name and class */
rbnode_t node;
rbnode_type node;
/** parent zone, if any. */
struct local_zone* parent;
@ -123,7 +123,7 @@ struct local_zone {
* For the node, parent, name, namelen, namelabs, dclass, you
* need to also hold the zones_tree lock to change them (or to
* delete this zone) */
lock_rw_t lock;
lock_rw_type lock;
/** how to process zone */
enum localzone_type type;
@ -133,14 +133,14 @@ struct local_zone {
size_t taglen;
/** netblock addr_tree with struct local_zone_override information
* or NULL if there are no override elements */
struct rbtree_t* override_tree;
struct rbtree_type* override_tree;
/** in this region the zone's data is allocated.
* the struct local_zone itself is malloced. */
struct regional* region;
/** local data for this zone
* rbtree of struct local_data */
rbtree_t data;
rbtree_type data;
/** if data contains zone apex SOA data, this is a ptr to it. */
struct ub_packed_rrset_key* soa;
};
@ -150,7 +150,7 @@ struct local_zone {
*/
struct local_data {
/** rbtree node, key is name only */
rbnode_t node;
rbnode_type node;
/** domain name */
uint8_t* name;
/** length of name */


@ -203,7 +203,7 @@ mesh_create(struct module_stack* stack, struct module_env* env)
/** help mesh delete delete mesh states */
static void
mesh_delete_helper(rbnode_t* n)
mesh_delete_helper(rbnode_type* n)
{
struct mesh_state* mstate = (struct mesh_state*)n->key;
/* perform a full delete, not only 'cleanup' routine,
@ -321,7 +321,7 @@ void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
/* see if it already exists, if not, create one */
if(!s) {
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
@ -409,7 +409,7 @@ void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
int
mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, struct edns_data* edns, sldns_buffer* buf,
uint16_t qid, mesh_cb_func_t cb, void* cb_arg)
uint16_t qid, mesh_cb_func_type cb, void* cb_arg)
{
struct mesh_state* s = NULL;
int unique = edns_unique_mesh_state(edns->opt_list, mesh->env);
@ -423,7 +423,7 @@ mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
/* see if it already exists, if not, create one */
if(!s) {
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
@ -479,7 +479,7 @@ void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&(BIT_RD|BIT_CD),
0, 0);
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
/* already exists, and for a different purpose perhaps.
* if mesh_no_list, keep it that way. */
@ -729,7 +729,7 @@ void mesh_detach_subs(struct module_qstate* qstate)
struct mesh_area* mesh = qstate->env->mesh;
struct mesh_state_ref* ref, lookup;
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
lookup.node.key = &lookup;
lookup.s = qstate->mesh_info;
@ -764,7 +764,7 @@ int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
}
if(!sub) {
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
/* create a new one */
sub = mesh_state_create(qstate->env, qinfo, qflags, prime, valrec);
@ -809,7 +809,7 @@ int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
{
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
struct rbnode_type* n;
#endif
struct mesh_state_ref* subref; /* points to sub, inserted in super */
struct mesh_state_ref* superref; /* points to super, inserted in sub */
@ -1080,7 +1080,7 @@ struct mesh_state* mesh_area_find(struct mesh_area* mesh,
}
int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
sldns_buffer* buf, mesh_cb_func_t cb, void* cb_arg,
sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg,
uint16_t qid, uint16_t qflags)
{
struct mesh_cb* r = regional_alloc(s->s.region,


@ -83,9 +83,9 @@ struct mesh_area {
struct module_env* env;
/** set of runnable queries (mesh_state.run_node) */
rbtree_t run;
rbtree_type run;
/** rbtree of all current queries (mesh_state.node)*/
rbtree_t all;
rbtree_type all;
/** count of the total number of mesh_reply entries */
size_t num_reply_addrs;
@ -154,9 +154,9 @@ struct mesh_area {
*/
struct mesh_state {
/** node in mesh_area all tree, key is this struct. Must be first. */
rbnode_t node;
rbnode_type node;
/** node in mesh_area runnable tree, key is this struct */
rbnode_t run_node;
rbnode_type run_node;
/** the query state. Note that the qinfo and query_flags
* may not change. */
struct module_qstate s;
@ -166,10 +166,10 @@ struct mesh_state {
struct mesh_cb* cb_list;
/** set of superstates (that want this state's result)
* contains struct mesh_state_ref* */
rbtree_t super_set;
rbtree_type super_set;
/** set of substates (that this state needs to continue)
* contains struct mesh_state_ref* */
rbtree_t sub_set;
rbtree_type sub_set;
/** number of activations for the mesh state */
size_t num_activated;
@ -193,7 +193,7 @@ struct mesh_state {
*/
struct mesh_state_ref {
/** node in rbtree for set, key is this structure */
rbnode_t node;
rbnode_type node;
/** the mesh state */
struct mesh_state* s;
};
@ -224,7 +224,7 @@ struct mesh_reply {
* Mesh result callback func.
* called as func(cb_arg, rcode, buffer_with_reply, security, why_bogus);
*/
typedef void (*mesh_cb_func_t)(void*, int, struct sldns_buffer*, enum sec_status,
typedef void (*mesh_cb_func_type)(void*, int, struct sldns_buffer*, enum sec_status,
char*);
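
For illustration, a minimal sketch of a function matching the mesh_cb_func_type typedef above, using the argument order given in the comment (cb_arg, rcode, buffer with reply, security status, why_bogus); the function name and body are hypothetical.

static void my_mesh_done(void* cb_arg, int rcode, struct sldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	(void)cb_arg; (void)buf;
	if(rcode != 0)
		log_info("mesh result: rcode %d, security %d, why %s",
			rcode, (int)s, why_bogus?why_bogus:"");
}

/* registered with, e.g.:
 *   mesh_new_callback(mesh, &qinfo, qflags, &edns, buf, qid,
 *       &my_mesh_done, my_arg);
 */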
/**
@ -245,7 +245,7 @@ struct mesh_cb {
/** callback routine for results. if rcode != 0 buf has message.
* called as cb(cb_arg, rcode, buf, sec_state);
*/
mesh_cb_func_t cb;
mesh_cb_func_type cb;
/** user arg for callback */
void* cb_arg;
};
@ -300,7 +300,7 @@ void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
*/
int mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, struct edns_data* edns, struct sldns_buffer* buf,
uint16_t qid, mesh_cb_func_t cb, void* cb_arg);
uint16_t qid, mesh_cb_func_type cb, void* cb_arg);
/**
* New prefetch message. Create new query state if needed.
@ -498,7 +498,7 @@ int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
* @return: 0 on alloc error.
*/
int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
struct sldns_buffer* buf, mesh_cb_func_t cb, void* cb_arg, uint16_t qid,
struct sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg, uint16_t qid,
uint16_t qflags);
/**


@ -334,7 +334,7 @@ use_free_buffer(struct outside_network* outnet)
if(outnet->tcp_wait_last == w)
outnet->tcp_wait_last = NULL;
if(!outnet_tcp_take_into_use(w, w->pkt, w->pkt_len)) {
comm_point_callback_t* cb = w->cb;
comm_point_callback_type* cb = w->cb;
void* cb_arg = w->cb_arg;
waiting_tcp_delete(w);
fptr_ok(fptr_whitelist_pending_tcp(cb));
@ -775,7 +775,7 @@ outside_network_create(struct comm_base *base, size_t bufsize,
/** helper pending delete */
static void
pending_node_del(rbnode_t* node, void* arg)
pending_node_del(rbnode_type* node, void* arg)
{
struct pending* pend = (struct pending*)node;
struct outside_network* outnet = (struct outside_network*)arg;
@ -784,7 +784,7 @@ pending_node_del(rbnode_t* node, void* arg)
/** helper serviced delete */
static void
serviced_node_del(rbnode_t* node, void* ATTR_UNUSED(arg))
serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
{
struct serviced_query* sq = (struct serviced_query*)node;
struct service_callback* p = sq->cblist, *np;
@ -1124,7 +1124,7 @@ randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
struct pending*
pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
int timeout, comm_point_callback_t* cb, void* cb_arg)
int timeout, comm_point_callback_type* cb, void* cb_arg)
{
struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
if(!pend) return NULL;
@ -1174,7 +1174,7 @@ outnet_tcptimer(void* arg)
{
struct waiting_tcp* w = (struct waiting_tcp*)arg;
struct outside_network* outnet = w->outnet;
comm_point_callback_t* cb;
comm_point_callback_type* cb;
void* cb_arg;
if(w->pkt) {
/* it is on the waiting list */
@ -1197,7 +1197,7 @@ outnet_tcptimer(void* arg)
struct waiting_tcp*
pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
int timeout, comm_point_callback_t* callback, void* callback_arg)
int timeout, comm_point_callback_type* callback, void* callback_arg)
{
struct pending_tcp* pend = sq->outnet->tcp_free;
struct waiting_tcp* w;
@ -1301,7 +1301,7 @@ serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
{
struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
#ifdef UNBOUND_DEBUG
rbnode_t* ins;
rbnode_type* ins;
#endif
if(!sq)
return NULL;
@ -1587,7 +1587,7 @@ serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
uint8_t *backup_p = NULL;
size_t backlen = 0;
#ifdef UNBOUND_DEBUG
rbnode_t* rem =
rbnode_type* rem =
#else
(void)
#endif
@ -1990,7 +1990,7 @@ outnet_serviced_query(struct outside_network* outnet,
int nocaps, int tcp_upstream, int ssl_upstream,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
size_t zonelen, struct module_qstate* qstate,
comm_point_callback_t* callback, void* callback_arg, sldns_buffer* buff,
comm_point_callback_type* callback, void* callback_arg, sldns_buffer* buff,
struct module_env* env)
{
struct serviced_query* sq;


@ -123,9 +123,9 @@ struct outside_network {
struct pending* udp_wait_last;
/** pending udp answers. sorted by id, addr */
rbtree_t* pending;
rbtree_type* pending;
/** serviced queries, sorted by qbuf, addr, dnssec */
rbtree_t* serviced;
rbtree_type* serviced;
/** host cache, pointer but not owned by outnet. */
struct infra_cache* infra;
/** where to get random numbers */
@ -210,7 +210,7 @@ struct port_comm {
*/
struct pending {
/** redblacktree entry, key is the pending struct(id, addr). */
rbnode_t node;
rbnode_type node;
/** the ID for the query. int so that a value out of range can
* be used to signify a pending that is for certain not present in
* the rbtree. (and for which deletion is safe). */
@ -224,7 +224,7 @@ struct pending {
/** timeout event */
struct comm_timer* timer;
/** callback for the timeout, error or reply to the message */
comm_point_callback_t* cb;
comm_point_callback_type* cb;
/** callback user argument */
void* cb_arg;
/** the outside network it is part of */
@ -285,7 +285,7 @@ struct waiting_tcp {
/** length of query packet. */
size_t pkt_len;
/** callback for the timeout, error or reply to the message */
comm_point_callback_t* cb;
comm_point_callback_type* cb;
/** callback user argument */
void* cb_arg;
/** if it uses ssl upstream */
@ -299,7 +299,7 @@ struct service_callback {
/** next in callback list */
struct service_callback* next;
/** callback function */
comm_point_callback_t* cb;
comm_point_callback_type* cb;
/** user argument for callback function */
void* cb_arg;
};
@ -317,7 +317,7 @@ struct service_callback {
*/
struct serviced_query {
/** The rbtree node, key is this record */
rbnode_t node;
rbnode_type node;
/** The query that needs to be answered. Starts with flags u16,
* then qdcount, ..., including qname, qtype, qclass. Does not include
* EDNS record. */
@ -443,7 +443,7 @@ void outside_network_quit_prepare(struct outside_network* outnet);
* @return: NULL on error for malloc or socket. Else the pending query object.
*/
struct pending* pending_udp_query(struct serviced_query* sq,
struct sldns_buffer* packet, int timeout, comm_point_callback_t* callback,
struct sldns_buffer* packet, int timeout, comm_point_callback_type* callback,
void* callback_arg);
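
A hedged sketch of a reply handler passed as the callback above. The comm_point_callback_type signature (comm point, user argument, error code, reply info, returning int) is assumed from util/netevent.h and is not part of this diff; the function name is hypothetical.

static int my_udp_reply_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	(void)c; (void)arg; (void)reply_info;
	if(error != 0)
		return 0;   /* timeout or network error for this pending query */
	/* inspect the reply here (details omitted in this sketch) */
	return 0;
}

/* passed as the callback of, e.g.:
 *   pending_udp_query(sq, packet, timeout, &my_udp_reply_cb, my_arg);
 */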
/**
@ -459,7 +459,7 @@ struct pending* pending_udp_query(struct serviced_query* sq,
* @return: false on error for malloc or socket. Else the pending TCP object.
*/
struct waiting_tcp* pending_tcp_query(struct serviced_query* sq,
struct sldns_buffer* packet, int timeout, comm_point_callback_t* callback,
struct sldns_buffer* packet, int timeout, comm_point_callback_type* callback,
void* callback_arg);
/**
@ -504,7 +504,7 @@ struct serviced_query* outnet_serviced_query(struct outside_network* outnet,
int nocaps, int tcp_upstream, int ssl_upstream,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
size_t zonelen, struct module_qstate* qstate,
comm_point_callback_t* callback, void* callback_arg,
comm_point_callback_type* callback, void* callback_arg,
struct sldns_buffer* buff, struct module_env* env);
/**


@ -78,7 +78,7 @@ view_delete(struct view* v)
}
static void
delviewnode(rbnode_t* n, void* ATTR_UNUSED(arg))
delviewnode(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct view* v = (struct view*)n;
view_delete(v);
@ -107,7 +107,7 @@ view_create(char* name)
return NULL;
}
lock_rw_init(&v->lock);
lock_protect(&v->lock, &v->name, sizeof(*v)-sizeof(rbnode_t));
lock_protect(&v->lock, &v->name, sizeof(*v)-sizeof(rbnode_type));
return v;
}


@ -54,9 +54,9 @@ struct config_view;
*/
struct views {
/** lock on the view tree */
lock_rw_t lock;
lock_rw_type lock;
/** rbtree of struct view */
rbtree_t vtree;
rbtree_type vtree;
};
/**
@ -64,7 +64,7 @@ struct views {
*/
struct view {
/** rbtree node, key is name */
rbnode_t node;
rbnode_type node;
/** view name.
* Has to be right after rbnode_t due to pointer arithmatic in
* view_create's lock protect */
@ -77,7 +77,7 @@ struct view {
/** lock on the data in the structure
* For the node and name you need to also hold the views_tree lock to
* change them. */
lock_rw_t lock;
lock_rw_type lock;
};


@ -64,7 +64,7 @@ struct track_id {
/** true if cancelled */
int cancel;
/** a lock on this structure for thread safety */
lock_basic_t lock;
lock_basic_type lock;
};
/**
@ -164,7 +164,7 @@ struct ext_thr_info {
/** thread num for debug */
int thread_num;
/** thread id */
ub_thread_t tid;
ub_thread_type tid;
/** context */
struct ub_ctx* ctx;
/** size of array to query */


@ -307,7 +307,7 @@ struct checked_lock_mutex { struct checked_lock* c_m; };
struct checked_lock_spl { struct checked_lock* c_spl; };
/** debugging rwlock */
typedef struct checked_lock_rw lock_rw_t;
typedef struct checked_lock_rw lock_rw_type;
#define lock_rw_init(lock) checklock_init(check_lock_rwlock, &((lock)->c_rw), __func__, __FILE__, __LINE__)
#define lock_rw_destroy(lock) checklock_destroy(check_lock_rwlock, &((lock)->c_rw), __func__, __FILE__, __LINE__)
#define lock_rw_rdlock(lock) checklock_rdlock(check_lock_rwlock, (lock)->c_rw, __func__, __FILE__, __LINE__)
@ -315,26 +315,26 @@ typedef struct checked_lock_rw lock_rw_t;
#define lock_rw_unlock(lock) checklock_unlock(check_lock_rwlock, (lock)->c_rw, __func__, __FILE__, __LINE__)
/** debugging mutex */
typedef struct checked_lock_mutex lock_basic_t;
typedef struct checked_lock_mutex lock_basic_type;
#define lock_basic_init(lock) checklock_init(check_lock_mutex, &((lock)->c_m), __func__, __FILE__, __LINE__)
#define lock_basic_destroy(lock) checklock_destroy(check_lock_mutex, &((lock)->c_m), __func__, __FILE__, __LINE__)
#define lock_basic_lock(lock) checklock_lock(check_lock_mutex, (lock)->c_m, __func__, __FILE__, __LINE__)
#define lock_basic_unlock(lock) checklock_unlock(check_lock_mutex, (lock)->c_m, __func__, __FILE__, __LINE__)
/** debugging spinlock */
typedef struct checked_lock_spl lock_quick_t;
typedef struct checked_lock_spl lock_quick_type;
#define lock_quick_init(lock) checklock_init(check_lock_spinlock, &((lock)->c_spl), __func__, __FILE__, __LINE__)
#define lock_quick_destroy(lock) checklock_destroy(check_lock_spinlock, &((lock)->c_spl), __func__, __FILE__, __LINE__)
#define lock_quick_lock(lock) checklock_lock(check_lock_spinlock, (lock)->c_spl, __func__, __FILE__, __LINE__)
#define lock_quick_unlock(lock) checklock_unlock(check_lock_spinlock, (lock)->c_spl, __func__, __FILE__, __LINE__)
/** we use the pthread id, our thr_check structure is kept behind the scenes */
typedef pthread_t ub_thread_t;
typedef pthread_t ub_thread_type;
#define ub_thread_create(thr, func, arg) checklock_thrcreate(thr, func, arg)
#define ub_thread_self() pthread_self()
#define ub_thread_join(thread) checklock_thrjoin(thread)
typedef pthread_key_t ub_thread_key_t;
typedef pthread_key_t ub_thread_key_type;
#define ub_thread_key_create(key, f) LOCKRET(pthread_key_create(key, f))
#define ub_thread_key_set(key, v) LOCKRET(pthread_setspecific(key, v))
#define ub_thread_key_get(key) pthread_getspecific(key)
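
To show the renamed lock types in use, a minimal sketch pairing lock_basic_type with the init/lock/unlock/destroy macros above and the lock_protect call seen elsewhere in this commit; the counter structure and functions are hypothetical.

struct counter {
	lock_basic_type lock;   /* protects val */
	int val;
};

static void counter_setup(struct counter* c)
{
	lock_basic_init(&c->lock);
	lock_protect(&c->lock, &c->val, sizeof(c->val));
	c->val = 0;
}

static void counter_bump(struct counter* c)
{
	lock_basic_lock(&c->lock);
	c->val++;
	lock_basic_unlock(&c->lock);
}

static void counter_teardown(struct counter* c)
{
	lock_basic_destroy(&c->lock);
}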


@ -318,7 +318,7 @@ answer_callback_from_entry(struct replay_runtime* runtime,
struct comm_point c;
struct comm_reply repinfo;
void* cb_arg = pend->cb_arg;
comm_point_callback_t* cb = pend->callback;
comm_point_callback_type* cb = pend->callback;
memset(&c, 0, sizeof(c));
c.fd = -1;
@ -422,7 +422,7 @@ fake_pending_callback(struct replay_runtime* runtime,
struct comm_reply repinfo;
struct comm_point c;
void* cb_arg;
comm_point_callback_t* cb;
comm_point_callback_type* cb;
memset(&c, 0, sizeof(c));
if(!p) fatal_exit("No pending queries.");
@ -735,7 +735,7 @@ struct listen_dnsport*
listen_create(struct comm_base* base, struct listen_port* ATTR_UNUSED(ports),
size_t bufsize, int ATTR_UNUSED(tcp_accept_count),
void* ATTR_UNUSED(sslctx), struct dt_env* ATTR_UNUSED(dtenv),
comm_point_callback_t* cb, void* cb_arg)
comm_point_callback_type* cb, void* cb_arg)
{
struct replay_runtime* runtime = (struct replay_runtime*)base;
struct listen_dnsport* l= calloc(1, sizeof(struct listen_dnsport));
@ -937,7 +937,7 @@ outside_network_quit_prepare(struct outside_network* ATTR_UNUSED(outnet))
struct pending*
pending_udp_query(struct serviced_query* sq, sldns_buffer* packet,
int timeout, comm_point_callback_t* callback, void* callback_arg)
int timeout, comm_point_callback_type* callback, void* callback_arg)
{
struct replay_runtime* runtime = (struct replay_runtime*)
sq->outnet->base;
@ -987,7 +987,7 @@ pending_udp_query(struct serviced_query* sq, sldns_buffer* packet,
struct waiting_tcp*
pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
int timeout, comm_point_callback_t* callback, void* callback_arg)
int timeout, comm_point_callback_type* callback, void* callback_arg)
{
struct replay_runtime* runtime = (struct replay_runtime*)
sq->outnet->base;
@ -1041,7 +1041,7 @@ struct serviced_query* outnet_serviced_query(struct outside_network* outnet,
int ATTR_UNUSED(tcp_upstream), int ATTR_UNUSED(ssl_upstream),
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
size_t zonelen, struct module_qstate* qstate,
comm_point_callback_t* callback, void* callback_arg,
comm_point_callback_type* callback, void* callback_arg,
sldns_buffer* ATTR_UNUSED(buff), struct module_env* ATTR_UNUSED(env))
{
struct replay_runtime* runtime = (struct replay_runtime*)outnet->base;
@ -1160,7 +1160,7 @@ void listening_ports_free(struct listen_port* list)
struct comm_point* comm_point_create_local(struct comm_base* ATTR_UNUSED(base),
int ATTR_UNUSED(fd), size_t ATTR_UNUSED(bufsize),
comm_point_callback_t* ATTR_UNUSED(callback),
comm_point_callback_type* ATTR_UNUSED(callback),
void* ATTR_UNUSED(callback_arg))
{
return calloc(1, 1);
@ -1168,7 +1168,7 @@ struct comm_point* comm_point_create_local(struct comm_base* ATTR_UNUSED(base),
struct comm_point* comm_point_create_raw(struct comm_base* ATTR_UNUSED(base),
int ATTR_UNUSED(fd), int ATTR_UNUSED(writing),
comm_point_callback_t* ATTR_UNUSED(callback),
comm_point_callback_type* ATTR_UNUSED(callback),
void* ATTR_UNUSED(callback_arg))
{
/* no pipe comm possible */


@ -68,7 +68,7 @@ struct order_id {
/** a lock */
struct order_lock {
/** rbnode in all tree */
rbnode_t node;
rbnode_type node;
/** lock id */
struct order_id id;
/** the creation file */
@ -76,7 +76,7 @@ struct order_lock {
/** creation line */
int create_line;
/** set of all locks that are smaller than this one (locked earlier) */
rbtree_t* smaller;
rbtree_type* smaller;
/** during depthfirstsearch, this is a linked list of the stack
* of locks. points to the next lock bigger than this one. */
struct lock_ref* dfs_next;
@ -89,7 +89,7 @@ struct order_lock {
/** reference to a lock in a rbtree set */
struct lock_ref {
/** rbnode, key is an order_id ptr */
rbnode_t node;
rbnode_type node;
/** the lock referenced */
struct order_lock* lock;
/** why is this ref */
@ -181,7 +181,7 @@ static int readup_str(char** str, FILE* in)
}
/** read creation entry */
static void read_create(rbtree_t* all, FILE* in)
static void read_create(rbtree_type* all, FILE* in)
{
struct order_lock* o = calloc(1, sizeof(struct order_lock));
if(!o) fatal_exit("malloc failure");
@ -210,7 +210,7 @@ static void read_create(rbtree_t* all, FILE* in)
/** insert lock entry (empty) into list */
static struct order_lock*
insert_lock(rbtree_t* all, struct order_id* id)
insert_lock(rbtree_type* all, struct order_id* id)
{
struct order_lock* o = calloc(1, sizeof(struct order_lock));
if(!o) fatal_exit("malloc failure");
@ -223,7 +223,7 @@ insert_lock(rbtree_t* all, struct order_id* id)
}
/** read lock entry */
static void read_lock(rbtree_t* all, FILE* in, int val)
static void read_lock(rbtree_type* all, FILE* in, int val)
{
struct order_id prev_id, now_id;
struct lock_ref* ref;
@ -256,7 +256,7 @@ static void read_lock(rbtree_t* all, FILE* in, int val)
}
/** read input file */
static void readinput(rbtree_t* all, char* file)
static void readinput(rbtree_type* all, char* file)
{
FILE *in = fopen(file, "r");
int fst;
@ -367,7 +367,7 @@ static void check_order_lock(struct order_lock* lock)
}
/** Check ordering of locks */
static void check_order(rbtree_t* all_locks)
static void check_order(rbtree_type* all_locks)
{
/* check each lock */
struct order_lock* lock;
@ -391,7 +391,7 @@ static void check_order(rbtree_t* all_locks)
int
main(int argc, char* argv[])
{
rbtree_t* all_locks;
rbtree_type* all_locks;
int i;
time_t starttime = time(NULL);
#ifdef USE_THREAD_DEBUG


@ -51,7 +51,7 @@
*/
struct codeline {
/** rbtree node */
rbnode_t node;
rbnode_type node;
/** the name of the file:linenumber */
char* codeline;
/** the name of the function */
@ -99,7 +99,7 @@ match(char* line)
/** find or alloc codeline in tree */
static struct codeline*
get_codeline(rbtree_t* tree, char* key, char* func)
get_codeline(rbtree_type* tree, char* key, char* func)
{
struct codeline* cl = (struct codeline*)rbtree_search(tree, key);
if(!cl) {
@ -118,7 +118,7 @@ get_codeline(rbtree_t* tree, char* key, char* func)
/** read up the malloc stats */
static void
read_malloc_stat(char* line, rbtree_t* tree)
read_malloc_stat(char* line, rbtree_type* tree)
{
char codeline[10240];
char name[10240];
@ -143,7 +143,7 @@ read_malloc_stat(char* line, rbtree_t* tree)
/** read up the calloc stats */
static void
read_calloc_stat(char* line, rbtree_t* tree)
read_calloc_stat(char* line, rbtree_type* tree)
{
char codeline[10240];
char name[10240];
@ -180,7 +180,7 @@ get_file_size(const char* fname)
/** read the logfile */
static void
readfile(rbtree_t* tree, const char* fname)
readfile(rbtree_type* tree, const char* fname)
{
off_t total = get_file_size(fname);
off_t done = (off_t)0;
@ -216,7 +216,7 @@ readfile(rbtree_t* tree, const char* fname)
/** print memory stats */
static void
printstats(rbtree_t* tree)
printstats(rbtree_type* tree)
{
struct codeline* cl;
uint64_t total = 0, tcalls = 0;
@ -235,7 +235,7 @@ printstats(rbtree_t* tree)
/** main program */
int main(int argc, const char* argv[])
{
rbtree_t* tree = 0;
rbtree_type* tree = 0;
log_init(NULL, 0, 0);
if(argc != 2) {
usage();


@ -63,7 +63,7 @@
* done (successfully).
* @return expanded text, malloced. NULL on failure.
*/
static char* macro_expand(rbtree_t* store,
static char* macro_expand(rbtree_type* store,
struct replay_runtime* runtime, char** text);
/** compare of time values */
@ -548,7 +548,7 @@ replay_var_compare(const void* a, const void* b)
return strcmp(x->name, y->name);
}
rbtree_t*
rbtree_type*
macro_store_create(void)
{
return rbtree_create(&replay_var_compare);
@ -556,7 +556,7 @@ macro_store_create(void)
/** helper function to delete macro values */
static void
del_macro(rbnode_t* x, void* ATTR_UNUSED(arg))
del_macro(rbnode_type* x, void* ATTR_UNUSED(arg))
{
struct replay_var* v = (struct replay_var*)x;
free(v->name);
@ -565,7 +565,7 @@ del_macro(rbnode_t* x, void* ATTR_UNUSED(arg))
}
void
macro_store_delete(rbtree_t* store)
macro_store_delete(rbtree_type* store)
{
if(!store)
return;
@ -615,7 +615,7 @@ do_buf_insert(char* buf, size_t remain, char* after, char* inserted)
/** do macro recursion */
static char*
do_macro_recursion(rbtree_t* store, struct replay_runtime* runtime,
do_macro_recursion(rbtree_type* store, struct replay_runtime* runtime,
char* at, size_t remain)
{
char* after = at+2;
@ -632,7 +632,7 @@ do_macro_recursion(rbtree_t* store, struct replay_runtime* runtime,
/** get var from store */
static struct replay_var*
macro_getvar(rbtree_t* store, char* name)
macro_getvar(rbtree_type* store, char* name)
{
struct replay_var k;
k.node.key = &k;
@ -642,7 +642,7 @@ macro_getvar(rbtree_t* store, char* name)
/** do macro variable */
static char*
do_macro_variable(rbtree_t* store, char* buf, size_t remain)
do_macro_variable(rbtree_type* store, char* buf, size_t remain)
{
struct replay_var* v;
char* at = buf+1;
@ -776,7 +776,7 @@ do_macro_range(char* buf)
}
static char*
macro_expand(rbtree_t* store, struct replay_runtime* runtime, char** text)
macro_expand(rbtree_type* store, struct replay_runtime* runtime, char** text)
{
char buf[10240];
char* at = *text;
@ -844,7 +844,7 @@ macro_expand(rbtree_t* store, struct replay_runtime* runtime, char** text)
}
char*
macro_process(rbtree_t* store, struct replay_runtime* runtime, char* text)
macro_process(rbtree_type* store, struct replay_runtime* runtime, char* text)
{
char buf[10240];
char* next, *expand;
@ -872,14 +872,14 @@ macro_process(rbtree_t* store, struct replay_runtime* runtime, char* text)
}
char*
macro_lookup(rbtree_t* store, char* name)
macro_lookup(rbtree_type* store, char* name)
{
struct replay_var* x = macro_getvar(store, name);
if(!x) return strdup("");
return strdup(x->value);
}
void macro_print_debug(rbtree_t* store)
void macro_print_debug(rbtree_type* store)
{
struct replay_var* x;
RBTREE_FOR(x, struct replay_var*, store) {
@ -888,7 +888,7 @@ void macro_print_debug(rbtree_t* store)
}
int
macro_assign(rbtree_t* store, char* name, char* value)
macro_assign(rbtree_type* store, char* name, char* value)
{
struct replay_var* x = macro_getvar(store, name);
if(x) {
@ -918,7 +918,7 @@ macro_assign(rbtree_t* store, char* name, char* value)
void testbound_selftest(void)
{
/* test the macro store */
rbtree_t* store = macro_store_create();
rbtree_type* store = macro_store_create();
char* v;
int r;
int num_asserts = 0;


@ -280,7 +280,7 @@ struct replay_runtime {
struct fake_timer* timer_list;
/** callback to call for incoming queries */
comm_point_callback_t* callback_query;
comm_point_callback_type* callback_query;
/** user argument for incoming query callback */
void *cb_arg;
@ -305,7 +305,7 @@ struct replay_runtime {
/**
* Tree of macro values. Of type replay_var
*/
rbtree_t* vars;
rbtree_type* vars;
};
/**
@ -325,7 +325,7 @@ struct fake_pending {
/** qtype */
int qtype;
/** The callback function to call when answer arrives (or timeout) */
comm_point_callback_t* callback;
comm_point_callback_type* callback;
/** callback user argument */
void* cb_arg;
/** original timeout in seconds from 'then' */
@ -380,7 +380,7 @@ struct fake_timer {
*/
struct replay_var {
/** rbtree node. Key is this structure. Sorted by name. */
rbnode_t node;
rbnode_type node;
/** the variable name */
char* name;
/** the variable value */
@ -413,13 +413,13 @@ struct fake_timer* replay_get_oldest_timer(struct replay_runtime* runtime);
* Create variable storage
* @return new or NULL on failure.
*/
rbtree_t* macro_store_create(void);
rbtree_type* macro_store_create(void);
/**
* Delete variable storage
* @param store: the macro storage to free up.
*/
void macro_store_delete(rbtree_t* store);
void macro_store_delete(rbtree_type* store);
/**
* Apply macro substitution to string.
@ -428,7 +428,7 @@ void macro_store_delete(rbtree_t* store);
* @param text: string to work on.
* @return newly malloced string with result.
*/
char* macro_process(rbtree_t* store, struct replay_runtime* runtime,
char* macro_process(rbtree_type* store, struct replay_runtime* runtime,
char* text);
/**
@ -438,7 +438,7 @@ char* macro_process(rbtree_t* store, struct replay_runtime* runtime,
* @return newly malloced string with result or strdup("") if not found.
* or NULL on malloc failure.
*/
char* macro_lookup(rbtree_t* store, char* name);
char* macro_lookup(rbtree_type* store, char* name);
/**
* Set macro value.
@ -447,10 +447,10 @@ char* macro_lookup(rbtree_t* store, char* name);
* @param value: text to set it to. Not expanded.
* @return false on failure.
*/
int macro_assign(rbtree_t* store, char* name, char* value);
int macro_assign(rbtree_type* store, char* name, char* value);
/** Print macro variables stored as debug info */
void macro_print_debug(rbtree_t* store);
void macro_print_debug(rbtree_type* store);
/** testbounds self test */
void testbound_selftest(void);
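
Putting the declarations above together, a small hedged sketch of the typical create/assign/lookup/delete sequence for the rbtree_type macro store; the variable name and the wrapper function are illustrative.

static void macro_store_demo(void)
{
	rbtree_type* store = macro_store_create();
	char* v;
	if(!store || !macro_assign(store, "vlan", "10"))
		fatal_exit("out of memory");
	v = macro_lookup(store, "vlan");  /* newly malloced copy, "" if unset */
	log_info("vlan is %s", v);
	free(v);
	macro_print_debug(store);         /* dump all stored variables */
	macro_store_delete(store);
}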


@ -45,9 +45,9 @@
#include "util/storage/slabhash.h" /* for the test structures */
/** use this type for the lruhash test key */
typedef struct slabhash_testkey testkey_t;
typedef struct slabhash_testkey testkey_type;
/** use this type for the lruhash test data */
typedef struct slabhash_testdata testdata_t;
typedef struct slabhash_testdata testdata_type;
/** delete key */
static void delkey(struct slabhash_testkey* k) {
@ -56,10 +56,10 @@ static void delkey(struct slabhash_testkey* k) {
static void deldata(struct slabhash_testdata* d) {free(d);}
/** hash func, very bad to improve collisions */
static hashvalue_t myhash(int id) {return (hashvalue_t)id & 0x0f;}
static hashvalue_type myhash(int id) {return (hashvalue_type)id & 0x0f;}
/** allocate new key, fill in hash */
static testkey_t* newkey(int id) {
testkey_t* k = (testkey_t*)calloc(1, sizeof(testkey_t));
static testkey_type* newkey(int id) {
testkey_type* k = (testkey_type*)calloc(1, sizeof(testkey_type));
if(!k) fatal_exit("out of memory");
k->id = id;
k->entry.hash = myhash(id);
@ -68,9 +68,9 @@ static testkey_t* newkey(int id) {
return k;
}
/** new data el */
static testdata_t* newdata(int val) {
testdata_t* d = (testdata_t*)calloc(1,
sizeof(testdata_t));
static testdata_type* newdata(int val) {
testdata_type* d = (testdata_type*)calloc(1,
sizeof(testdata_type));
if(!d) fatal_exit("out of memory");
d->data = val;
return d;
@ -80,12 +80,12 @@ static testdata_t* newdata(int val) {
static void
test_bin_find_entry(struct lruhash* table)
{
testkey_t* k = newkey(12);
testdata_t* d = newdata(128);
testkey_t* k2 = newkey(12 + 1024);
testkey_t* k3 = newkey(14);
testkey_t* k4 = newkey(12 + 1024*2);
hashvalue_t h = myhash(12);
testkey_type* k = newkey(12);
testdata_type* d = newdata(128);
testkey_type* k2 = newkey(12 + 1024);
testkey_type* k3 = newkey(14);
testkey_type* k4 = newkey(12 + 1024*2);
hashvalue_type h = myhash(12);
struct lruhash_bin bin;
memset(&bin, 0, sizeof(bin));
bin_init(&bin, 1);
@ -161,8 +161,8 @@ test_bin_find_entry(struct lruhash* table)
/** test lru_front lru_remove */
static void test_lru(struct lruhash* table)
{
testkey_t* k = newkey(12);
testkey_t* k2 = newkey(14);
testkey_type* k = newkey(12);
testkey_type* k2 = newkey(14);
lock_quick_lock(&table->lock);
unit_assert( table->lru_start == NULL && table->lru_end == NULL);
@ -208,10 +208,10 @@ static void test_lru(struct lruhash* table)
static void
test_short_table(struct lruhash* table)
{
testkey_t* k = newkey(12);
testkey_t* k2 = newkey(14);
testdata_t* d = newdata(128);
testdata_t* d2 = newdata(129);
testkey_type* k = newkey(12);
testkey_type* k2 = newkey(14);
testdata_type* d = newdata(128);
testdata_type* d2 = newdata(129);
k->entry.data = d;
k2->entry.data = d2;
@ -232,11 +232,11 @@ test_short_table(struct lruhash* table)
/** test adding a random element */
static void
testadd(struct lruhash* table, testdata_t* ref[])
testadd(struct lruhash* table, testdata_type* ref[])
{
int numtoadd = random() % HASHTESTMAX;
testdata_t* data = newdata(numtoadd);
testkey_t* key = newkey(numtoadd);
testdata_type* data = newdata(numtoadd);
testkey_type* key = newkey(numtoadd);
key->entry.data = data;
lruhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
ref[numtoadd] = data;
@ -244,10 +244,10 @@ testadd(struct lruhash* table, testdata_t* ref[])
/** test adding a random element */
static void
testremove(struct lruhash* table, testdata_t* ref[])
testremove(struct lruhash* table, testdata_type* ref[])
{
int num = random() % HASHTESTMAX;
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
lruhash_remove(table, myhash(num), key);
ref[num] = NULL;
delkey(key);
@ -255,12 +255,12 @@ testremove(struct lruhash* table, testdata_t* ref[])
/** test adding a random element */
static void
testlookup(struct lruhash* table, testdata_t* ref[])
testlookup(struct lruhash* table, testdata_type* ref[])
{
int num = random() % HASHTESTMAX;
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
testdata_t* data = en? (testdata_t*)en->data : NULL;
testdata_type* data = en? (testdata_type*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
@ -310,11 +310,11 @@ check_table(struct lruhash* table)
/** test adding a random element (unlimited range) */
static void
testadd_unlim(struct lruhash* table, testdata_t** ref)
testadd_unlim(struct lruhash* table, testdata_type** ref)
{
int numtoadd = random() % (HASHTESTMAX * 10);
testdata_t* data = newdata(numtoadd);
testkey_t* key = newkey(numtoadd);
testdata_type* data = newdata(numtoadd);
testkey_type* key = newkey(numtoadd);
key->entry.data = data;
lruhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
if(ref)
@ -323,10 +323,10 @@ testadd_unlim(struct lruhash* table, testdata_t** ref)
/** test adding a random element (unlimited range) */
static void
testremove_unlim(struct lruhash* table, testdata_t** ref)
testremove_unlim(struct lruhash* table, testdata_type** ref)
{
int num = random() % (HASHTESTMAX*10);
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
lruhash_remove(table, myhash(num), key);
if(ref)
ref[num] = NULL;
@ -335,12 +335,12 @@ testremove_unlim(struct lruhash* table, testdata_t** ref)
/** test adding a random element (unlimited range) */
static void
testlookup_unlim(struct lruhash* table, testdata_t** ref)
testlookup_unlim(struct lruhash* table, testdata_type** ref)
{
int num = random() % (HASHTESTMAX*10);
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
testdata_t* data = en? (testdata_t*)en->data : NULL;
testdata_type* data = en? (testdata_type*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
@ -360,7 +360,7 @@ static void
test_long_table(struct lruhash* table)
{
/* assuming it all fits in the hashtable, this check will work */
testdata_t* ref[HASHTESTMAX * 100];
testdata_type* ref[HASHTESTMAX * 100];
size_t i;
memset(ref, 0, sizeof(ref));
/* test assumption */
@ -422,7 +422,7 @@ struct test_thr {
/** thread num, first entry. */
int num;
/** id */
ub_thread_t id;
ub_thread_type id;
/** hash table */
struct lruhash* table;
};
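
Condensing the test hunks above, a short sketch of the insert/lookup/remove flow with the renamed hashvalue_type, reusing the newkey/newdata/myhash helpers defined earlier in this test file; the values and the wrapper function are illustrative, and the looked-up entry is unlocked before removal since lruhash_lookup hands back a locked entry.

static void lruhash_demo(struct lruhash* table)
{
	testkey_type* k = newkey(7);
	testdata_type* d = newdata(70);
	struct lruhash_entry* e;
	k->entry.data = d;
	lruhash_insert(table, myhash(7), &k->entry, d, NULL);
	e = lruhash_lookup(table, myhash(7), k, 0);
	if(e) {
		unit_assert(((testdata_type*)e->data)->data == 70);
		lock_rw_unlock(&e->lock);   /* lookup returns the entry locked */
	}
	lruhash_remove(table, myhash(7), k);
}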


@ -73,7 +73,7 @@ int testcount = 0;
/** test alloc code */
static void
alloc_test(void) {
alloc_special_t *t1, *t2;
alloc_special_type *t1, *t2;
struct alloc_cache major, minor1, minor2;
int i;


@ -242,7 +242,7 @@ static void remove_item(struct val_neg_cache* neg)
{
int n, i;
struct val_neg_data* d;
rbnode_t* walk;
rbnode_type* walk;
struct val_neg_zone* z;
lock_basic_lock(&neg->lock);
@ -324,7 +324,7 @@ static size_t sumtrees_inuse(struct val_neg_cache* neg)
RBTREE_FOR(z, struct val_neg_zone*, &neg->tree) {
/* get count of highest parent for num in use */
d = (struct val_neg_data*)rbtree_first(&z->tree);
if(d && (rbnode_t*)d!=RBTREE_NULL)
if(d && (rbnode_type*)d!=RBTREE_NULL)
res += d->count;
}
return res;


@ -44,24 +44,24 @@
#include "util/storage/slabhash.h"
/** use this type for the slabhash test key */
typedef struct slabhash_testkey testkey_t;
typedef struct slabhash_testkey testkey_type;
/** use this type for the slabhash test data */
typedef struct slabhash_testdata testdata_t;
typedef struct slabhash_testdata testdata_type;
/** delete key */
static void delkey(struct slabhash_testkey* k) {
lock_rw_destroy(&k->entry.lock); free(k);}
/** hash func, very bad to improve collisions, both high and low bits */
static hashvalue_t myhash(int id) {
hashvalue_t h = (hashvalue_t)id & 0x0f;
static hashvalue_type myhash(int id) {
hashvalue_type h = (hashvalue_type)id & 0x0f;
h |= (h << 28);
return h;
}
/** allocate new key, fill in hash */
static testkey_t* newkey(int id) {
testkey_t* k = (testkey_t*)calloc(1, sizeof(testkey_t));
static testkey_type* newkey(int id) {
testkey_type* k = (testkey_type*)calloc(1, sizeof(testkey_type));
if(!k) fatal_exit("out of memory");
k->id = id;
k->entry.hash = myhash(id);
@ -70,9 +70,9 @@ static testkey_t* newkey(int id) {
return k;
}
/** new data el */
static testdata_t* newdata(int val) {
testdata_t* d = (testdata_t*)calloc(1,
sizeof(testdata_t));
static testdata_type* newdata(int val) {
testdata_type* d = (testdata_type*)calloc(1,
sizeof(testdata_type));
if(!d) fatal_exit("out of memory");
d->data = val;
return d;
@ -82,10 +82,10 @@ static testdata_t* newdata(int val) {
static void
test_short_table(struct slabhash* table)
{
testkey_t* k = newkey(12);
testkey_t* k2 = newkey(14);
testdata_t* d = newdata(128);
testdata_t* d2 = newdata(129);
testkey_type* k = newkey(12);
testkey_type* k2 = newkey(14);
testdata_type* d = newdata(128);
testdata_type* d2 = newdata(129);
k->entry.data = d;
k2->entry.data = d2;
@ -106,11 +106,11 @@ test_short_table(struct slabhash* table)
/** test adding a random element */
static void
testadd(struct slabhash* table, testdata_t* ref[])
testadd(struct slabhash* table, testdata_type* ref[])
{
int numtoadd = random() % HASHTESTMAX;
testdata_t* data = newdata(numtoadd);
testkey_t* key = newkey(numtoadd);
testdata_type* data = newdata(numtoadd);
testkey_type* key = newkey(numtoadd);
key->entry.data = data;
slabhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
ref[numtoadd] = data;
@ -118,10 +118,10 @@ testadd(struct slabhash* table, testdata_t* ref[])
/** test adding a random element */
static void
testremove(struct slabhash* table, testdata_t* ref[])
testremove(struct slabhash* table, testdata_type* ref[])
{
int num = random() % HASHTESTMAX;
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
slabhash_remove(table, myhash(num), key);
ref[num] = NULL;
delkey(key);
@ -129,12 +129,12 @@ testremove(struct slabhash* table, testdata_t* ref[])
/** test lookup of a random element */
static void
testlookup(struct slabhash* table, testdata_t* ref[])
testlookup(struct slabhash* table, testdata_type* ref[])
{
int num = random() % HASHTESTMAX;
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
struct lruhash_entry* en = slabhash_lookup(table, myhash(num), key, 0);
testdata_t* data = en? (testdata_t*)en->data : NULL;
testdata_type* data = en? (testdata_type*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
@ -193,11 +193,11 @@ check_table(struct slabhash* table)
/** test adding a random element (unlimited range) */
static void
testadd_unlim(struct slabhash* table, testdata_t** ref)
testadd_unlim(struct slabhash* table, testdata_type** ref)
{
int numtoadd = random() % (HASHTESTMAX * 10);
testdata_t* data = newdata(numtoadd);
testkey_t* key = newkey(numtoadd);
testdata_type* data = newdata(numtoadd);
testkey_type* key = newkey(numtoadd);
key->entry.data = data;
slabhash_insert(table, myhash(numtoadd), &key->entry, data, NULL);
if(ref)
@ -206,10 +206,10 @@ testadd_unlim(struct slabhash* table, testdata_t** ref)
/** test removing a random element (unlimited range) */
static void
testremove_unlim(struct slabhash* table, testdata_t** ref)
testremove_unlim(struct slabhash* table, testdata_type** ref)
{
int num = random() % (HASHTESTMAX*10);
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
slabhash_remove(table, myhash(num), key);
if(ref)
ref[num] = NULL;
@ -218,12 +218,12 @@ testremove_unlim(struct slabhash* table, testdata_t** ref)
/** test lookup of a random element (unlimited range) */
static void
testlookup_unlim(struct slabhash* table, testdata_t** ref)
testlookup_unlim(struct slabhash* table, testdata_type** ref)
{
int num = random() % (HASHTESTMAX*10);
testkey_t* key = newkey(num);
testkey_type* key = newkey(num);
struct lruhash_entry* en = slabhash_lookup(table, myhash(num), key, 0);
testdata_t* data = en? (testdata_t*)en->data : NULL;
testdata_type* data = en? (testdata_type*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
@ -243,7 +243,7 @@ static void
test_long_table(struct slabhash* table)
{
/* assuming it all fits in the hashtable, this check will work */
testdata_t* ref[HASHTESTMAX * 100];
testdata_type* ref[HASHTESTMAX * 100];
size_t i;
memset(ref, 0, sizeof(ref));
/* test assumption */
@ -301,7 +301,7 @@ struct slab_test_thr {
/** thread num, first entry. */
int num;
/** id */
ub_thread_t id;
ub_thread_type id;
/** hash table */
struct slabhash* table;
};
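
As a side note for readers following the renamed test types: a minimal sketch, not part of the patch, of one insert/lookup/remove round the test performs, using only the helpers and slabhash calls shown above (the table is assumed to be created elsewhere with the test's size, compare and delete functions).

/* Sketch: one test round with the renamed testkey_type/testdata_type. */
static void
example_round(struct slabhash* table)
{
	testdata_type* d = newdata(7);
	testkey_type* k = newkey(7);
	testkey_type* lk;
	struct lruhash_entry* e;

	k->entry.data = d;
	/* after insert the table owns k and d; its delete funcs free them */
	slabhash_insert(table, myhash(7), &k->entry, d, NULL);

	lk = newkey(7);	/* separate key used only for lookup */
	e = slabhash_lookup(table, myhash(7), lk, 0);
	if(e) {
		/* the entry comes back locked by the lookup; unlock after use */
		lock_rw_unlock(&e->lock);
	}
	delkey(lk);

	lk = newkey(7);	/* separate key used only for removal */
	slabhash_remove(table, myhash(7), lk);
	delkey(lk);
}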

View File

@ -412,7 +412,7 @@ nsectest(void)
/** Test hash algo - NSEC3 hash it and compare result */
static void
nsec3_hash_test_entry(struct entry* e, rbtree_t* ct,
nsec3_hash_test_entry(struct entry* e, rbtree_type* ct,
struct alloc_cache* alloc, struct regional* region,
sldns_buffer* buf)
{
@ -468,7 +468,7 @@ nsec3_hash_test(const char* fname)
*
* The test does not perform canonicalization during the compare.
*/
rbtree_t ct;
rbtree_type ct;
struct regional* region = regional_create();
struct alloc_cache alloc;
sldns_buffer* buf = sldns_buffer_new(65535);

View File

@ -52,7 +52,7 @@
/** setup new special type */
static void
alloc_setup_special(alloc_special_t* t)
alloc_setup_special(alloc_special_type* t)
{
memset(t, 0, sizeof(*t));
lock_rw_init(&t->entry.lock);
@ -66,10 +66,11 @@ alloc_setup_special(alloc_special_t* t)
static void
prealloc_setup(struct alloc_cache* alloc)
{
alloc_special_t* p;
alloc_special_type* p;
int i;
for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
if(!(p = (alloc_special_type*)malloc(
sizeof(alloc_special_type)))) {
log_err("prealloc: out of memory");
return;
}
@ -128,7 +129,7 @@ alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
void
alloc_clear(struct alloc_cache* alloc)
{
alloc_special_t* p, *np;
alloc_special_type* p, *np;
struct regional* r, *nr;
if(!alloc)
return;
@ -187,10 +188,10 @@ alloc_get_id(struct alloc_cache* alloc)
return id;
}
alloc_special_t*
alloc_special_type*
alloc_special_obtain(struct alloc_cache* alloc)
{
alloc_special_t* p;
alloc_special_type* p;
log_assert(alloc);
/* see if in local cache */
if(alloc->quar) {
@ -217,7 +218,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
}
/* allocate new */
prealloc_setup(alloc);
if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
if(!(p = (alloc_special_type*)malloc(sizeof(alloc_special_type)))) {
log_err("alloc_special_obtain: out of memory");
return NULL;
}
@ -228,10 +229,10 @@ alloc_special_obtain(struct alloc_cache* alloc)
/** push mem and some more items to the super */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
int i;
alloc_special_t *p = alloc->quar;
alloc_special_type *p = alloc->quar;
log_assert(p);
log_assert(alloc && alloc->super &&
alloc->num_quar >= ALLOC_SPECIAL_MAX);
@ -253,7 +254,7 @@ pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
}
void
alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem)
alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem)
{
log_assert(alloc);
if(!mem)
@ -286,12 +287,12 @@ alloc_stats(struct alloc_cache* alloc)
size_t alloc_get_mem(struct alloc_cache* alloc)
{
alloc_special_t* p;
alloc_special_type* p;
size_t s = sizeof(*alloc);
if(!alloc->super) {
lock_quick_lock(&alloc->lock); /* superalloc needs locking */
}
s += sizeof(alloc_special_t) * alloc->num_quar;
s += sizeof(alloc_special_type) * alloc->num_quar;
for(p = alloc->quar; p; p = alloc_special_next(p)) {
s += lock_get_mem(&p->entry.lock);
}
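
A usage sketch, not part of the patch, of the renamed alloc_special_type lifecycle implemented above; it assumes an alloc_cache that was already set up with alloc_init and is later torn down with alloc_clear.

/* Hypothetical caller of the per-thread special allocator. */
static void
use_special(struct alloc_cache* alloc)
{
	/* served from the quarantine list when possible, otherwise a
	 * fresh batch is preallocated with malloc */
	alloc_special_type* rrset = alloc_special_obtain(alloc);

	/* ... fill in the packed rrset key and data here ... */

	/* release zeroes the block (invalidating its id) and keeps it in
	 * the local quarantine, or pushes surplus to the super allocator */
	alloc_special_release(alloc, rrset);
}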

View File

@ -53,11 +53,11 @@ struct ub_packed_rrset_key;
struct regional;
/** The special type, packed rrset. Not allowed to be used for other memory */
typedef struct ub_packed_rrset_key alloc_special_t;
typedef struct ub_packed_rrset_key alloc_special_type;
/** clean the special type. Pass pointer. */
#define alloc_special_clean(x) (x)->id = 0;
/** access next pointer. (in available spot). Pass pointer. */
#define alloc_special_next(x) ((alloc_special_t*)((x)->entry.overflow_next))
#define alloc_special_next(x) ((alloc_special_type*)((x)->entry.overflow_next))
/** set next pointer. (in available spot). Pass pointers. */
#define alloc_set_special_next(x, y) \
((x)->entry.overflow_next) = (struct lruhash_entry*)(y);
@ -71,11 +71,11 @@ typedef struct ub_packed_rrset_key alloc_special_t;
*/
struct alloc_cache {
/** lock, only used for the super. */
lock_quick_t lock;
lock_quick_type lock;
/** global allocator above this one. NULL for none (malloc/free) */
struct alloc_cache* super;
/** singly linked lists of special type. These are free for use. */
alloc_special_t* quar;
alloc_special_type* quar;
/** number of items in quarantine. */
size_t num_quar;
/** thread number for id creation */
@ -116,20 +116,20 @@ void alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
void alloc_clear(struct alloc_cache* alloc);
/**
* Get a new special_t element.
* Get a new special_type element.
* @param alloc: where to alloc it.
* @return: memory block. Will not return NULL (instead fatal_exit).
* The block is zeroed.
*/
alloc_special_t* alloc_special_obtain(struct alloc_cache* alloc);
alloc_special_type* alloc_special_obtain(struct alloc_cache* alloc);
/**
* Return special_t back to pool.
* Return special_type back to pool.
* The block is cleaned up (zeroed) which also invalidates the ID inside.
* @param alloc: where to alloc it.
* @param mem: block to free.
*/
void alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem);
void alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem);
/**
* Set ID number of special type to a fresh new ID number.

View File

@ -270,8 +270,8 @@ dname_pkt_compare(sldns_buffer* pkt, uint8_t* d1, uint8_t* d2)
return 0;
}
hashvalue_t
dname_query_hash(uint8_t* dname, hashvalue_t h)
hashvalue_type
dname_query_hash(uint8_t* dname, hashvalue_type h)
{
uint8_t labuf[LDNS_MAX_LABELLEN+1];
uint8_t lablen;
@ -294,8 +294,8 @@ dname_query_hash(uint8_t* dname, hashvalue_t h)
return h;
}
hashvalue_t
dname_pkt_hash(sldns_buffer* pkt, uint8_t* dname, hashvalue_t h)
hashvalue_type
dname_pkt_hash(sldns_buffer* pkt, uint8_t* dname, hashvalue_type h)
{
uint8_t labuf[LDNS_MAX_LABELLEN+1];
uint8_t lablen;

View File

@ -127,7 +127,7 @@ int dname_pkt_compare(struct sldns_buffer* pkt, uint8_t* d1, uint8_t* d2);
* @param h: initial hash value.
* @return: result hash value.
*/
hashvalue_t dname_query_hash(uint8_t* dname, hashvalue_t h);
hashvalue_type dname_query_hash(uint8_t* dname, hashvalue_type h);
/**
* Hash dname, label by label, lowercasing, into hashvalue.
@ -139,7 +139,8 @@ hashvalue_t dname_query_hash(uint8_t* dname, hashvalue_t h);
* @return: result hash value.
* Result is the same as dname_query_hash, even if compression is used.
*/
hashvalue_t dname_pkt_hash(struct sldns_buffer* pkt, uint8_t* dname, hashvalue_t h);
hashvalue_type dname_pkt_hash(struct sldns_buffer* pkt, uint8_t* dname,
hashvalue_type h);
/**
* Copy over a valid dname and decompress it.
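
A small usage sketch of the renamed hash helpers, not from the patch: both functions fold a wire-format name into a running hashvalue_type seed, and dname_pkt_hash is documented above to give the same value as dname_query_hash even for compressed names. The include path is an assumption.

#include "util/data/dname.h"	/* assumed header location */

/* Hash the uncompressed wire-format name "example.com." with the same
 * 0xab seed that the query/rrset hashing code elsewhere starts from. */
static hashvalue_type
hash_example_name(void)
{
	uint8_t name[] = "\007example\003com";	/* string adds the root label */
	hashvalue_type h = 0xab;
	return dname_query_hash(name, h);
}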

View File

@ -71,7 +71,7 @@ smart_compare(sldns_buffer* pkt, uint8_t* dnow,
*/
static struct rrset_parse*
new_rrset(struct msg_parse* msg, uint8_t* dname, size_t dnamelen,
uint16_t type, uint16_t dclass, hashvalue_t hash,
uint16_t type, uint16_t dclass, hashvalue_type hash,
uint32_t rrset_flags, sldns_pkt_section section,
struct regional* region)
{
@ -159,13 +159,13 @@ pkt_rrset_flags(sldns_buffer* pkt, uint16_t type, sldns_pkt_section sec)
return f;
}
hashvalue_t
hashvalue_type
pkt_hash_rrset(sldns_buffer* pkt, uint8_t* dname, uint16_t type,
uint16_t dclass, uint32_t rrset_flags)
{
/* note this MUST be identical to rrset_key_hash in packed_rrset.c */
/* this routine handles compressed names */
hashvalue_t h = 0xab;
hashvalue_type h = 0xab;
h = dname_pkt_hash(pkt, dname, h);
h = hashlittle(&type, sizeof(type), h); /* host order */
h = hashlittle(&dclass, sizeof(dclass), h); /* netw order */
@ -174,25 +174,25 @@ pkt_hash_rrset(sldns_buffer* pkt, uint8_t* dname, uint16_t type,
}
/** create partial dname hash for rrset hash */
static hashvalue_t
static hashvalue_type
pkt_hash_rrset_first(sldns_buffer* pkt, uint8_t* dname)
{
/* works together with pkt_hash_rrset_rest */
/* note this MUST be identical to rrset_key_hash in packed_rrset.c */
/* this routine handles compressed names */
hashvalue_t h = 0xab;
hashvalue_type h = 0xab;
h = dname_pkt_hash(pkt, dname, h);
return h;
}
/** create a rrset hash from a partial dname hash */
static hashvalue_t
pkt_hash_rrset_rest(hashvalue_t dname_h, uint16_t type, uint16_t dclass,
static hashvalue_type
pkt_hash_rrset_rest(hashvalue_type dname_h, uint16_t type, uint16_t dclass,
uint32_t rrset_flags)
{
/* works together with pkt_hash_rrset_first */
/* note this MUST be identical to rrset_key_hash in packed_rrset.c */
hashvalue_t h;
hashvalue_type h;
h = hashlittle(&type, sizeof(type), dname_h); /* host order */
h = hashlittle(&dclass, sizeof(dclass), h); /* netw order */
h = hashlittle(&rrset_flags, sizeof(uint32_t), h);
@ -201,7 +201,7 @@ pkt_hash_rrset_rest(hashvalue_t dname_h, uint16_t type, uint16_t dclass,
/** compare rrset_parse with data */
static int
rrset_parse_equals(struct rrset_parse* p, sldns_buffer* pkt, hashvalue_t h,
rrset_parse_equals(struct rrset_parse* p, sldns_buffer* pkt, hashvalue_type h,
uint32_t rrset_flags, uint8_t* dname, size_t dnamelen,
uint16_t type, uint16_t dclass)
{
@ -215,8 +215,8 @@ rrset_parse_equals(struct rrset_parse* p, sldns_buffer* pkt, hashvalue_t h,
struct rrset_parse*
msgparse_hashtable_lookup(struct msg_parse* msg, sldns_buffer* pkt,
hashvalue_t h, uint32_t rrset_flags, uint8_t* dname, size_t dnamelen,
uint16_t type, uint16_t dclass)
hashvalue_type h, uint32_t rrset_flags, uint8_t* dname,
size_t dnamelen, uint16_t type, uint16_t dclass)
{
struct rrset_parse* p = msg->hashtable[h & (PARSE_TABLE_SIZE-1)];
while(p) {
@ -388,7 +388,7 @@ change_rrsig_rrset(struct rrset_parse* sigset, struct msg_parse* msg,
int hasother, sldns_pkt_section section, struct regional* region)
{
struct rrset_parse* dataset = sigset;
hashvalue_t hash = pkt_hash_rrset(pkt, sigset->dname, datatype,
hashvalue_type hash = pkt_hash_rrset(pkt, sigset->dname, datatype,
sigset->rrset_class, rrset_flags);
log_assert( sigset->type == LDNS_RR_TYPE_RRSIG );
log_assert( datatype != LDNS_RR_TYPE_RRSIG );
@ -455,14 +455,14 @@ change_rrsig_rrset(struct rrset_parse* sigset, struct msg_parse* msg,
*/
static int
find_rrset(struct msg_parse* msg, sldns_buffer* pkt, uint8_t* dname,
size_t dnamelen, uint16_t type, uint16_t dclass, hashvalue_t* hash,
size_t dnamelen, uint16_t type, uint16_t dclass, hashvalue_type* hash,
uint32_t* rrset_flags,
uint8_t** prev_dname_first, uint8_t** prev_dname_last,
size_t* prev_dnamelen, uint16_t* prev_type,
uint16_t* prev_dclass, struct rrset_parse** rrset_prev,
sldns_pkt_section section, struct regional* region)
{
hashvalue_t dname_h = pkt_hash_rrset_first(pkt, dname);
hashvalue_type dname_h = pkt_hash_rrset_first(pkt, dname);
uint16_t covtype;
if(*rrset_prev) {
/* check if equal to previous item */
@ -824,7 +824,7 @@ parse_section(sldns_buffer* pkt, struct msg_parse* msg,
uint16_t type, prev_type = 0;
uint16_t dclass, prev_dclass = 0;
uint32_t rrset_flags = 0;
hashvalue_t hash = 0;
hashvalue_type hash = 0;
struct rrset_parse* rrset = NULL;
int r;
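
The comments above insist that the split hash stays in step with the one-shot hash and with rrset_key_hash in packed_rrset.c. A small sketch of that invariant, not in the patch, written as if it lived next to the file-static helpers:

/* Hashing in two steps must equal the one-shot pkt_hash_rrset(). */
static void
check_split_hash(sldns_buffer* pkt, uint8_t* dname, uint16_t type,
	uint16_t dclass, uint32_t rrset_flags)
{
	hashvalue_type one = pkt_hash_rrset(pkt, dname, type, dclass,
		rrset_flags);
	hashvalue_type dname_h = pkt_hash_rrset_first(pkt, dname);
	hashvalue_type two = pkt_hash_rrset_rest(dname_h, type, dclass,
		rrset_flags);
	log_assert(one == two);
}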

View File

@ -138,7 +138,7 @@ struct rrset_parse {
/** next in list of all rrsets */
struct rrset_parse* rrset_all_next;
/** hash value of rrset */
hashvalue_t hash;
hashvalue_type hash;
/** which section was it found in: one of
* LDNS_SECTION_ANSWER, LDNS_SECTION_AUTHORITY, LDNS_SECTION_ADDITIONAL
*/
@ -296,7 +296,7 @@ int parse_edns_from_pkt(struct sldns_buffer* pkt, struct edns_data* edns,
* @param rrset_flags: rrset flags (same as packed_rrset flags).
* @return hash value
*/
hashvalue_t pkt_hash_rrset(struct sldns_buffer* pkt, uint8_t* dname, uint16_t type,
hashvalue_type pkt_hash_rrset(struct sldns_buffer* pkt, uint8_t* dname, uint16_t type,
uint16_t dclass, uint32_t rrset_flags);
/**
@ -312,7 +312,7 @@ hashvalue_t pkt_hash_rrset(struct sldns_buffer* pkt, uint8_t* dname, uint16_t ty
* @return NULL or the rrset_parse if found.
*/
struct rrset_parse* msgparse_hashtable_lookup(struct msg_parse* msg,
struct sldns_buffer* pkt, hashvalue_t h, uint32_t rrset_flags,
struct sldns_buffer* pkt, hashvalue_type h, uint32_t rrset_flags,
uint8_t* dname, size_t dnamelen, uint16_t type, uint16_t dclass);
/**

View File

@ -608,10 +608,10 @@ reply_info_delete(void* d, void* ATTR_UNUSED(arg))
free(r);
}
hashvalue_t
hashvalue_type
query_info_hash(struct query_info *q, uint16_t flags)
{
hashvalue_t h = 0xab;
hashvalue_type h = 0xab;
h = hashlittle(&q->qtype, sizeof(q->qtype), h);
if(q->qtype == LDNS_RR_TYPE_AAAA && (flags&BIT_CD))
h++;
@ -622,7 +622,7 @@ query_info_hash(struct query_info *q, uint16_t flags)
struct msgreply_entry*
query_info_entrysetup(struct query_info* q, struct reply_info* r,
hashvalue_t h)
hashvalue_type h)
{
struct msgreply_entry* e = (struct msgreply_entry*)malloc(
sizeof(struct msgreply_entry));
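
A sketch, assumed rather than taken from the patch, of how the renamed hashvalue_type flows through this path: compute the hash once from the query and flags, then hand it to query_info_entrysetup so the cache entry carries it.

/* Hypothetical: build a message cache entry for a query/reply pair. */
static struct msgreply_entry*
make_cache_entry(struct query_info* qinfo, struct reply_info* rep,
	uint16_t flags)
{
	hashvalue_type h = query_info_hash(qinfo, flags);
	/* returns NULL on allocation failure */
	return query_info_entrysetup(qinfo, rep, h);
}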

View File

@ -105,7 +105,7 @@ struct rrset_ref {
/** the key with lock, and ptr to packed data. */
struct ub_packed_rrset_key* key;
/** id needed */
rrset_id_t id;
rrset_id_type id;
};
/**
@ -330,7 +330,7 @@ void reply_info_delete(void* d, void* arg);
/** calculate hash value of query_info, lowercases the qname,
* uses CD flag for AAAA qtype */
hashvalue_t query_info_hash(struct query_info *q, uint16_t flags);
hashvalue_type query_info_hash(struct query_info *q, uint16_t flags);
/**
* Setup query info entry
@ -340,7 +340,7 @@ hashvalue_t query_info_hash(struct query_info *q, uint16_t flags);
* @return: newly allocated message reply cache item.
*/
struct msgreply_entry* query_info_entrysetup(struct query_info* q,
struct reply_info* r, hashvalue_t h);
struct reply_info* r, hashvalue_type h);
/**
* Copy reply_info and all rrsets in it and allocate.

View File

@ -158,14 +158,14 @@ rrsetdata_equal(struct packed_rrset_data* d1, struct packed_rrset_data* d2)
return 1;
}
hashvalue_t
hashvalue_type
rrset_key_hash(struct packed_rrset_key* key)
{
/* type is hashed in host order */
uint16_t t = ntohs(key->type);
/* Note this MUST be identical to pkt_hash_rrset in msgparse.c */
/* this routine does not have a compressed name */
hashvalue_t h = 0xab;
hashvalue_type h = 0xab;
h = dname_query_hash(key->dname, h);
h = hashlittle(&t, sizeof(t), h);
h = hashlittle(&key->rrset_class, sizeof(uint16_t), h);

View File

@ -47,7 +47,7 @@ struct regional;
/** type used to uniquely identify rrsets. Cannot be reused without
* clearing the cache. */
typedef uint64_t rrset_id_t;
typedef uint64_t rrset_id_type;
/** this rrset is NSEC and is at zone apex (at child side of zonecut) */
#define PACKED_RRSET_NSEC_AT_APEX 0x1
@ -114,7 +114,7 @@ struct ub_packed_rrset_key {
* The other values in this struct may only be altered after changing
* the id (which needs a writelock on entry.lock).
*/
rrset_id_t id;
rrset_id_type id;
/** key data: dname, type and class */
struct packed_rrset_key rk;
};
@ -340,7 +340,7 @@ void rrset_data_delete(void* data, void* userdata);
* @param key: the rrset key with name, type, class, flags.
* @return hash value.
*/
hashvalue_t rrset_key_hash(struct packed_rrset_key* key);
hashvalue_type rrset_key_hash(struct packed_rrset_key* key);
/**
* Fixup pointers in fixed data packed_rrset_data blob.

View File

@ -84,7 +84,7 @@
#endif
int
fptr_whitelist_comm_point(comm_point_callback_t *fptr)
fptr_whitelist_comm_point(comm_point_callback_type *fptr)
{
if(fptr == &worker_handle_request) return 1;
else if(fptr == &outnet_udp_cb) return 1;
@ -94,7 +94,7 @@ fptr_whitelist_comm_point(comm_point_callback_t *fptr)
}
int
fptr_whitelist_comm_point_raw(comm_point_callback_t *fptr)
fptr_whitelist_comm_point_raw(comm_point_callback_type *fptr)
{
if(fptr == &tube_handle_listen) return 1;
else if(fptr == &tube_handle_write) return 1;
@ -156,7 +156,7 @@ fptr_whitelist_event(void (*fptr)(int, short, void *))
}
int
fptr_whitelist_pending_udp(comm_point_callback_t *fptr)
fptr_whitelist_pending_udp(comm_point_callback_type *fptr)
{
if(fptr == &serviced_udp_callback) return 1;
else if(fptr == &worker_handle_reply) return 1;
@ -165,7 +165,7 @@ fptr_whitelist_pending_udp(comm_point_callback_t *fptr)
}
int
fptr_whitelist_pending_tcp(comm_point_callback_t *fptr)
fptr_whitelist_pending_tcp(comm_point_callback_type *fptr)
{
if(fptr == &serviced_tcp_callback) return 1;
else if(fptr == &worker_handle_reply) return 1;
@ -174,7 +174,7 @@ fptr_whitelist_pending_tcp(comm_point_callback_t *fptr)
}
int
fptr_whitelist_serviced_query(comm_point_callback_t *fptr)
fptr_whitelist_serviced_query(comm_point_callback_type *fptr)
{
if(fptr == &worker_handle_service_reply) return 1;
else if(fptr == &libworker_handle_service_reply) return 1;
@ -209,7 +209,7 @@ fptr_whitelist_rbtree_cmp(int (*fptr) (const void *, const void *))
}
int
fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr)
fptr_whitelist_hash_sizefunc(lruhash_sizefunc_type fptr)
{
if(fptr == &msgreply_sizefunc) return 1;
else if(fptr == &ub_rrset_sizefunc) return 1;
@ -222,7 +222,7 @@ fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr)
}
int
fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr)
fptr_whitelist_hash_compfunc(lruhash_compfunc_type fptr)
{
if(fptr == &query_info_compare) return 1;
else if(fptr == &ub_rrset_compare) return 1;
@ -235,7 +235,7 @@ fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr)
}
int
fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr)
fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_type fptr)
{
if(fptr == &query_entry_delete) return 1;
else if(fptr == &ub_rrset_key_delete) return 1;
@ -248,7 +248,7 @@ fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr)
}
int
fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr)
fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_type fptr)
{
if(fptr == &reply_info_delete) return 1;
else if(fptr == &rrset_data_delete) return 1;
@ -260,7 +260,7 @@ fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr)
}
int
fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_t fptr)
fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_type fptr)
{
if(fptr == NULL) return 1;
else if(fptr == &rrset_markdel) return 1;
@ -412,14 +412,14 @@ fptr_whitelist_alloc_cleanup(void (*fptr)(void*))
return 0;
}
int fptr_whitelist_tube_listen(tube_callback_t* fptr)
int fptr_whitelist_tube_listen(tube_callback_type* fptr)
{
if(fptr == &worker_handle_control_cmd) return 1;
else if(fptr == &libworker_handle_control_cmd) return 1;
return 0;
}
int fptr_whitelist_mesh_cb(mesh_cb_func_t fptr)
int fptr_whitelist_mesh_cb(mesh_cb_func_type fptr)
{
if(fptr == &libworker_fg_done_cb) return 1;
else if(fptr == &libworker_bg_done_cb) return 1;
@ -436,7 +436,7 @@ int fptr_whitelist_print_func(void (*fptr)(char*,void*))
return 0;
}
int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_t* fptr,
int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_type* fptr,
enum inplace_cb_list_type type)
{
#ifndef WITH_PYTHONMODULE
@ -462,7 +462,7 @@ int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_t* fptr,
return 0;
}
int fptr_whitelist_inplace_cb_query(inplace_cb_query_func_t* ATTR_UNUSED(fptr))
int fptr_whitelist_inplace_cb_query(inplace_cb_query_func_type* ATTR_UNUSED(fptr))
{
return 0;
}
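
For context, a sketch of how these whitelist checks are meant to be used; the fptr_ok pattern appears later in this same patch, in rbtree.c. Names here are illustrative.

/* Hypothetical call site: verify a stored comm_point callback against
 * the compiled-in whitelist before calling through it (fptr_ok is
 * fatal when the check fails). */
static int
call_checked(struct comm_point* c, int err, struct comm_reply* rep)
{
	fptr_ok(fptr_whitelist_comm_point(c->callback));
	return (*c->callback)(c, c->cb_arg, err, rep);
}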

View File

@ -80,7 +80,7 @@
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_point(comm_point_callback_t *fptr);
int fptr_whitelist_comm_point(comm_point_callback_type *fptr);
/**
* Check function pointer whitelist for raw comm_point callback values.
@ -88,7 +88,7 @@ int fptr_whitelist_comm_point(comm_point_callback_t *fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_comm_point_raw(comm_point_callback_t *fptr);
int fptr_whitelist_comm_point_raw(comm_point_callback_type *fptr);
/**
* Check function pointer whitelist for comm_timer callback values.
@ -137,7 +137,7 @@ int fptr_whitelist_event(void (*fptr)(int, short, void *));
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_pending_udp(comm_point_callback_t *fptr);
int fptr_whitelist_pending_udp(comm_point_callback_type *fptr);
/**
* Check function pointer whitelist for pending tcp callback values.
@ -145,7 +145,7 @@ int fptr_whitelist_pending_udp(comm_point_callback_t *fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_pending_tcp(comm_point_callback_t *fptr);
int fptr_whitelist_pending_tcp(comm_point_callback_type *fptr);
/**
* Check function pointer whitelist for serviced query callback values.
@ -153,7 +153,7 @@ int fptr_whitelist_pending_tcp(comm_point_callback_t *fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_serviced_query(comm_point_callback_t *fptr);
int fptr_whitelist_serviced_query(comm_point_callback_type *fptr);
/**
* Check function pointer whitelist for rbtree cmp callback values.
@ -169,7 +169,7 @@ int fptr_whitelist_rbtree_cmp(int (*fptr) (const void *, const void *));
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr);
int fptr_whitelist_hash_sizefunc(lruhash_sizefunc_type fptr);
/**
* Check function pointer whitelist for lruhash compfunc callback values.
@ -177,7 +177,7 @@ int fptr_whitelist_hash_sizefunc(lruhash_sizefunc_t fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr);
int fptr_whitelist_hash_compfunc(lruhash_compfunc_type fptr);
/**
* Check function pointer whitelist for lruhash delkeyfunc callback values.
@ -185,7 +185,7 @@ int fptr_whitelist_hash_compfunc(lruhash_compfunc_t fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr);
int fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_type fptr);
/**
* Check function pointer whitelist for lruhash deldata callback values.
@ -193,7 +193,7 @@ int fptr_whitelist_hash_delkeyfunc(lruhash_delkeyfunc_t fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr);
int fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_type fptr);
/**
* Check function pointer whitelist for lruhash markdel callback values.
@ -201,7 +201,7 @@ int fptr_whitelist_hash_deldatafunc(lruhash_deldatafunc_t fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_t fptr);
int fptr_whitelist_hash_markdelfunc(lruhash_markdelfunc_type fptr);
/**
* Check function pointer whitelist for module_env send_query callback values.
@ -316,7 +316,7 @@ int fptr_whitelist_alloc_cleanup(void (*fptr)(void*));
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_tube_listen(tube_callback_t* fptr);
int fptr_whitelist_tube_listen(tube_callback_type* fptr);
/**
* Check function pointer whitelist for mesh state callback values.
@ -324,7 +324,7 @@ int fptr_whitelist_tube_listen(tube_callback_t* fptr);
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_mesh_cb(mesh_cb_func_t fptr);
int fptr_whitelist_mesh_cb(mesh_cb_func_type fptr);
/**
* Check function pointer whitelist for config_get_option func values.
@ -341,7 +341,7 @@ int fptr_whitelist_print_func(void (*fptr)(char*,void*));
* @param type: the type of the callback function.
* @return false if not in whitelist.
*/
int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_t* fptr,
int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_type* fptr,
enum inplace_cb_list_type type);
/**
@ -349,7 +349,7 @@ int fptr_whitelist_inplace_cb_reply_generic(inplace_cb_reply_func_t* fptr,
* @param fptr: function pointer to check.
* @return false if not in whitelist.
*/
int fptr_whitelist_inplace_cb_query(inplace_cb_query_func_t* fptr);
int fptr_whitelist_inplace_cb_query(inplace_cb_query_func_type* fptr);
/** Due to module breakage by fptr wlist, these test app declarations
* are presented here */

View File

@ -3946,6 +3946,7 @@
4700,
4701,
4702,
4711,
4725,
4726,
4727,

View File

@ -110,15 +110,15 @@ void ub_thread_sig_unblock(int sig)
* @param arg: user argument to func.
*/
void
ub_thr_fork_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
ub_thr_fork_create(ub_thread_type* thr, void* (*func)(void*), void* arg)
{
pid_t pid = fork();
switch(pid) {
default: /* main */
*thr = (ub_thread_t)pid;
*thr = (ub_thread_type)pid;
return;
case 0: /* child */
*thr = (ub_thread_t)getpid();
*thr = (ub_thread_type)getpid();
(void)(*func)(arg);
exit(0);
case -1: /* error */
@ -128,10 +128,10 @@ ub_thr_fork_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
/**
* There is no threading. Wait for a process to terminate.
* Note that ub_thread_t is defined as pid_t.
* Note that ub_thread_type is defined as pid_t.
* @param thread: the process id to wait for.
*/
void ub_thr_fork_wait(ub_thread_t thread)
void ub_thr_fork_wait(ub_thread_type thread)
{
int status = 0;
if(waitpid((pid_t)thread, &status, 0) == -1)
@ -143,7 +143,7 @@ void ub_thr_fork_wait(ub_thread_t thread)
#endif /* !defined(HAVE_PTHREAD) && !defined(HAVE_SOLARIS_THREADS) && !defined(HAVE_WINDOWS_THREADS) */
#ifdef HAVE_SOLARIS_THREADS
void* ub_thread_key_get(ub_thread_key_t key)
void* ub_thread_key_get(ub_thread_key_type key)
{
void* ret=NULL;
LOCKRET(thr_getspecific(key, &ret));
@ -167,19 +167,19 @@ static void log_win_err(const char* str, DWORD err)
LocalFree(buf);
}
void lock_basic_init(lock_basic_t* lock)
void lock_basic_init(lock_basic_type* lock)
{
/* implement our own lock, because using a windows HANDLE as a Mutex
 * uses too many handles and would bog down the whole system. */
(void)InterlockedExchange(lock, 0);
}
void lock_basic_destroy(lock_basic_t* lock)
void lock_basic_destroy(lock_basic_type* lock)
{
(void)InterlockedExchange(lock, 0);
}
void lock_basic_lock(lock_basic_t* lock)
void lock_basic_lock(lock_basic_type* lock)
{
LONG wait = 1; /* wait 1 msec at first */
@ -191,13 +191,13 @@ void lock_basic_lock(lock_basic_t* lock)
/* the old value was 0, but we inserted 1, we locked it! */
}
void lock_basic_unlock(lock_basic_t* lock)
void lock_basic_unlock(lock_basic_type* lock)
{
/* unlock it by inserting the value of 0. xchg for cache coherency. */
(void)InterlockedExchange(lock, 0);
}
void ub_thread_key_create(ub_thread_key_t* key, void* f)
void ub_thread_key_create(ub_thread_key_type* key, void* f)
{
*key = TlsAlloc();
if(*key == TLS_OUT_OF_INDEXES) {
@ -207,14 +207,14 @@ void ub_thread_key_create(ub_thread_key_t* key, void* f)
else ub_thread_key_set(*key, f);
}
void ub_thread_key_set(ub_thread_key_t key, void* v)
void ub_thread_key_set(ub_thread_key_type key, void* v)
{
if(!TlsSetValue(key, v)) {
log_win_err("TlsSetValue failed", GetLastError());
}
}
void* ub_thread_key_get(ub_thread_key_t key)
void* ub_thread_key_get(ub_thread_key_type key)
{
void* ret = (void*)TlsGetValue(key);
if(ret == NULL && GetLastError() != ERROR_SUCCESS) {
@ -223,7 +223,7 @@ void* ub_thread_key_get(ub_thread_key_t key)
return ret;
}
void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
void ub_thread_create(ub_thread_type* thr, void* (*func)(void*), void* arg)
{
#ifndef HAVE__BEGINTHREADEX
*thr = CreateThread(NULL, /* default security (no inherit handle) */
@ -233,7 +233,7 @@ void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
NULL); /* do not store thread identifier anywhere */
#else
/* the beginthreadex routine sets up for the C lib; aligns stack */
*thr=(ub_thread_t)_beginthreadex(NULL, 0, (void*)func, arg, 0, NULL);
*thr=(ub_thread_type)_beginthreadex(NULL, 0, (void*)func, arg, 0, NULL);
#endif
if(*thr == NULL) {
log_win_err("CreateThread failed", GetLastError());
@ -241,12 +241,12 @@ void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg)
}
}
ub_thread_t ub_thread_self(void)
ub_thread_type ub_thread_self(void)
{
return GetCurrentThread();
}
void ub_thread_join(ub_thread_t thr)
void ub_thread_join(ub_thread_type thr)
{
DWORD ret = WaitForSingleObject(thr, INFINITE);
if(ret == WAIT_FAILED) {

View File

@ -95,7 +95,7 @@
/******************* PTHREAD ************************/
/** use pthread mutex for basic lock */
typedef pthread_mutex_t lock_basic_t;
typedef pthread_mutex_t lock_basic_type;
/** small front for pthread init func, NULL is default attrs. */
#define lock_basic_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_basic_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
@ -104,7 +104,7 @@ typedef pthread_mutex_t lock_basic_t;
#ifndef HAVE_PTHREAD_RWLOCK_T
/** in case rwlocks are not supported, use a mutex. */
typedef pthread_mutex_t lock_rw_t;
typedef pthread_mutex_t lock_rw_type;
#define lock_rw_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_rw_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
#define lock_rw_rdlock(lock) LOCKRET(pthread_mutex_lock(lock))
@ -112,7 +112,7 @@ typedef pthread_mutex_t lock_rw_t;
#define lock_rw_unlock(lock) LOCKRET(pthread_mutex_unlock(lock))
#else /* HAVE_PTHREAD_RWLOCK_T */
/** we use the pthread rwlock */
typedef pthread_rwlock_t lock_rw_t;
typedef pthread_rwlock_t lock_rw_type;
/** small front for pthread init func, NULL is default attrs. */
#define lock_rw_init(lock) LOCKRET(pthread_rwlock_init(lock, NULL))
#define lock_rw_destroy(lock) LOCKRET(pthread_rwlock_destroy(lock))
@ -123,7 +123,7 @@ typedef pthread_rwlock_t lock_rw_t;
#ifndef HAVE_PTHREAD_SPINLOCK_T
/** in case spinlocks are not supported, use a mutex. */
typedef pthread_mutex_t lock_quick_t;
typedef pthread_mutex_t lock_quick_type;
/** small front for pthread init func, NULL is default attrs. */
#define lock_quick_init(lock) LOCKRET(pthread_mutex_init(lock, NULL))
#define lock_quick_destroy(lock) LOCKRET(pthread_mutex_destroy(lock))
@ -132,7 +132,7 @@ typedef pthread_mutex_t lock_quick_t;
#else /* HAVE_PTHREAD_SPINLOCK_T */
/** use pthread spinlock for the quick lock */
typedef pthread_spinlock_t lock_quick_t;
typedef pthread_spinlock_t lock_quick_type;
/**
* allocate process private since this is available whether
* Thread Process-Shared Synchronization is supported or not.
@ -148,7 +148,7 @@ typedef pthread_spinlock_t lock_quick_t;
#endif /* HAVE SPINLOCK */
/** Thread creation */
typedef pthread_t ub_thread_t;
typedef pthread_t ub_thread_type;
/** On Alpine Linux the default thread stack size is 80 Kb. See
http://wiki.musl-libc.org/wiki/Functional_differences_from_glibc#Thread_stack_size
This is not enough and causes segfaults. Other Linux distros have at least 2 Mb.
@ -172,7 +172,7 @@ Wrapper for set up thread stack size */
#define ub_thread_self() pthread_self()
/** wait for another thread to terminate */
#define ub_thread_join(thread) LOCKRET(pthread_join(thread, NULL))
typedef pthread_key_t ub_thread_key_t;
typedef pthread_key_t ub_thread_key_type;
#define ub_thread_key_create(key, f) LOCKRET(pthread_key_create(key, f))
#define ub_thread_key_set(key, v) LOCKRET(pthread_setspecific(key, v))
#define ub_thread_key_get(key) pthread_getspecific(key)
@ -184,7 +184,7 @@ typedef pthread_key_t ub_thread_key_t;
#include <synch.h>
#include <thread.h>
typedef rwlock_t lock_rw_t;
typedef rwlock_t lock_rw_type;
#define lock_rw_init(lock) LOCKRET(rwlock_init(lock, USYNC_THREAD, NULL))
#define lock_rw_destroy(lock) LOCKRET(rwlock_destroy(lock))
#define lock_rw_rdlock(lock) LOCKRET(rw_rdlock(lock))
@ -192,28 +192,28 @@ typedef rwlock_t lock_rw_t;
#define lock_rw_unlock(lock) LOCKRET(rw_unlock(lock))
/** use basic mutex */
typedef mutex_t lock_basic_t;
typedef mutex_t lock_basic_type;
#define lock_basic_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL))
#define lock_basic_destroy(lock) LOCKRET(mutex_destroy(lock))
#define lock_basic_lock(lock) LOCKRET(mutex_lock(lock))
#define lock_basic_unlock(lock) LOCKRET(mutex_unlock(lock))
/** No spinlocks in solaris threads API. Use a mutex. */
typedef mutex_t lock_quick_t;
typedef mutex_t lock_quick_type;
#define lock_quick_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL))
#define lock_quick_destroy(lock) LOCKRET(mutex_destroy(lock))
#define lock_quick_lock(lock) LOCKRET(mutex_lock(lock))
#define lock_quick_unlock(lock) LOCKRET(mutex_unlock(lock))
/** Thread creation, create a default thread. */
typedef thread_t ub_thread_t;
typedef thread_t ub_thread_type;
#define ub_thread_create(thr, func, arg) LOCKRET(thr_create(NULL, NULL, func, arg, NULL, thr))
#define ub_thread_self() thr_self()
#define ub_thread_join(thread) LOCKRET(thr_join(thread, NULL, NULL))
typedef thread_key_t ub_thread_key_t;
typedef thread_key_t ub_thread_key_type;
#define ub_thread_key_create(key, f) LOCKRET(thr_keycreate(key, f))
#define ub_thread_key_set(key, v) LOCKRET(thr_setspecific(key, v))
void* ub_thread_key_get(ub_thread_key_t key);
void* ub_thread_key_get(ub_thread_key_type key);
#else /* we do not HAVE_SOLARIS_THREADS and no PTHREADS */
@ -222,7 +222,7 @@ void* ub_thread_key_get(ub_thread_key_t key);
#include <windows.h>
/* Use a mutex */
typedef LONG lock_rw_t;
typedef LONG lock_rw_type;
#define lock_rw_init(lock) lock_basic_init(lock)
#define lock_rw_destroy(lock) lock_basic_destroy(lock)
#define lock_rw_rdlock(lock) lock_basic_lock(lock)
@ -230,35 +230,35 @@ typedef LONG lock_rw_t;
#define lock_rw_unlock(lock) lock_basic_unlock(lock)
/** the basic lock is a mutex, implemented opaquely, for error handling. */
typedef LONG lock_basic_t;
void lock_basic_init(lock_basic_t* lock);
void lock_basic_destroy(lock_basic_t* lock);
void lock_basic_lock(lock_basic_t* lock);
void lock_basic_unlock(lock_basic_t* lock);
typedef LONG lock_basic_type;
void lock_basic_init(lock_basic_type* lock);
void lock_basic_destroy(lock_basic_type* lock);
void lock_basic_lock(lock_basic_type* lock);
void lock_basic_unlock(lock_basic_type* lock);
/** on windows no spinlock, use mutex too. */
typedef LONG lock_quick_t;
typedef LONG lock_quick_type;
#define lock_quick_init(lock) lock_basic_init(lock)
#define lock_quick_destroy(lock) lock_basic_destroy(lock)
#define lock_quick_lock(lock) lock_basic_lock(lock)
#define lock_quick_unlock(lock) lock_basic_unlock(lock)
/** Thread creation, create a default thread. */
typedef HANDLE ub_thread_t;
void ub_thread_create(ub_thread_t* thr, void* (*func)(void*), void* arg);
ub_thread_t ub_thread_self(void);
void ub_thread_join(ub_thread_t thr);
typedef DWORD ub_thread_key_t;
void ub_thread_key_create(ub_thread_key_t* key, void* f);
void ub_thread_key_set(ub_thread_key_t key, void* v);
void* ub_thread_key_get(ub_thread_key_t key);
typedef HANDLE ub_thread_type;
void ub_thread_create(ub_thread_type* thr, void* (*func)(void*), void* arg);
ub_thread_type ub_thread_self(void);
void ub_thread_join(ub_thread_type thr);
typedef DWORD ub_thread_key_type;
void ub_thread_key_create(ub_thread_key_type* key, void* f);
void ub_thread_key_set(ub_thread_key_type key, void* v);
void* ub_thread_key_get(ub_thread_key_type key);
#else /* we do not HAVE_SOLARIS_THREADS, PTHREADS or WINDOWS_THREADS */
/******************* NO THREADS ************************/
#define THREADS_DISABLED 1
/** In case there is no thread support, define locks to do nothing */
typedef int lock_rw_t;
typedef int lock_rw_type;
#define lock_rw_init(lock) /* nop */
#define lock_rw_destroy(lock) /* nop */
#define lock_rw_rdlock(lock) /* nop */
@ -266,30 +266,30 @@ typedef int lock_rw_t;
#define lock_rw_unlock(lock) /* nop */
/** define locks to do nothing */
typedef int lock_basic_t;
typedef int lock_basic_type;
#define lock_basic_init(lock) /* nop */
#define lock_basic_destroy(lock) /* nop */
#define lock_basic_lock(lock) /* nop */
#define lock_basic_unlock(lock) /* nop */
/** define locks to do nothing */
typedef int lock_quick_t;
typedef int lock_quick_type;
#define lock_quick_init(lock) /* nop */
#define lock_quick_destroy(lock) /* nop */
#define lock_quick_lock(lock) /* nop */
#define lock_quick_unlock(lock) /* nop */
/** Thread creation, threads do not exist */
typedef pid_t ub_thread_t;
typedef pid_t ub_thread_type;
/** ub_thread_create is simulated with fork (extremely heavy threads,
* with no shared memory). */
#define ub_thread_create(thr, func, arg) \
ub_thr_fork_create(thr, func, arg)
#define ub_thread_self() getpid()
#define ub_thread_join(thread) ub_thr_fork_wait(thread)
void ub_thr_fork_wait(ub_thread_t thread);
void ub_thr_fork_create(ub_thread_t* thr, void* (*func)(void*), void* arg);
typedef void* ub_thread_key_t;
void ub_thr_fork_wait(ub_thread_type thread);
void ub_thr_fork_create(ub_thread_type* thr, void* (*func)(void*), void* arg);
typedef void* ub_thread_key_type;
#define ub_thread_key_create(key, f) (*(key)) = NULL
#define ub_thread_key_set(key, v) (key) = (v)
#define ub_thread_key_get(key) (key)
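
To illustrate the renamed abstractions in this header, a sketch under the assumption that any one backend is selected at build time; the same code compiles against pthreads, Solaris threads, the Windows shims, or the fork fallback (where, as noted above, there is no shared memory). The include path is an assumption.

#include "util/locks.h"	/* assumed header location */

static lock_basic_type counter_lock;
static int counter;

/* worker body: thread functions take and return void* in every backend */
static void* work(void* arg)
{
	(void)arg;
	lock_basic_lock(&counter_lock);
	counter++;
	lock_basic_unlock(&counter_lock);
	return NULL;
}

static void run_one_worker(void)
{
	ub_thread_type thr;
	lock_basic_init(&counter_lock);
	ub_thread_create(&thr, work, NULL);
	ub_thread_join(thr);
	lock_basic_destroy(&counter_lock);
}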

View File

@ -67,10 +67,10 @@ static FILE* logfile = 0;
/** if key has been created */
static int key_created = 0;
/** pthread key for thread ids in logfile */
static ub_thread_key_t logkey;
static ub_thread_key_type logkey;
#ifndef THREADS_DISABLED
/** pthread mutex to protect FILE* */
static lock_quick_t log_lock;
static lock_quick_type log_lock;
#endif
/** the identity of this executable/process */
static const char* ident="unbound";

View File

@ -147,7 +147,7 @@ static void handle_timeouts(struct event_base* base, struct timeval* now,
wait->tv_sec = (time_t)-1;
#endif
while((rbnode_t*)(p = (struct event*)rbtree_first(base->times))
while((rbnode_type*)(p = (struct event*)rbtree_first(base->times))
!=RBTREE_NULL) {
#ifndef S_SPLINT_S
if(p->ev_timeout.tv_sec > now->tv_sec ||

View File

@ -96,7 +96,7 @@
struct event_base
{
/** sorted by timeout (absolute), ptr */
rbtree_t* times;
rbtree_type* times;
/** array of 0 - maxfd of ptr to event for it */
struct event** fds;
/** max fd in use */
@ -128,7 +128,7 @@ struct event_base
*/
struct event {
/** node in timeout rbtree */
rbnode_t node;
rbnode_type node;
/** is event already added */
int added;

View File

@ -124,7 +124,7 @@ edns_register_option(uint16_t opt_code, int bypass_cache_stage,
}
static int
inplace_cb_reply_register_generic(inplace_cb_reply_func_t* cb,
inplace_cb_reply_register_generic(inplace_cb_reply_func_type* cb,
enum inplace_cb_list_type type, void* cb_arg, struct module_env* env)
{
struct inplace_cb_reply* callback;
@ -153,7 +153,7 @@ inplace_cb_reply_register_generic(inplace_cb_reply_func_t* cb,
}
int
inplace_cb_reply_register(inplace_cb_reply_func_t* cb, void* cb_arg,
inplace_cb_reply_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env)
{
return inplace_cb_reply_register_generic(cb, inplace_cb_reply, cb_arg,
@ -161,7 +161,7 @@ inplace_cb_reply_register(inplace_cb_reply_func_t* cb, void* cb_arg,
}
int
inplace_cb_reply_cache_register(inplace_cb_reply_func_t* cb, void* cb_arg,
inplace_cb_reply_cache_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env)
{
return inplace_cb_reply_register_generic(cb, inplace_cb_reply_cache,
@ -169,7 +169,7 @@ inplace_cb_reply_cache_register(inplace_cb_reply_func_t* cb, void* cb_arg,
}
int
inplace_cb_reply_local_register(inplace_cb_reply_func_t* cb, void* cb_arg,
inplace_cb_reply_local_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env)
{
return inplace_cb_reply_register_generic(cb, inplace_cb_reply_local,
@ -177,7 +177,7 @@ inplace_cb_reply_local_register(inplace_cb_reply_func_t* cb, void* cb_arg,
}
int
inplace_cb_reply_servfail_register(inplace_cb_reply_func_t* cb, void* cb_arg,
inplace_cb_reply_servfail_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env)
{
return inplace_cb_reply_register_generic(cb, inplace_cb_reply_servfail,
@ -216,7 +216,7 @@ void inplace_cb_reply_servfail_delete(struct module_env* env)
}
int
inplace_cb_query_register(inplace_cb_query_func_t* cb, void* cb_arg,
inplace_cb_query_register(inplace_cb_query_func_type* cb, void* cb_arg,
struct module_env* env)
{
struct inplace_cb_query* callback;
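
A sketch, assumed rather than from the patch, of registering one of the renamed reply callbacks; the parameter list follows the inplace_cb_reply_func_type typedef declared in the module header below, and the callback's return value semantics are not spelled out in this hunk.

/* Hypothetical module callback that leaves the reply untouched. */
static int
example_reply_cb(struct query_info* qinfo, struct module_qstate* qstate,
	struct reply_info* rep, int rcode, struct edns_data* edns,
	struct edns_option** opt_list_out, struct regional* region,
	void* python_callback)
{
	(void)qinfo; (void)qstate; (void)rep; (void)rcode; (void)edns;
	(void)opt_list_out; (void)region; (void)python_callback;
	return 1;	/* assumed: nonzero means the callback succeeded */
}

/* Register for the plain reply stage; false means out of memory or the
 * environment was already copied to the threads. */
static int
register_example_cb(struct module_env* env)
{
	return inplace_cb_reply_register(&example_reply_cb, NULL, env);
}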

View File

@ -226,7 +226,7 @@ struct edns_known_option {
* region: region to store data.
* python_callback: only used for registering a python callback function.
*/
typedef int inplace_cb_reply_func_t(struct query_info* qinfo,
typedef int inplace_cb_reply_func_type(struct query_info* qinfo,
struct module_qstate* qstate, struct reply_info* rep, int rcode,
struct edns_data* edns, struct edns_option** opt_list_out,
struct regional* region, void* python_callback);
@ -244,7 +244,7 @@ struct inplace_cb_reply {
* opt_list_out, region, python_callback);
* python_callback is only used for registering a python callback function.
*/
inplace_cb_reply_func_t* cb;
inplace_cb_reply_func_type* cb;
void* cb_arg;
};
@ -265,7 +265,7 @@ struct inplace_cb_reply {
* region: region to store data.
* python_callback: only used for registering a python callback function.
*/
typedef int inplace_cb_query_func_t(struct query_info* qinfo, uint16_t flags,
typedef int inplace_cb_query_func_type(struct query_info* qinfo, uint16_t flags,
struct module_qstate* qstate, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* zone, size_t zonelen, struct regional* region,
void* python_callback);
@ -283,7 +283,7 @@ struct inplace_cb_query {
* region, python_callback);
* python_callback is only used for registering a python callback function.
*/
inplace_cb_query_func_t* cb;
inplace_cb_query_func_type* cb;
void* cb_arg;
};
@ -688,7 +688,7 @@ int edns_register_option(uint16_t opt_code, int bypass_cache_stage,
* @return true on success, false on failure (out of memory or trying to
* register after the environment is copied to the threads.)
*/
int inplace_cb_reply_register(inplace_cb_reply_func_t* cb, void* cb_arg,
int inplace_cb_reply_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env);
/**
@ -699,7 +699,7 @@ int inplace_cb_reply_register(inplace_cb_reply_func_t* cb, void* cb_arg,
* @return true on success, false on failure (out of memory or trying to
* register after the environment is copied to the threads.)
*/
int inplace_cb_reply_cache_register(inplace_cb_reply_func_t* cb, void* cb_arg,
int inplace_cb_reply_cache_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env);
/**
@ -711,7 +711,7 @@ int inplace_cb_reply_cache_register(inplace_cb_reply_func_t* cb, void* cb_arg,
* @return true on success, false on failure (out of memory or trying to
* register after the environment is copied to the threads.)
*/
int inplace_cb_reply_local_register(inplace_cb_reply_func_t* cb, void* cb_arg,
int inplace_cb_reply_local_register(inplace_cb_reply_func_type* cb, void* cb_arg,
struct module_env* env);
/**
@ -722,7 +722,7 @@ int inplace_cb_reply_local_register(inplace_cb_reply_func_t* cb, void* cb_arg,
* @return true on success, false on failure (out of memory or trying to
* register after the environment is copied to the threads.)
*/
int inplace_cb_reply_servfail_register(inplace_cb_reply_func_t* cb,
int inplace_cb_reply_servfail_register(inplace_cb_reply_func_type* cb,
void* cb_arg, struct module_env* env);
/**
@ -751,7 +751,7 @@ void inplace_cb_reply_servfail_delete(struct module_env* env);
* @return true on success, false on failure (out of memory or trying to
* register after the environment is copied to the threads.)
*/
int inplace_cb_query_register(inplace_cb_query_func_t* cb, void* cb_arg,
int inplace_cb_query_register(inplace_cb_query_func_type* cb, void* cb_arg,
struct module_env* env);
/**

View File

@ -146,7 +146,7 @@ struct internal_signal {
/** create a tcp handler with a parent */
static struct comm_point* comm_point_create_tcp_handler(
struct comm_base *base, struct comm_point* parent, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/* -------- End of local definitions -------- */
@ -1573,7 +1573,7 @@ void comm_point_raw_handle_callback(int ATTR_UNUSED(fd),
struct comm_point*
comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1628,7 +1628,7 @@ comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
struct comm_point*
comm_point_create_udp_ancil(struct comm_base *base, int fd,
sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1683,7 +1683,7 @@ comm_point_create_udp_ancil(struct comm_base *base, int fd,
static struct comm_point*
comm_point_create_tcp_handler(struct comm_base *base,
struct comm_point* parent, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1749,7 +1749,7 @@ comm_point_create_tcp_handler(struct comm_base *base,
struct comm_point*
comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1820,7 +1820,7 @@ comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
struct comm_point*
comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1877,7 +1877,7 @@ comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
struct comm_point*
comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));
@ -1938,7 +1938,7 @@ comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
struct comm_point*
comm_point_create_raw(struct comm_base* base, int fd, int writing,
comm_point_callback_t* callback, void* callback_arg)
comm_point_callback_type* callback, void* callback_arg)
{
struct comm_point* c = (struct comm_point*)calloc(1,
sizeof(struct comm_point));

View File

@ -71,7 +71,7 @@ struct internal_base;
struct internal_timer; /* A sub struct of the comm_timer super struct */
/** callback from communication point function type */
typedef int comm_point_callback_t(struct comm_point*, void*, int,
typedef int comm_point_callback_type(struct comm_point*, void*, int,
struct comm_reply*);
/** to pass no_error to callback function */
@ -264,7 +264,7 @@ struct comm_point {
For UDP this is done without changing the commpoint.
In TCP it sets write state.
*/
comm_point_callback_t* callback;
comm_point_callback_type* callback;
/** argument to pass to callback. */
void *cb_arg;
};
@ -382,7 +382,7 @@ struct ub_event_base* comm_base_internal(struct comm_base* b);
*/
struct comm_point* comm_point_create_udp(struct comm_base* base,
int fd, struct sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/**
* Create an UDP with ancillary data comm point. Calls malloc.
@ -398,7 +398,7 @@ struct comm_point* comm_point_create_udp(struct comm_base* base,
*/
struct comm_point* comm_point_create_udp_ancil(struct comm_base* base,
int fd, struct sldns_buffer* buffer,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/**
* Create a TCP listener comm point. Calls malloc.
@ -419,7 +419,7 @@ struct comm_point* comm_point_create_udp_ancil(struct comm_base* base,
*/
struct comm_point* comm_point_create_tcp(struct comm_base* base,
int fd, int num, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/**
* Create an outgoing TCP commpoint. No file descriptor is opened, left at -1.
@ -430,7 +430,7 @@ struct comm_point* comm_point_create_tcp(struct comm_base* base,
* @return: the commpoint or NULL on error.
*/
struct comm_point* comm_point_create_tcp_out(struct comm_base* base,
size_t bufsize, comm_point_callback_t* callback, void* callback_arg);
size_t bufsize, comm_point_callback_type* callback, void* callback_arg);
/**
* Create commpoint to listen to a local domain file descriptor.
@ -443,7 +443,7 @@ struct comm_point* comm_point_create_tcp_out(struct comm_base* base,
*/
struct comm_point* comm_point_create_local(struct comm_base* base,
int fd, size_t bufsize,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/**
* Create commpoint to listen to a local domain pipe descriptor.
@ -456,7 +456,7 @@ struct comm_point* comm_point_create_local(struct comm_base* base,
*/
struct comm_point* comm_point_create_raw(struct comm_base* base,
int fd, int writing,
comm_point_callback_t* callback, void* callback_arg);
comm_point_callback_type* callback, void* callback_arg);
/**
* Close a comm point fd.
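
A sketch with assumed names, not part of the patch, of wiring a UDP comm point to a callback of the renamed comm_point_callback_type via the constructor declared above; in the real tree such a callback would also need an entry in the fptr whitelist earlier in this patch.

/* Hypothetical UDP handler matching comm_point_callback_type. */
static int
my_udp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply)
{
	(void)c; (void)arg; (void)error; (void)reply;
	return 0;	/* this sketch sends no reply */
}

/* Create the comm point on an already bound UDP socket fd. */
static struct comm_point*
setup_udp(struct comm_base* base, int fd, struct sldns_buffer* buf)
{
	return comm_point_create_udp(base, fd, buf, &my_udp_cb, NULL);
}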

View File

@ -50,7 +50,7 @@
#define RED 1
/** the NULL node, global alloc */
rbnode_t rbtree_null_node = {
rbnode_type rbtree_null_node = {
RBTREE_NULL, /* Parent. */
RBTREE_NULL, /* Left. */
RBTREE_NULL, /* Right. */
@ -59,13 +59,14 @@ rbnode_t rbtree_null_node = {
};
/** rotate subtree left (to preserve redblack property) */
static void rbtree_rotate_left(rbtree_t *rbtree, rbnode_t *node);
static void rbtree_rotate_left(rbtree_type *rbtree, rbnode_type *node);
/** rotate subtree right (to preserve redblack property) */
static void rbtree_rotate_right(rbtree_t *rbtree, rbnode_t *node);
static void rbtree_rotate_right(rbtree_type *rbtree, rbnode_type *node);
/** Fixup node colours when insert happened */
static void rbtree_insert_fixup(rbtree_t *rbtree, rbnode_t *node);
static void rbtree_insert_fixup(rbtree_type *rbtree, rbnode_type *node);
/** Fixup node colours when delete happened */
static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* child_parent);
static void rbtree_delete_fixup(rbtree_type* rbtree, rbnode_type* child,
rbnode_type* child_parent);
/*
* Creates a new red black tree, initializes and returns a pointer to it.
@ -73,13 +74,13 @@ static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* chi
* Return NULL on failure.
*
*/
rbtree_t *
rbtree_type *
rbtree_create (int (*cmpf)(const void *, const void *))
{
rbtree_t *rbtree;
rbtree_type *rbtree;
/* Allocate memory for it */
rbtree = (rbtree_t *) malloc(sizeof(rbtree_t));
rbtree = (rbtree_type *) malloc(sizeof(rbtree_type));
if (!rbtree) {
return NULL;
}
@ -91,7 +92,7 @@ rbtree_create (int (*cmpf)(const void *, const void *))
}
void
rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *))
rbtree_init(rbtree_type *rbtree, int (*cmpf)(const void *, const void *))
{
/* Initialize it */
rbtree->root = RBTREE_NULL;
@ -104,9 +105,9 @@ rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *))
*
*/
static void
rbtree_rotate_left(rbtree_t *rbtree, rbnode_t *node)
rbtree_rotate_left(rbtree_type *rbtree, rbnode_type *node)
{
rbnode_t *right = node->right;
rbnode_type *right = node->right;
node->right = right->left;
if (right->left != RBTREE_NULL)
right->left->parent = node;
@ -131,9 +132,9 @@ rbtree_rotate_left(rbtree_t *rbtree, rbnode_t *node)
*
*/
static void
rbtree_rotate_right(rbtree_t *rbtree, rbnode_t *node)
rbtree_rotate_right(rbtree_type *rbtree, rbnode_type *node)
{
rbnode_t *left = node->left;
rbnode_type *left = node->left;
node->left = left->right;
if (left->right != RBTREE_NULL)
left->right->parent = node;
@ -154,9 +155,9 @@ rbtree_rotate_right(rbtree_t *rbtree, rbnode_t *node)
}
static void
rbtree_insert_fixup(rbtree_t *rbtree, rbnode_t *node)
rbtree_insert_fixup(rbtree_type *rbtree, rbnode_type *node)
{
rbnode_t *uncle;
rbnode_type *uncle;
/* While not at the root and need fixing... */
while (node != rbtree->root && node->parent->color == RED) {
@ -223,15 +224,15 @@ rbtree_insert_fixup(rbtree_t *rbtree, rbnode_t *node)
* Returns NULL on failure or the pointer to the newly added node
* otherwise.
*/
rbnode_t *
rbtree_insert (rbtree_t *rbtree, rbnode_t *data)
rbnode_type *
rbtree_insert (rbtree_type *rbtree, rbnode_type *data)
{
/* XXX Not necessary, but keeps compiler quiet... */
int r = 0;
/* We start at the root of the tree */
rbnode_t *node = rbtree->root;
rbnode_t *parent = RBTREE_NULL;
rbnode_type *node = rbtree->root;
rbnode_type *parent = RBTREE_NULL;
fptr_ok(fptr_whitelist_rbtree_cmp(rbtree->cmp));
/* Let's find the new parent... */
@ -276,10 +277,10 @@ rbtree_insert (rbtree_t *rbtree, rbnode_t *data)
* Searches the red black tree, returns the data if key is found or NULL otherwise.
*
*/
rbnode_t *
rbtree_search (rbtree_t *rbtree, const void *key)
rbnode_type *
rbtree_search (rbtree_type *rbtree, const void *key)
{
rbnode_t *node;
rbnode_type *node;
if (rbtree_find_less_equal(rbtree, key, &node)) {
return node;
@ -295,13 +296,14 @@ static void swap_int8(uint8_t* x, uint8_t* y)
}
/** helpers for delete: swap node pointers */
static void swap_np(rbnode_t** x, rbnode_t** y)
static void swap_np(rbnode_type** x, rbnode_type** y)
{
rbnode_t* t = *x; *x = *y; *y = t;
rbnode_type* t = *x; *x = *y; *y = t;
}
/** Update parent pointers of child trees of 'parent' */
static void change_parent_ptr(rbtree_t* rbtree, rbnode_t* parent, rbnode_t* old, rbnode_t* new)
static void change_parent_ptr(rbtree_type* rbtree, rbnode_type* parent,
rbnode_type* old, rbnode_type* new)
{
if(parent == RBTREE_NULL)
{
@ -315,18 +317,19 @@ static void change_parent_ptr(rbtree_t* rbtree, rbnode_t* parent, rbnode_t* old,
if(parent->right == old) parent->right = new;
}
/** Update parent pointer of a node 'child' */
static void change_child_ptr(rbnode_t* child, rbnode_t* old, rbnode_t* new)
static void change_child_ptr(rbnode_type* child, rbnode_type* old,
rbnode_type* new)
{
if(child == RBTREE_NULL) return;
log_assert(child->parent == old || child->parent == new);
if(child->parent == old) child->parent = new;
}
rbnode_t*
rbtree_delete(rbtree_t *rbtree, const void *key)
rbnode_type*
rbtree_delete(rbtree_type *rbtree, const void *key)
{
rbnode_t *to_delete;
rbnode_t *child;
rbnode_type *to_delete;
rbnode_type *child;
if((to_delete = rbtree_search(rbtree, key)) == 0) return 0;
rbtree->count--;
@ -334,11 +337,11 @@ rbtree_delete(rbtree_t *rbtree, const void *key)
if(to_delete->left != RBTREE_NULL && to_delete->right != RBTREE_NULL)
{
/* swap with smallest from right subtree (or largest from left) */
rbnode_t *smright = to_delete->right;
rbnode_type *smright = to_delete->right;
while(smright->left != RBTREE_NULL)
smright = smright->left;
/* swap the smright and to_delete elements in the tree,
* but the rbnode_t is first part of user data struct
* but the rbnode_type is first part of user data struct
* so cannot just swap the keys and data pointers. Instead
* readjust the pointers left,right,parent */
@ -400,9 +403,10 @@ rbtree_delete(rbtree_t *rbtree, const void *key)
return to_delete;
}
static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* child_parent)
static void rbtree_delete_fixup(rbtree_type* rbtree, rbnode_type* child,
rbnode_type* child_parent)
{
rbnode_t* sibling;
rbnode_type* sibling;
int go_up = 1;
/* determine sibling to the node that is one-black short */
@ -504,10 +508,11 @@ static void rbtree_delete_fixup(rbtree_t* rbtree, rbnode_t* child, rbnode_t* chi
}
int
rbtree_find_less_equal(rbtree_t *rbtree, const void *key, rbnode_t **result)
rbtree_find_less_equal(rbtree_type *rbtree, const void *key,
rbnode_type **result)
{
int r;
rbnode_t *node;
rbnode_type *node;
log_assert(result);
@ -540,19 +545,19 @@ rbtree_find_less_equal(rbtree_t *rbtree, const void *key, rbnode_t **result)
* Finds the first element in the red black tree
*
*/
rbnode_t *
rbtree_first (rbtree_t *rbtree)
rbnode_type *
rbtree_first (rbtree_type *rbtree)
{
rbnode_t *node;
rbnode_type *node;
for (node = rbtree->root; node->left != RBTREE_NULL; node = node->left);
return node;
}
rbnode_t *
rbtree_last (rbtree_t *rbtree)
rbnode_type *
rbtree_last (rbtree_type *rbtree)
{
rbnode_t *node;
rbnode_type *node;
for (node = rbtree->root; node->right != RBTREE_NULL; node = node->right);
return node;
@ -562,10 +567,10 @@ rbtree_last (rbtree_t *rbtree)
* Returns the next node...
*
*/
rbnode_t *
rbtree_next (rbnode_t *node)
rbnode_type *
rbtree_next (rbnode_type *node)
{
rbnode_t *parent;
rbnode_type *parent;
if (node->right != RBTREE_NULL) {
/* One right, then keep on going left... */
@ -581,10 +586,10 @@ rbtree_next (rbnode_t *node)
return node;
}
rbnode_t *
rbtree_previous(rbnode_t *node)
rbnode_type *
rbtree_previous(rbnode_type *node)
{
rbnode_t *parent;
rbnode_type *parent;
if (node->left != RBTREE_NULL) {
/* One left, then keep on going right... */
@ -602,7 +607,7 @@ rbtree_previous(rbnode_t *node)
/** recursive descent traverse */
static void
traverse_post(void (*func)(rbnode_t*, void*), void* arg, rbnode_t* node)
traverse_post(void (*func)(rbnode_type*, void*), void* arg, rbnode_type* node)
{
if(!node || node == RBTREE_NULL)
return;
@ -614,7 +619,7 @@ traverse_post(void (*func)(rbnode_t*, void*), void* arg, rbnode_t* node)
}
void
traverse_postorder(rbtree_t* tree, void (*func)(rbnode_t*, void*), void* arg)
traverse_postorder(rbtree_type* tree, void (*func)(rbnode_type*, void*), void* arg)
{
traverse_post(func, arg, tree->root);
}

View File

@ -45,20 +45,20 @@
/**
* This structure must be the first member of the data structure in
* the rbtree. This allows easy casting between an rbnode_t and the
* the rbtree. This allows easy casting between an rbnode_type and the
* user data (poor man's inheritance).
*/
typedef struct rbnode_t rbnode_t;
typedef struct rbnode_type rbnode_type;
/**
* The rbnode_t struct definition.
* The rbnode_type struct definition.
*/
struct rbnode_t {
struct rbnode_type {
/** parent in rbtree, RBTREE_NULL for root */
rbnode_t *parent;
rbnode_type *parent;
/** left node (smaller items) */
rbnode_t *left;
rbnode_type *left;
/** right node (larger items) */
rbnode_t *right;
rbnode_type *right;
/** pointer to sorting key */
const void *key;
/** colour of this node */
@ -68,14 +68,14 @@ struct rbnode_t {
/** The nullpointer, points to empty node */
#define RBTREE_NULL &rbtree_null_node
/** the global empty node */
extern rbnode_t rbtree_null_node;
extern rbnode_type rbtree_null_node;
/** An entire red black tree */
typedef struct rbtree_t rbtree_t;
typedef struct rbtree_type rbtree_type;
/** definition for tree struct */
struct rbtree_t {
struct rbtree_type {
/** The root of the red-black tree */
rbnode_t *root;
rbnode_type *root;
/** The number of the nodes in the tree */
size_t count;
@ -92,14 +92,14 @@ struct rbtree_t {
* @param cmpf: compare function (like strcmp) takes pointers to two keys.
* @return: new tree, empty.
*/
rbtree_t *rbtree_create(int (*cmpf)(const void *, const void *));
rbtree_type *rbtree_create(int (*cmpf)(const void *, const void *));
/**
* Init a new tree (malloced by caller) with given key compare function.
* @param rbtree: uninitialised memory for new tree, returned empty.
* @param cmpf: compare function (like strcmp) takes pointers to two keys.
*/
void rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *));
void rbtree_init(rbtree_type *rbtree, int (*cmpf)(const void *, const void *));
/**
* Insert data into the tree.
@ -107,7 +107,7 @@ void rbtree_init(rbtree_t *rbtree, int (*cmpf)(const void *, const void *));
* @param data: element to insert.
* @return: data ptr or NULL if key already present.
*/
rbnode_t *rbtree_insert(rbtree_t *rbtree, rbnode_t *data);
rbnode_type *rbtree_insert(rbtree_type *rbtree, rbnode_type *data);
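For illustration only (not part of this change), a minimal caller sketch of the insert API with the renamed types; struct myitem, myitem_cmp and myitem_add are hypothetical, and the header path is assumed from the UTIL_RBTREE_H_ guard below.
#include "util/rbtree.h"	/* header path assumed for this sketch */
/* hypothetical element: rbnode_type must be the first member so the
 * tree node can be cast back to the containing struct */
struct myitem {
	rbnode_type node;
	int id;			/* the sort key */
};
/* compare function; receives the key pointers stored in node.key */
static int myitem_cmp(const void* a, const void* b)
{
	int x = *(const int*)a, y = *(const int*)b;
	return x < y ? -1 : (x > y ? 1 : 0);
}
/* the caller points node.key at the sort key before inserting */
static int myitem_add(rbtree_type* tree, struct myitem* it)
{
	it->node.key = &it->id;
	return rbtree_insert(tree, &it->node) != NULL;	/* NULL: duplicate key */
}
A tree for these items would come from rbtree_create(&myitem_cmp), or from rbtree_init on caller-owned memory, as declared above.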
/**
* Delete element from tree.
@ -116,7 +116,7 @@ rbnode_t *rbtree_insert(rbtree_t *rbtree, rbnode_t *data);
* @return: node that is now unlinked from the tree. User to delete it.
* returns 0 if node not present
*/
rbnode_t *rbtree_delete(rbtree_t *rbtree, const void *key);
rbnode_type *rbtree_delete(rbtree_type *rbtree, const void *key);
/**
* Find key in tree. Returns NULL if not found.
@ -124,7 +124,7 @@ rbnode_t *rbtree_delete(rbtree_t *rbtree, const void *key);
* @param key: key that must match.
* @return: node that fits or NULL.
*/
rbnode_t *rbtree_search(rbtree_t *rbtree, const void *key);
rbnode_type *rbtree_search(rbtree_type *rbtree, const void *key);
/**
* Find, but match does not have to be exact.
@ -135,45 +135,45 @@ rbnode_t *rbtree_search(rbtree_t *rbtree, const void *key);
* @return: true if exact match in result. Else result points to <= element,
* or NULL if key is smaller than the smallest key.
*/
int rbtree_find_less_equal(rbtree_t *rbtree, const void *key,
rbnode_t **result);
int rbtree_find_less_equal(rbtree_type *rbtree, const void *key,
rbnode_type **result);
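Purely as an illustration of the less-or-equal contract just documented, a fragment from a hypothetical lookup routine (tree and key continue the sketch above):
rbnode_type* n = NULL;
if(rbtree_find_less_equal(tree, &key, &n)) {
	/* exact match: n is the node whose key equals key */
} else if(n != NULL) {
	/* no exact match: n is the largest element smaller than key */
} else {
	/* key sorts before the smallest element (or the tree is empty) */
}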
/**
* Returns first (smallest) node in the tree
* @param rbtree: tree
* @return: smallest element or NULL if tree empty.
*/
rbnode_t *rbtree_first(rbtree_t *rbtree);
rbnode_type *rbtree_first(rbtree_type *rbtree);
/**
* Returns last (largest) node in the tree
* @param rbtree: tree
* @return: largest element or NULL if tree empty.
*/
rbnode_t *rbtree_last(rbtree_t *rbtree);
rbnode_type *rbtree_last(rbtree_type *rbtree);
/**
* Returns next larger node in the tree
* @param rbtree: tree
* @return: next larger element or NULL if no larger in tree.
*/
rbnode_t *rbtree_next(rbnode_t *rbtree);
rbnode_type *rbtree_next(rbnode_type *rbtree);
/**
* Returns previous smaller node in the tree
* @param rbtree: tree
* @return: previous smaller element or NULL if no previous in tree.
*/
rbnode_t *rbtree_previous(rbnode_t *rbtree);
rbnode_type *rbtree_previous(rbnode_type *rbtree);
/**
* Call with node=variable of struct* with rbnode_t as first element.
* Call with node=variable of struct* with rbnode_type as first element.
* where type is the type of a pointer to that struct.
*/
#define RBTREE_FOR(node, type, rbtree) \
for(node=(type)rbtree_first(rbtree); \
(rbnode_t*)node != RBTREE_NULL; \
node = (type)rbtree_next((rbnode_t*)node))
(rbnode_type*)node != RBTREE_NULL; \
node = (type)rbtree_next((rbnode_type*)node))
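And a small iteration sketch (illustrative only, reusing the hypothetical struct myitem from the insert example above):
static size_t count_items(rbtree_type* tree)
{
	size_t count = 0;
	struct myitem* p;
	RBTREE_FOR(p, struct myitem*, tree)
		count++;	/* p visits every element in ascending key order */
	return count;
}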
/**
* Call function for all elements in the redblack tree, such that
@ -186,7 +186,7 @@ rbnode_t *rbtree_previous(rbnode_t *rbtree);
* The function must not alter the rbtree.
* @param arg: user argument.
*/
void traverse_postorder(rbtree_t* tree, void (*func)(rbnode_t*, void*),
void traverse_postorder(rbtree_type* tree, void (*func)(rbnode_type*, void*),
void* arg);
#endif /* UTIL_RBTREE_H_ */

View File

@ -71,17 +71,17 @@ int addr_tree_compare(const void* k1, const void* k2)
return 0;
}
void name_tree_init(rbtree_t* tree)
void name_tree_init(rbtree_type* tree)
{
rbtree_init(tree, &name_tree_compare);
}
void addr_tree_init(rbtree_t* tree)
void addr_tree_init(rbtree_type* tree)
{
rbtree_init(tree, &addr_tree_compare);
}
int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
int name_tree_insert(rbtree_type* tree, struct name_tree_node* node,
uint8_t* name, size_t len, int labs, uint16_t dclass)
{
node->node.key = node;
@ -93,7 +93,7 @@ int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
return rbtree_insert(tree, &node->node) != NULL;
}
int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
int addr_tree_insert(rbtree_type* tree, struct addr_tree_node* node,
struct sockaddr_storage* addr, socklen_t addrlen, int net)
{
node->node.key = node;
@ -104,7 +104,7 @@ int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
return rbtree_insert(tree, &node->node) != NULL;
}
void addr_tree_init_parents(rbtree_t* tree)
void addr_tree_init_parents(rbtree_type* tree)
{
struct addr_tree_node* node, *prev = NULL, *p;
int m;
@ -130,7 +130,7 @@ void addr_tree_init_parents(rbtree_t* tree)
}
}
void name_tree_init_parents(rbtree_t* tree)
void name_tree_init_parents(rbtree_type* tree)
{
struct name_tree_node* node, *prev = NULL, *p;
int m;
@ -156,7 +156,7 @@ void name_tree_init_parents(rbtree_t* tree)
}
}
struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
struct name_tree_node* name_tree_find(rbtree_type* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass)
{
struct name_tree_node key;
@ -168,10 +168,10 @@ struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
return (struct name_tree_node*)rbtree_search(tree, &key);
}
struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
struct name_tree_node* name_tree_lookup(rbtree_type* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass)
{
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct name_tree_node *result;
struct name_tree_node key;
key.node.key = &key;
@ -200,10 +200,10 @@ struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
return result;
}
struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
struct addr_tree_node* addr_tree_lookup(rbtree_type* tree,
struct sockaddr_storage* addr, socklen_t addrlen)
{
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct addr_tree_node* result;
struct addr_tree_node key;
key.node.key = &key;
@ -231,10 +231,10 @@ struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
return result;
}
struct addr_tree_node* addr_tree_find(rbtree_t* tree,
struct addr_tree_node* addr_tree_find(rbtree_type* tree,
struct sockaddr_storage* addr, socklen_t addrlen, int net)
{
rbnode_t* res = NULL;
rbnode_type* res = NULL;
struct addr_tree_node key;
key.node.key = &key;
memcpy(&key.addr, addr, addrlen);
@ -245,10 +245,10 @@ struct addr_tree_node* addr_tree_find(rbtree_t* tree,
}
int
name_tree_next_root(rbtree_t* tree, uint16_t* dclass)
name_tree_next_root(rbtree_type* tree, uint16_t* dclass)
{
struct name_tree_node key;
rbnode_t* n;
rbnode_type* n;
struct name_tree_node* p;
if(*dclass == 0) {
/* first root item is first item in tree */

View File

@ -49,12 +49,12 @@
* This is not sorted canonically, but fast.
* This can be looked up to obtain a closest encloser parent name.
*
* The tree itself is a rbtree_t.
* The tree itself is a rbtree_type.
* This is the element node put as first entry in the client structure.
*/
struct name_tree_node {
/** rbtree node, key is this struct : dclass and name */
rbnode_t node;
rbnode_type node;
/** parent in tree */
struct name_tree_node* parent;
/** name in uncompressed wireformat */
@ -71,12 +71,12 @@ struct name_tree_node {
* Tree of IP addresses. Sorted first by protocol, then by bits.
* This can be looked up to obtain the enclosing subnet.
*
* The tree itself is a rbtree_t.
* The tree itself is a rbtree_type.
* This is the element node put as first entry in the client structure.
*/
struct addr_tree_node {
/** rbtree node, key is this struct : proto and subnet */
rbnode_t node;
rbnode_type node;
/** parent in tree */
struct addr_tree_node* parent;
/** address */
@ -91,7 +91,7 @@ struct addr_tree_node {
* Init a name tree to be empty
* @param tree: to init.
*/
void name_tree_init(rbtree_t* tree);
void name_tree_init(rbtree_type* tree);
/**
* insert element into name tree.
@ -105,7 +105,7 @@ void name_tree_init(rbtree_t* tree);
* @param dclass: class of name
* @return false on error (duplicate element).
*/
int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
int name_tree_insert(rbtree_type* tree, struct name_tree_node* node,
uint8_t* name, size_t len, int labs, uint16_t dclass);
/**
@ -113,7 +113,7 @@ int name_tree_insert(rbtree_t* tree, struct name_tree_node* node,
* Should be performed after insertions are done, before lookups
* @param tree: name tree
*/
void name_tree_init_parents(rbtree_t* tree);
void name_tree_init_parents(rbtree_type* tree);
/**
* Lookup exact match in name tree
@ -124,7 +124,7 @@ void name_tree_init_parents(rbtree_t* tree);
* @param dclass: class of name
* @return node or NULL if not found.
*/
struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
struct name_tree_node* name_tree_find(rbtree_type* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass);
/**
@ -136,7 +136,7 @@ struct name_tree_node* name_tree_find(rbtree_t* tree, uint8_t* name,
* @param dclass: class of name
* @return closest enclosing node (could be equal) or NULL if not found.
*/
struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
struct name_tree_node* name_tree_lookup(rbtree_type* tree, uint8_t* name,
size_t len, int labs, uint16_t dclass);
/**
@ -145,13 +145,13 @@ struct name_tree_node* name_tree_lookup(rbtree_t* tree, uint8_t* name,
* @param dclass: the class to look for next (or higher).
* @return false if no classes found, true means class put into c.
*/
int name_tree_next_root(rbtree_t* tree, uint16_t* dclass);
int name_tree_next_root(rbtree_type* tree, uint16_t* dclass);
/**
* Init addr tree to be empty.
* @param tree: to init.
*/
void addr_tree_init(rbtree_t* tree);
void addr_tree_init(rbtree_type* tree);
/**
* insert element into addr tree.
@ -163,7 +163,7 @@ void addr_tree_init(rbtree_t* tree);
* @param net: size of subnet.
* @return false on error (duplicate element).
*/
int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
int addr_tree_insert(rbtree_type* tree, struct addr_tree_node* node,
struct sockaddr_storage* addr, socklen_t addrlen, int net);
/**
@ -171,7 +171,7 @@ int addr_tree_insert(rbtree_t* tree, struct addr_tree_node* node,
* Should be performed after insertions are done, before lookups
* @param tree: addr tree
*/
void addr_tree_init_parents(rbtree_t* tree);
void addr_tree_init_parents(rbtree_type* tree);
/**
* Lookup closest encloser in addr tree.
@ -180,7 +180,7 @@ void addr_tree_init_parents(rbtree_t* tree);
* @param addrlen: length of addr
* @return closest enclosing node (could be equal) or NULL if not found.
*/
struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
struct addr_tree_node* addr_tree_lookup(rbtree_type* tree,
struct sockaddr_storage* addr, socklen_t addrlen);
/**
@ -191,7 +191,7 @@ struct addr_tree_node* addr_tree_lookup(rbtree_t* tree,
* @param net: size of subnet
* @return addr tree element, or NULL if not found.
*/
struct addr_tree_node* addr_tree_find(rbtree_t* tree,
struct addr_tree_node* addr_tree_find(rbtree_type* tree,
struct sockaddr_storage* addr, socklen_t addrlen, int net);
/** compare name tree nodes */
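Taken together, an illustrative call sequence for the address tree (outside this change): init, insert, init_parents, then lookups. The /24 prefix and the names are made up, the caller is assumed to have filled addr, and error handling is minimal.
#include <stdlib.h>
#include "util/storage/dnstree.h"	/* header path assumed for this sketch */
static void example_addr_tree(struct sockaddr_storage* addr, socklen_t addrlen)
{
	rbtree_type tree;
	struct addr_tree_node* n = (struct addr_tree_node*)calloc(1, sizeof(*n));
	struct addr_tree_node* hit;
	if(!n) return;
	addr_tree_init(&tree);
	if(!addr_tree_insert(&tree, n, addr, addrlen, 24)) {
		free(n);	/* false: duplicate element, node was not linked in */
	}
	addr_tree_init_parents(&tree);	/* after all inserts, before lookups */
	/* closest enclosing subnet for addr, or NULL if nothing covers it */
	hit = addr_tree_lookup(&tree, addr, addrlen);
	(void)hit;
}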

View File

@ -59,9 +59,10 @@ bin_init(struct lruhash_bin* array, size_t size)
}
struct lruhash*
lruhash_create(size_t start_size, size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg)
lruhash_create(size_t start_size, size_t maxmem,
lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
lruhash_delkeyfunc_type delkeyfunc,
lruhash_deldatafunc_type deldatafunc, void* arg)
{
struct lruhash* table = (struct lruhash*)calloc(1,
sizeof(struct lruhash));
@ -215,7 +216,7 @@ reclaim_space(struct lruhash* table, struct lruhash_entry** list)
struct lruhash_entry*
bin_find_entry(struct lruhash* table,
struct lruhash_bin* bin, hashvalue_t hash, void* key)
struct lruhash_bin* bin, hashvalue_type hash, void* key)
{
struct lruhash_entry* p = bin->overflow_list;
while(p) {
@ -296,7 +297,7 @@ lru_touch(struct lruhash* table, struct lruhash_entry* entry)
}
void
lruhash_insert(struct lruhash* table, hashvalue_t hash,
lruhash_insert(struct lruhash* table, hashvalue_type hash,
struct lruhash_entry* entry, void* data, void* cb_arg)
{
struct lruhash_bin* bin;
@ -352,7 +353,7 @@ lruhash_insert(struct lruhash* table, hashvalue_t hash,
}
struct lruhash_entry*
lruhash_lookup(struct lruhash* table, hashvalue_t hash, void* key, int wr)
lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
{
struct lruhash_entry* entry;
struct lruhash_bin* bin;
@ -374,7 +375,7 @@ lruhash_lookup(struct lruhash* table, hashvalue_t hash, void* key, int wr)
}
void
lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
{
struct lruhash_entry* entry;
struct lruhash_bin* bin;
@ -512,7 +513,7 @@ lruhash_get_mem(struct lruhash* table)
}
void
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md)
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
{
lock_quick_lock(&table->lock);
table->markdelfunc = md;

View File

@ -116,7 +116,7 @@ struct lruhash_entry;
#define HASH_DEFAULT_MAXMEM 4*1024*1024 /* bytes */
/** the type of a hash value */
typedef uint32_t hashvalue_t;
typedef uint32_t hashvalue_type;
/**
* Type of function that calculates the size of an entry.
@ -124,39 +124,39 @@ typedef uint32_t hashvalue_t;
* Keys that are identical must also calculate to the same size.
* size = func(key, data).
*/
typedef size_t (*lruhash_sizefunc_t)(void*, void*);
typedef size_t (*lruhash_sizefunc_type)(void*, void*);
/** type of function that compares two keys. return 0 if equal. */
typedef int (*lruhash_compfunc_t)(void*, void*);
typedef int (*lruhash_compfunc_type)(void*, void*);
/** old keys are deleted.
* The RRset type has to revoke its ID number, markdel() is used first.
* This function is called: func(key, userarg) */
typedef void (*lruhash_delkeyfunc_t)(void*, void*);
typedef void (*lruhash_delkeyfunc_type)(void*, void*);
/** old data is deleted. This function is called: func(data, userarg). */
typedef void (*lruhash_deldatafunc_t)(void*, void*);
typedef void (*lruhash_deldatafunc_type)(void*, void*);
/** mark a key as pending to be deleted (and not to be used by anyone).
* called: func(key) */
typedef void (*lruhash_markdelfunc_t)(void*);
typedef void (*lruhash_markdelfunc_type)(void*);
/**
* Hash table that keeps LRU list of entries.
*/
struct lruhash {
/** lock for exclusive access, to the lookup array */
lock_quick_t lock;
lock_quick_type lock;
/** the size function for entries in this table */
lruhash_sizefunc_t sizefunc;
lruhash_sizefunc_type sizefunc;
/** the compare function for entries in this table. */
lruhash_compfunc_t compfunc;
lruhash_compfunc_type compfunc;
/** how to delete keys. */
lruhash_delkeyfunc_t delkeyfunc;
lruhash_delkeyfunc_type delkeyfunc;
/** how to delete data. */
lruhash_deldatafunc_t deldatafunc;
lruhash_deldatafunc_type deldatafunc;
/** how to mark a key pending deletion */
lruhash_markdelfunc_t markdelfunc;
lruhash_markdelfunc_type markdelfunc;
/** user argument for user functions */
void* cb_arg;
@ -188,7 +188,7 @@ struct lruhash_bin {
* Lock for exclusive access to the linked list
* This lock makes deletion of items safe in this overflow list.
*/
lock_quick_t lock;
lock_quick_type lock;
/** linked list of overflow entries */
struct lruhash_entry* overflow_list;
};
@ -207,7 +207,7 @@ struct lruhash_entry {
* Even with a writelock, you cannot change hash and key.
* You need to delete it to change hash or key.
*/
lock_rw_t lock;
lock_rw_type lock;
/** next entry in overflow chain. Covered by hashlock and binlock. */
struct lruhash_entry* overflow_next;
/** next entry in lru chain. covered by hashlock. */
@ -215,7 +215,7 @@ struct lruhash_entry {
/** prev entry in lru chain. covered by hashlock. */
struct lruhash_entry* lru_prev;
/** hash value of the key. It may not change, until entry deleted. */
hashvalue_t hash;
hashvalue_type hash;
/** key */
void* key;
/** data */
@ -236,9 +236,9 @@ struct lruhash_entry {
* @return: new hash table or NULL on malloc failure.
*/
struct lruhash* lruhash_create(size_t start_size, size_t maxmem,
lruhash_sizefunc_t sizefunc, lruhash_compfunc_t compfunc,
lruhash_delkeyfunc_t delkeyfunc, lruhash_deldatafunc_t deldatafunc,
void* arg);
lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
lruhash_delkeyfunc_type delkeyfunc,
lruhash_deldatafunc_type deldatafunc, void* arg);
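For illustration only (not part of the rename): how the renamed callback types and lruhash_create fit together. The my_* callbacks, struct mykey, the toy hash and the literal sizes are hypothetical; entry lock initialisation and error cleanup are omitted, and lock_rw_unlock comes from Unbound's locking macros outside this diff.
#include <stdlib.h>
#include "util/storage/lruhash.h"	/* header path assumed for this sketch */
/* hypothetical key struct embedding the lruhash_entry */
struct mykey {
	struct lruhash_entry entry;
	int id;
};
static size_t my_size(void* key, void* data)
{ (void)key; (void)data; return sizeof(struct mykey) + sizeof(int); }
static int my_comp(void* k1, void* k2)
{ return ((struct mykey*)k1)->id - ((struct mykey*)k2)->id; }
static void my_delkey(void* key, void* arg)
{ (void)arg; free(key); }
static void my_deldata(void* data, void* arg)
{ (void)arg; free(data); }
static void example_lruhash(void)
{
	struct lruhash* table = lruhash_create(1024, HASH_DEFAULT_MAXMEM,
		&my_size, &my_comp, &my_delkey, &my_deldata, NULL);
	struct mykey* k = (struct mykey*)calloc(1, sizeof(*k));
	int* d = (int*)malloc(sizeof(int));
	hashvalue_type h;
	struct lruhash_entry* e;
	if(!table || !k || !d) return;
	k->id = 7; *d = 42;
	h = (hashvalue_type)k->id;	/* toy hash, just for the sketch */
	/* the caller prepares the embedded entry (lock init omitted here) */
	k->entry.hash = h; k->entry.key = k; k->entry.data = d;
	lruhash_insert(table, h, &k->entry, d, NULL);
	/* lookup returns the entry locked (read lock when wr is 0) */
	e = lruhash_lookup(table, h, k, 0);
	if(e) {
		/* ... use e->data ... then release the entry lock */
		lock_rw_unlock(&e->lock);
	}
	lruhash_delete(table);	/* entries are freed via the del callbacks */
}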
/**
* Delete hash table. Entries are all deleted.
@ -269,7 +269,7 @@ void lruhash_clear(struct lruhash* table);
* @param data: the data.
* @param cb_override: if not null overrides the cb_arg for the deletefunc.
*/
void lruhash_insert(struct lruhash* table, hashvalue_t hash,
void lruhash_insert(struct lruhash* table, hashvalue_type hash,
struct lruhash_entry* entry, void* data, void* cb_override);
/**
@ -285,8 +285,8 @@ void lruhash_insert(struct lruhash* table, hashvalue_t hash,
* @return: pointer to the entry or NULL. The entry is locked.
* The user must unlock the entry when done.
*/
struct lruhash_entry* lruhash_lookup(struct lruhash* table, hashvalue_t hash,
void* key, int wr);
struct lruhash_entry* lruhash_lookup(struct lruhash* table,
hashvalue_type hash, void* key, int wr);
/**
* Touch entry, so it becomes the most recently used in the LRU list.
@ -299,7 +299,7 @@ void lru_touch(struct lruhash* table, struct lruhash_entry* entry);
/**
* Set the markdelfunction (or NULL)
*/
void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md);
void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md);
/************************* Internal functions ************************/
/*** these are only exposed for unit tests. ***/
@ -311,7 +311,7 @@ void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md);
* @param hash: hash of key.
* @param key: what to look for.
*/
void lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key);
void lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key);
/** init the hash bins for the table */
void bin_init(struct lruhash_bin* array, size_t size);
@ -328,7 +328,7 @@ void bin_delete(struct lruhash* table, struct lruhash_bin* bin);
* @return: the entry or NULL if not found.
*/
struct lruhash_entry* bin_find_entry(struct lruhash* table,
struct lruhash_bin* bin, hashvalue_t hash, void* key);
struct lruhash_bin* bin, hashvalue_type hash, void* key);
/**
* Remove entry from bin overflow chain.

View File

@ -46,9 +46,9 @@
#include "util/storage/slabhash.h"
struct slabhash* slabhash_create(size_t numtables, size_t start_size,
size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg)
size_t maxmem, lruhash_sizefunc_type sizefunc,
lruhash_compfunc_type compfunc, lruhash_delkeyfunc_type delkeyfunc,
lruhash_deldatafunc_type deldatafunc, void* arg)
{
size_t i;
struct slabhash* sl = (struct slabhash*)calloc(1,
@ -108,24 +108,24 @@ void slabhash_clear(struct slabhash* sl)
/** helper routine to calculate the slabhash index */
static unsigned int
slab_idx(struct slabhash* sl, hashvalue_t hash)
slab_idx(struct slabhash* sl, hashvalue_type hash)
{
return ((hash & sl->mask) >> sl->shift);
}
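Illustrative arithmetic only (the actual mask and shift are computed in slabhash_create, outside this hunk): if a 256-slab table had mask 0xff000000 and shift 24, a hash of 0x12345678 would select slab ((0x12345678 & 0xff000000) >> 24) = 0x12, i.e. slab 18, so every hash value lands in exactly one of the underlying lruhash tables and only that slab's lock is taken.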
void slabhash_insert(struct slabhash* sl, hashvalue_t hash,
void slabhash_insert(struct slabhash* sl, hashvalue_type hash,
struct lruhash_entry* entry, void* data, void* arg)
{
lruhash_insert(sl->array[slab_idx(sl, hash)], hash, entry, data, arg);
}
struct lruhash_entry* slabhash_lookup(struct slabhash* sl,
hashvalue_t hash, void* key, int wr)
hashvalue_type hash, void* key, int wr)
{
return lruhash_lookup(sl->array[slab_idx(sl, hash)], hash, key, wr);
}
void slabhash_remove(struct slabhash* sl, hashvalue_t hash, void* key)
void slabhash_remove(struct slabhash* sl, hashvalue_type hash, void* key)
{
lruhash_remove(sl->array[slab_idx(sl, hash)], hash, key);
}
@ -163,7 +163,7 @@ size_t slabhash_get_mem(struct slabhash* sl)
return total;
}
struct lruhash* slabhash_gettable(struct slabhash* sl, hashvalue_t hash)
struct lruhash* slabhash_gettable(struct slabhash* sl, hashvalue_type hash)
{
return sl->array[slab_idx(sl, hash)];
}
@ -202,7 +202,7 @@ void test_slabhash_deldata(void* data, void* ATTR_UNUSED(arg))
deldata((struct slabhash_testdata*)data);
}
void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_t md)
void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_type md)
{
size_t i;
for(i=0; i<sl->size; i++) {

View File

@ -80,9 +80,9 @@ struct slabhash {
* @return: new hash table or NULL on malloc failure.
*/
struct slabhash* slabhash_create(size_t numtables, size_t start_size,
size_t maxmem, lruhash_sizefunc_t sizefunc,
lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
lruhash_deldatafunc_t deldatafunc, void* arg);
size_t maxmem, lruhash_sizefunc_type sizefunc,
lruhash_compfunc_type compfunc, lruhash_delkeyfunc_type delkeyfunc,
lruhash_deldatafunc_type deldatafunc, void* arg);
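Illustrative only: creating the slabbed variant with the same hypothetical callbacks as in the lruhash sketch earlier; the slab count of 4 and start size of 1024 are arbitrary here (Unbound's cache configuration typically uses power-of-two slab counts).
static struct slabhash* example_slabhash(void)
{
	/* each slab is an independent lruhash behind its own lock; the
	 * hash value then picks the slab via slab_idx as shown in slabhash.c */
	return slabhash_create(4, 1024, HASH_DEFAULT_MAXMEM,
		&my_size, &my_comp, &my_delkey, &my_deldata, NULL);
}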
/**
* Delete hash table. Entries are all deleted.
@ -109,7 +109,7 @@ void slabhash_clear(struct slabhash* table);
* @param data: the data.
* @param cb_override: if not NULL overrides the cb_arg for the deletefunc.
*/
void slabhash_insert(struct slabhash* table, hashvalue_t hash,
void slabhash_insert(struct slabhash* table, hashvalue_type hash,
struct lruhash_entry* entry, void* data, void* cb_override);
/**
@ -126,7 +126,7 @@ void slabhash_insert(struct slabhash* table, hashvalue_t hash,
* The user must unlock the entry when done.
*/
struct lruhash_entry* slabhash_lookup(struct slabhash* table,
hashvalue_t hash, void* key, int wr);
hashvalue_type hash, void* key, int wr);
/**
* Remove entry from hashtable. Does nothing if not found in hashtable.
@ -135,7 +135,7 @@ struct lruhash_entry* slabhash_lookup(struct slabhash* table,
* @param hash: hash of key.
* @param key: what to look for.
*/
void slabhash_remove(struct slabhash* table, hashvalue_t hash, void* key);
void slabhash_remove(struct slabhash* table, hashvalue_type hash, void* key);
/**
* Output debug info to the log as to state of the hash table.
@ -165,14 +165,14 @@ size_t slabhash_get_mem(struct slabhash* table);
* @param hash: hash value.
* @return the lru hash table.
*/
struct lruhash* slabhash_gettable(struct slabhash* table, hashvalue_t hash);
struct lruhash* slabhash_gettable(struct slabhash* table, hashvalue_type hash);
/**
* Set markdel function
* @param table: slabbed hash table.
* @param md: markdel function ptr.
*/
void slabhash_setmarkdel(struct slabhash* table, lruhash_markdelfunc_t md);
void slabhash_setmarkdel(struct slabhash* table, lruhash_markdelfunc_type md);
/**
* Traverse a slabhash.

View File

@ -426,7 +426,7 @@ int tube_read_fd(struct tube* tube)
}
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
tube_callback_t* cb, void* arg)
tube_callback_type* cb, void* arg)
{
tube->listen_cb = cb;
tube->listen_arg = arg;
@ -667,7 +667,7 @@ tube_handle_write(struct comm_point* ATTR_UNUSED(c), void* ATTR_UNUSED(arg),
}
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
tube_callback_t* cb, void* arg)
tube_callback_type* cb, void* arg)
{
tube->listen_cb = cb;
tube->listen_arg = arg;

View File

@ -55,7 +55,7 @@ struct tube_res_list;
* void mycallback(tube, msg, len, error, user_argument);
* if error is true (NETEVENT_*), msg is probably NULL.
*/
typedef void tube_callback_t(struct tube*, uint8_t*, size_t, int, void*);
typedef void tube_callback_type(struct tube*, uint8_t*, size_t, int, void*);
/**
* A pipe
@ -70,7 +70,7 @@ struct tube {
/** listen commpoint */
struct comm_point* listen_com;
/** listen callback */
tube_callback_t* listen_cb;
tube_callback_type* listen_cb;
/** listen callback user arg */
void* listen_arg;
/** are we currently reading a command, 0 if not, else bytecount */
@ -92,7 +92,7 @@ struct tube {
#else /* USE_WINSOCK */
/** listen callback */
tube_callback_t* listen_cb;
tube_callback_type* listen_cb;
/** listen callback user arg */
void* listen_arg;
/** the windows sockets event (signaled if items in pipe) */
@ -101,7 +101,7 @@ struct tube {
struct ub_event* ev_listen;
/** lock on the list of outstanding items */
lock_basic_t res_lock;
lock_basic_type res_lock;
/** list of outstanding results on pipe */
struct tube_res_list* res_list;
/** last in list */
@ -222,7 +222,7 @@ int tube_read_fd(struct tube* tube);
* @return true if successful, false on error.
*/
int tube_setup_bg_listen(struct tube* tube, struct comm_base* base,
tube_callback_t* cb, void* arg);
tube_callback_type* cb, void* arg);
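As an illustration of the renamed callback type and this setup call (outside this change; my_tube_cb, example_listen and my_arg are hypothetical):
/* matches: typedef void tube_callback_type(struct tube*, uint8_t*, size_t, int, void*); */
static void my_tube_cb(struct tube* t, uint8_t* msg, size_t len, int err,
	void* arg)
{
	(void)t; (void)arg;
	if(err != 0) {
		/* err is a NETEVENT_* code; msg is probably NULL here */
		return;
	}
	/* ... process the len bytes in msg; ownership of msg follows the
	 * tube contract, which is outside this hunk ... */
	(void)msg; (void)len;
}
static int example_listen(struct tube* tube, struct comm_base* base, void* my_arg)
{
	/* returns false if the background listen could not be set up */
	return tube_setup_bg_listen(tube, base, &my_tube_cb, my_arg);
}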
/**
* Remove bg listen setup from event base.

View File

@ -1064,7 +1064,7 @@ int autr_read_file(struct val_anchors* anchors, const char* nm)
/** string for a trustanchor state */
static const char*
trustanchor_state2str(autr_state_t s)
trustanchor_state2str(autr_state_type s)
{
switch (s) {
case AUTR_STATE_START: return " START ";
@ -1679,7 +1679,7 @@ reset_holddown(struct module_env* env, struct autr_ta* ta, int* changed)
/** Set the state for this trust anchor */
static void
set_trustanchor_state(struct module_env* env, struct autr_ta* ta, int* changed,
autr_state_t s)
autr_state_type s)
{
verbose_key(ta, VERB_ALGO, "update: %s to %s",
trustanchor_state2str(ta->s), trustanchor_state2str(s));
@ -1989,7 +1989,7 @@ calc_next_probe(struct module_env* env, time_t wait)
static time_t
wait_probe_time(struct val_anchors* anchors)
{
rbnode_t* t = rbtree_first(&anchors->autr->probe);
rbnode_type* t = rbtree_first(&anchors->autr->probe);
if(t != RBTREE_NULL)
return ((struct trust_anchor*)t->key)->autr->next_probe_time;
return 0;
@ -2363,7 +2363,7 @@ static struct trust_anchor*
todo_probe(struct module_env* env, time_t* next)
{
struct trust_anchor* tp;
rbnode_t* el;
rbnode_type* el;
/* get first one */
lock_basic_lock(&env->anchors->lock);
if( (el=rbtree_first(&env->anchors->autr->probe)) == RBTREE_NULL) {

View File

@ -58,7 +58,7 @@ typedef enum {
AUTR_STATE_MISSING = 3,
AUTR_STATE_REVOKED = 4,
AUTR_STATE_REMOVED = 5
} autr_state_t;
} autr_state_type;
/**
* Autotrust metadata for one trust anchor key.
@ -73,7 +73,7 @@ struct autr_ta {
/** last update of key state (new pending count keeps date the same) */
time_t last_change;
/** 5011 state */
autr_state_t s;
autr_state_type s;
/** pending count */
uint8_t pending_count;
/** fresh TA was seen */
@ -90,7 +90,7 @@ struct autr_point_data {
/** file to store the trust point in. chrootdir already applied. */
char* file;
/** rbtree node for probe sort, key is struct trust_anchor */
rbnode_t pnode;
rbnode_type pnode;
/** the keys */
struct autr_ta* keys;
@ -126,7 +126,7 @@ struct autr_point_data {
struct autr_global_data {
/** rbtree of autotrust anchors sorted by next probe time.
* When time is equal, sorted by anchor class, name. */
rbtree_t probe;
rbtree_type probe;
};
/**

View File

@ -113,7 +113,7 @@ assembled_rrset_delete(struct ub_packed_rrset_key* pkey)
/** destroy locks in tree and delete autotrust anchors */
static void
anchors_delfunc(rbnode_t* elem, void* ATTR_UNUSED(arg))
anchors_delfunc(rbnode_type* elem, void* ATTR_UNUSED(arg))
{
struct trust_anchor* ta = (struct trust_anchor*)elem;
if(!ta) return;
@ -198,7 +198,7 @@ anchor_find(struct val_anchors* anchors, uint8_t* name, int namelabs,
size_t namelen, uint16_t dclass)
{
struct trust_anchor key;
rbnode_t* n;
rbnode_type* n;
if(!name) return NULL;
key.node.key = &key;
key.name = name;
@ -222,7 +222,7 @@ anchor_new_ta(struct val_anchors* anchors, uint8_t* name, int namelabs,
size_t namelen, uint16_t dclass, int lockit)
{
#ifdef UNBOUND_DEBUG
rbnode_t* r;
rbnode_type* r;
#endif
struct trust_anchor* ta = (struct trust_anchor*)malloc(
sizeof(struct trust_anchor));
@ -990,7 +990,7 @@ anchors_assemble_rrsets(struct val_anchors* anchors)
size_t nods, nokey;
lock_basic_lock(&anchors->lock);
ta=(struct trust_anchor*)rbtree_first(anchors->tree);
while((rbnode_t*)ta != RBTREE_NULL) {
while((rbnode_type*)ta != RBTREE_NULL) {
next = (struct trust_anchor*)rbtree_next(&ta->node);
lock_basic_lock(&ta->lock);
if(ta->autr || (ta->numDS == 0 && ta->numDNSKEY == 0)) {
@ -1164,7 +1164,7 @@ anchors_lookup(struct val_anchors* anchors,
{
struct trust_anchor key;
struct trust_anchor* result;
rbnode_t* res = NULL;
rbnode_type* res = NULL;
key.node.key = &key;
key.name = qname;
key.namelabs = dname_count_labels(qname);

View File

@ -59,14 +59,14 @@ struct sldns_buffer;
*/
struct val_anchors {
/** lock on trees */
lock_basic_t lock;
lock_basic_type lock;
/**
* Anchors are stored in this tree. Sort order is chosen so that
* dnames are in nsec-like order. A lookup on class, name will return
* an exact match or the closest match, which gives the ancestor needed.
* contents of type trust_anchor.
*/
rbtree_t* tree;
rbtree_type* tree;
/** The DLV trust anchor (if one is configured, else NULL) */
struct trust_anchor* dlv_anchor;
/** Autotrust global data, anchors sorted by next probe time */
@ -93,9 +93,9 @@ struct ta_key {
*/
struct trust_anchor {
/** rbtree node, key is this structure */
rbnode_t node;
rbnode_type node;
/** lock on the entire anchor and its keys; for autotrust changes */
lock_basic_t lock;
lock_basic_type lock;
/** name of this trust anchor */
uint8_t* name;
/** length of name */

View File

@ -111,7 +111,7 @@ size_t val_neg_get_mem(struct val_neg_cache* neg)
/** clear datas on cache deletion */
static void
neg_clear_datas(rbnode_t* n, void* ATTR_UNUSED(arg))
neg_clear_datas(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct val_neg_data* d = (struct val_neg_data*)n;
free(d->name);
@ -120,7 +120,7 @@ neg_clear_datas(rbnode_t* n, void* ATTR_UNUSED(arg))
/** clear zones on cache deletion */
static void
neg_clear_zones(rbnode_t* n, void* ATTR_UNUSED(arg))
neg_clear_zones(rbnode_type* n, void* ATTR_UNUSED(arg))
{
struct val_neg_zone* z = (struct val_neg_zone*)n;
/* delete all the rrset entries in the tree */
@ -371,7 +371,7 @@ static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
{
struct val_neg_zone key;
struct val_neg_zone* result;
rbnode_t* res = NULL;
rbnode_type* res = NULL;
key.node.key = &key;
key.name = nm;
key.len = nm_len;
@ -411,7 +411,7 @@ static struct val_neg_data* neg_closest_data_parent(
{
struct val_neg_data key;
struct val_neg_data* result;
rbnode_t* res = NULL;
rbnode_type* res = NULL;
key.node.key = &key;
key.name = nm;
key.len = nm_len;
@ -677,7 +677,7 @@ static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
uint8_t* end;
size_t end_len;
int end_labs, m;
rbnode_t* walk, *next;
rbnode_type* walk, *next;
struct val_neg_data* cur;
uint8_t buf[257];
/* get endpoint */
@ -911,7 +911,7 @@ static int neg_closest_data(struct val_neg_zone* zone,
uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
{
struct val_neg_data key;
rbnode_t* r;
rbnode_type* r;
key.node.key = &key;
key.name = qname;
key.len = len;

View File

@ -67,9 +67,9 @@ struct ub_packed_rrset_key;
struct val_neg_cache {
/** the big lock on the negative cache. Because we use a rbtree
* for the data (quick lookup), we need a big lock */
lock_basic_t lock;
lock_basic_type lock;
/** The zone rbtree. contents sorted canonical, type val_neg_zone */
rbtree_t tree;
rbtree_type tree;
/** the first in linked list of LRU of val_neg_data */
struct val_neg_data* first;
/** last in lru (least recently used element) */
@ -87,7 +87,7 @@ struct val_neg_cache {
*/
struct val_neg_zone {
/** rbtree node element, key is this struct: the name, class */
rbnode_t node;
rbnode_type node;
/** name; the key */
uint8_t* name;
/** length of name */
@ -114,7 +114,7 @@ struct val_neg_zone {
/** tree of NSEC data for this zone, sorted canonical
* by NSEC owner name */
rbtree_t tree;
rbtree_type tree;
/** class of node; host order */
uint16_t dclass;
@ -135,7 +135,7 @@ struct val_neg_zone {
*/
struct val_neg_data {
/** rbtree node element, key is this struct: the name */
rbnode_t node;
rbnode_type node;
/** name; the key */
uint8_t* name;
/** length of name */

View File

@ -623,14 +623,14 @@ nsec3_calc_b32(struct regional* region, sldns_buffer* buf,
}
int
nsec3_hash_name(rbtree_t* table, struct regional* region, sldns_buffer* buf,
nsec3_hash_name(rbtree_type* table, struct regional* region, sldns_buffer* buf,
struct ub_packed_rrset_key* nsec3, int rr, uint8_t* dname,
size_t dname_len, struct nsec3_cached_hash** hash)
{
struct nsec3_cached_hash* c;
struct nsec3_cached_hash looki;
#ifdef UNBOUND_DEBUG
rbnode_t* n;
rbnode_type* n;
#endif
int r;
looki.node.key = &looki;
@ -730,7 +730,7 @@ nsec3_hash_matches_owner(struct nsec3_filter* flt,
*/
static int
find_matching_nsec3(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, uint8_t* nm, size_t nmlen,
rbtree_type* ct, uint8_t* nm, size_t nmlen,
struct ub_packed_rrset_key** rrset, int* rr)
{
size_t i_rs;
@ -823,7 +823,7 @@ nsec3_covers(uint8_t* zone, struct nsec3_cached_hash* hash,
*/
static int
find_covering_nsec3(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, uint8_t* nm, size_t nmlen,
rbtree_type* ct, uint8_t* nm, size_t nmlen,
struct ub_packed_rrset_key** rrset, int* rr)
{
size_t i_rs;
@ -869,7 +869,7 @@ find_covering_nsec3(struct module_env* env, struct nsec3_filter* flt,
*/
static int
nsec3_find_closest_encloser(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, struct query_info* qinfo, struct ce_response* ce)
rbtree_type* ct, struct query_info* qinfo, struct ce_response* ce)
{
uint8_t* nm = qinfo->qname;
size_t nmlen = qinfo->qname_len;
@ -936,7 +936,7 @@ next_closer(uint8_t* qname, size_t qnamelen, uint8_t* ce,
*/
static enum sec_status
nsec3_prove_closest_encloser(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, struct query_info* qinfo, int prove_does_not_exist,
rbtree_type* ct, struct query_info* qinfo, int prove_does_not_exist,
struct ce_response* ce)
{
uint8_t* nc;
@ -1016,7 +1016,7 @@ nsec3_ce_wildcard(struct regional* region, uint8_t* ce, size_t celen,
/** Do the name error proof */
static enum sec_status
nsec3_do_prove_nameerror(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, struct query_info* qinfo)
rbtree_type* ct, struct query_info* qinfo)
{
struct ce_response ce;
uint8_t* wc;
@ -1062,7 +1062,7 @@ nsec3_prove_nameerror(struct module_env* env, struct val_env* ve,
struct ub_packed_rrset_key** list, size_t num,
struct query_info* qinfo, struct key_entry_key* kkey)
{
rbtree_t ct;
rbtree_type ct;
struct nsec3_filter flt;
if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
@ -1086,7 +1086,7 @@ nsec3_prove_nameerror(struct module_env* env, struct val_env* ve,
/** Do the nodata proof */
static enum sec_status
nsec3_do_prove_nodata(struct module_env* env, struct nsec3_filter* flt,
rbtree_t* ct, struct query_info* qinfo)
rbtree_type* ct, struct query_info* qinfo)
{
struct ce_response ce;
uint8_t* wc;
@ -1221,7 +1221,7 @@ nsec3_prove_nodata(struct module_env* env, struct val_env* ve,
struct ub_packed_rrset_key** list, size_t num,
struct query_info* qinfo, struct key_entry_key* kkey)
{
rbtree_t ct;
rbtree_type ct;
struct nsec3_filter flt;
if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
@ -1240,7 +1240,7 @@ nsec3_prove_wildcard(struct module_env* env, struct val_env* ve,
struct ub_packed_rrset_key** list, size_t num,
struct query_info* qinfo, struct key_entry_key* kkey, uint8_t* wc)
{
rbtree_t ct;
rbtree_type ct;
struct nsec3_filter flt;
struct ce_response ce;
uint8_t* nc;
@ -1314,7 +1314,7 @@ nsec3_prove_nods(struct module_env* env, struct val_env* ve,
struct ub_packed_rrset_key** list, size_t num,
struct query_info* qinfo, struct key_entry_key* kkey, char** reason)
{
rbtree_t ct;
rbtree_type ct;
struct nsec3_filter flt;
struct ce_response ce;
struct ub_packed_rrset_key* rrset;
@ -1403,7 +1403,7 @@ nsec3_prove_nxornodata(struct module_env* env, struct val_env* ve,
struct query_info* qinfo, struct key_entry_key* kkey, int* nodata)
{
enum sec_status sec, secnx;
rbtree_t ct;
rbtree_type ct;
struct nsec3_filter flt;
*nodata = 0;

View File

@ -224,7 +224,7 @@ nsec3_prove_nxornodata(struct module_env* env, struct val_env* ve,
*/
struct nsec3_cached_hash {
/** rbtree node, key is this structure */
rbnode_t node;
rbnode_type node;
/** where are the parameters for conversion, in this rrset data */
struct ub_packed_rrset_key* nsec3;
/** where are the parameters for conversion, this RR number in data */
@ -271,7 +271,7 @@ int nsec3_hash_cmp(const void* c1, const void* c2);
* 0 on a malloc failure.
* -1 if the NSEC3 rr was badly formatted (i.e. formerr).
*/
int nsec3_hash_name(rbtree_t* table, struct regional* region,
int nsec3_hash_name(rbtree_type* table, struct regional* region,
struct sldns_buffer* buf, struct ub_packed_rrset_key* nsec3, int rr,
uint8_t* dname, size_t dname_len, struct nsec3_cached_hash** hash);
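A hedged caller sketch for the return codes above (the tree, regional, buffer, rrset and name arguments are hypothetical parameters; the exact success value is in the full header comment, which this hunk truncates):
static void example_nsec3_hash(rbtree_type* ct, struct regional* region,
	struct sldns_buffer* buf, struct ub_packed_rrset_key* nsec3, int rr,
	uint8_t* qname, size_t qname_len)
{
	struct nsec3_cached_hash* h = NULL;
	int r = nsec3_hash_name(ct, region, buf, nsec3, rr, qname,
		qname_len, &h);
	if(r == 0) {
		/* malloc failure */
	} else if(r == -1) {
		/* badly formatted NSEC3 RR, treat as formerr */
	} else {
		/* otherwise: h points at the (possibly cached) hash of qname */
	}
}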

View File

@ -1387,7 +1387,7 @@ _verify_nettle_dsa(sldns_buffer* buf, unsigned char* sigblock,
unsigned int sigblock_len, unsigned char* key, unsigned int keylen)
{
uint8_t digest[SHA1_DIGEST_SIZE];
uint8_t key_t;
uint8_t key_t_value;
int res = 0;
size_t offset;
struct dsa_public_key pubkey;
@ -1426,8 +1426,8 @@ _verify_nettle_dsa(sldns_buffer* buf, unsigned char* sigblock,
}
/* Validate T values constraints - RFC 2536 sec. 2 & sec. 3 */
key_t = key[0];
if (key_t > 8) {
key_t_value = key[0];
if (key_t_value > 8) {
return "invalid T value in DSA pubkey";
}
@ -1438,9 +1438,9 @@ _verify_nettle_dsa(sldns_buffer* buf, unsigned char* sigblock,
expected_len = 1 + /* T */
20 + /* Q */
(64 + key_t*8) + /* P */
(64 + key_t*8) + /* G */
(64 + key_t*8); /* Y */
(64 + key_t_value*8) + /* P */
(64 + key_t_value*8) + /* G */
(64 + key_t_value*8); /* Y */
if (keylen != expected_len ) {
return "invalid DSA pubkey length";
}
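A quick check of that length formula (illustration only): with the maximum T of 8, each of P, G and Y is 64 + 8*8 = 128 octets, so expected_len = 1 + 20 + 3*128 = 405 octets; with T = 0 it is 1 + 20 + 3*64 = 213 octets. Any keylen that does not match the formula for the key's own T value is rejected as an invalid DSA pubkey length.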
@ -1450,11 +1450,11 @@ _verify_nettle_dsa(sldns_buffer* buf, unsigned char* sigblock,
offset = 1;
nettle_mpz_set_str_256_u(pubkey.q, 20, key+offset);
offset += 20;
nettle_mpz_set_str_256_u(pubkey.p, (64 + key_t*8), key+offset);
offset += (64 + key_t*8);
nettle_mpz_set_str_256_u(pubkey.g, (64 + key_t*8), key+offset);
offset += (64 + key_t*8);
nettle_mpz_set_str_256_u(pubkey.y, (64 + key_t*8), key+offset);
nettle_mpz_set_str_256_u(pubkey.p, (64 + key_t_value*8), key+offset);
offset += (64 + key_t_value*8);
nettle_mpz_set_str_256_u(pubkey.g, (64 + key_t_value*8), key+offset);
offset += (64 + key_t_value*8);
nettle_mpz_set_str_256_u(pubkey.y, (64 + key_t_value*8), key+offset);
/* Digest content of "buf" and verify its DSA signature in "sigblock"*/
res = _digest_nettle(SHA1_DIGEST_SIZE, (unsigned char*)sldns_buffer_begin(buf),

View File

@ -483,7 +483,7 @@ dnskeyset_verify_rrset(struct module_env* env, struct val_env* ve,
{
enum sec_status sec;
size_t i, num;
rbtree_t* sortree = NULL;
rbtree_type* sortree = NULL;
/* make sure that for all DNSKEY algorithms there are valid sigs */
struct algo_needs needs;
int alg;
@ -551,7 +551,7 @@ dnskey_verify_rrset(struct module_env* env, struct val_env* ve,
{
enum sec_status sec;
size_t i, num, numchecked = 0;
rbtree_t* sortree = NULL;
rbtree_type* sortree = NULL;
int buf_canon = 0;
uint16_t tag = dnskey_calc_keytag(dnskey, dnskey_idx);
int algo = dnskey_get_algo(dnskey, dnskey_idx);
@ -585,7 +585,7 @@ enum sec_status
dnskeyset_verify_rrset_sig(struct module_env* env, struct val_env* ve,
time_t now, struct ub_packed_rrset_key* rrset,
struct ub_packed_rrset_key* dnskey, size_t sig_idx,
struct rbtree_t** sortree, char** reason)
struct rbtree_type** sortree, char** reason)
{
/* find matching keys and check them */
enum sec_status sec = sec_status_bogus;
@ -627,7 +627,7 @@ dnskeyset_verify_rrset_sig(struct module_env* env, struct val_env* ve,
*/
struct canon_rr {
/** rbtree node, key is this structure */
rbnode_t node;
rbnode_type node;
/** rrset the RR is in */
struct ub_packed_rrset_key* rrset;
/** which RR in the rrset */
@ -885,7 +885,7 @@ canonical_tree_compare(const void* k1, const void* k2)
*/
static void
canonical_sort(struct ub_packed_rrset_key* rrset, struct packed_rrset_data* d,
rbtree_t* sortree, struct canon_rr* rrs)
rbtree_type* sortree, struct canon_rr* rrs)
{
size_t i;
/* insert into rbtree to sort and detect duplicates */
@ -1043,7 +1043,7 @@ canonicalize_rdata(sldns_buffer* buf, struct ub_packed_rrset_key* rrset,
int rrset_canonical_equal(struct regional* region,
struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
{
struct rbtree_t sortree1, sortree2;
struct rbtree_type sortree1, sortree2;
struct canon_rr *rrs1, *rrs2, *p1, *p2;
struct packed_rrset_data* d1=(struct packed_rrset_data*)k1->entry.data;
struct packed_rrset_data* d2=(struct packed_rrset_data*)k2->entry.data;
@ -1120,7 +1120,7 @@ int rrset_canonical_equal(struct regional* region,
static int
rrset_canonical(struct regional* region, sldns_buffer* buf,
struct ub_packed_rrset_key* k, uint8_t* sig, size_t siglen,
struct rbtree_t** sortree)
struct rbtree_type** sortree)
{
struct packed_rrset_data* d = (struct packed_rrset_data*)k->entry.data;
uint8_t* can_owner = NULL;
@ -1129,8 +1129,8 @@ rrset_canonical(struct regional* region, sldns_buffer* buf,
struct canon_rr* rrs;
if(!*sortree) {
*sortree = (struct rbtree_t*)regional_alloc(region,
sizeof(rbtree_t));
*sortree = (struct rbtree_type*)regional_alloc(region,
sizeof(rbtree_type));
if(!*sortree)
return 0;
if(d->count > RR_COUNT_MAX)
@ -1312,7 +1312,7 @@ dnskey_verify_rrset_sig(struct regional* region, sldns_buffer* buf,
struct val_env* ve, time_t now,
struct ub_packed_rrset_key* rrset, struct ub_packed_rrset_key* dnskey,
size_t dnskey_idx, size_t sig_idx,
struct rbtree_t** sortree, int* buf_canon, char** reason)
struct rbtree_type** sortree, int* buf_canon, char** reason)
{
enum sec_status sec;
uint8_t* sig; /* RRSIG rdata */

View File

@ -47,7 +47,7 @@
struct val_env;
struct module_env;
struct ub_packed_rrset_key;
struct rbtree_t;
struct rbtree_type;
struct regional;
struct sldns_buffer;
@ -277,7 +277,7 @@ enum sec_status dnskey_verify_rrset(struct module_env* env,
enum sec_status dnskeyset_verify_rrset_sig(struct module_env* env,
struct val_env* ve, time_t now, struct ub_packed_rrset_key* rrset,
struct ub_packed_rrset_key* dnskey, size_t sig_idx,
struct rbtree_t** sortree, char** reason);
struct rbtree_type** sortree, char** reason);
/**
* verify rrset, with specific dnskey(from set), for a specific rrsig
@ -302,7 +302,7 @@ enum sec_status dnskey_verify_rrset_sig(struct regional* region,
struct sldns_buffer* buf, struct val_env* ve, time_t now,
struct ub_packed_rrset_key* rrset, struct ub_packed_rrset_key* dnskey,
size_t dnskey_idx, size_t sig_idx,
struct rbtree_t** sortree, int* buf_canon, char** reason);
struct rbtree_type** sortree, int* buf_canon, char** reason);
/**
* canonical compare for two tree entries

View File

@ -126,7 +126,7 @@ struct val_env {
size_t* nsec3_maxiter;
/** lock on bogus counter */
lock_basic_t bogus_lock;
lock_basic_type bogus_lock;
/** number of times rrsets marked bogus */
size_t num_rrset_bogus;
};