Lines Matching +full:total +full:- +full:timeout

2  *		INETPEER - Storage for permanent information about peers
28 * We keep one entry for each peer IP address. The nodes contain long-living
33 * time has passed since its last use. The least-recently-used entry can
34 * also be removed if the pool is overloaded, i.e. if the total number of
35 * entries is greater than or equal to the threshold.
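
The comment describes a two-part reclaim rule: idle entries age out, and pool pressure makes aging more aggressive. A minimal sketch of the idle half of that predicate, with illustrative names that are not from the source (the pressure half shows up in the inet_peer_gc() fragments below, where the ttl shrinks as the pool fills):

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative helper: an entry may be reclaimed once only the tree
	 * references it (refcnt == 1) and it has been idle for at least
	 * 'ttl' jiffies. The uint32_t subtraction is wrap-safe, matching
	 * the delta computation in inet_peer_gc() below. */
	static bool peer_is_evictable(uint32_t now, uint32_t dtime,
				      uint32_t ttl, unsigned int refcnt)
	{
		return refcnt == 1 && (uint32_t)(now - dtime) >= ttl;
	}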
59 bp->rb_root = RB_ROOT; in inet_peer_base_init()
60 seqlock_init(&bp->lock); in inet_peer_base_init()
61 bp->total = 0; in inet_peer_base_init()
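
These three initializers touch every field of the per-base bookkeeping structure. For reference, the shape they imply (this matches the upstream definition of struct inet_peer_base, reconstructed here from the initializers alone):

	#include <linux/rbtree.h>
	#include <linux/seqlock.h>

	struct inet_peer_base {
		struct rb_root	rb_root;	/* all peers for this base, keyed by address */
		seqlock_t	lock;		/* serializes writers; versions reads */
		int		total;		/* entry count, compared against peer_threshold */
	};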
90 /* Called with rcu_read_lock() or base->lock held */
102 pp = &base->rb_root.rb_node; in lookup()
112 cmp = inetpeer_addr_cmp(daddr, &p->daddr); in lookup()
114 if (!refcount_inc_not_zero(&p->refcnt)) in lookup()
121 } else if (unlikely(read_seqretry(&base->lock, seq))) { in lookup()
124 if (cmp == -1) in lookup()
125 pp = &next->rb_left; in lookup()
127 pp = &next->rb_right; in lookup()
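
These fragments are the lockless read side of lookup(): the tree is walked without the lock, and the seqlock sequence number taken by the caller detects concurrent rebalances. A condensed sketch of that walk, assuming the upstream helpers; the parent/pp out-parameters used for insertion are elided:

	#include <linux/rbtree.h>
	#include <linux/seqlock.h>
	#include <net/inetpeer.h>

	static struct inet_peer *lookup_sketch(const struct inetpeer_addr *daddr,
					       struct inet_peer_base *base,
					       unsigned int seq)
	{
		struct rb_node **pp = &base->rb_root.rb_node;

		while (*pp) {
			struct inet_peer *p = rb_entry(*pp, struct inet_peer,
						       rb_node);
			int cmp = inetpeer_addr_cmp(daddr, &p->daddr);

			if (cmp == 0) {
				/* The entry may be dying; take it only if the
				 * refcount can be raised from non-zero. */
				if (!refcount_inc_not_zero(&p->refcnt))
					break;
				return p;
			}
			/* A writer may have rebalanced the tree under us;
			 * the seqlock retry detects that and aborts. */
			if (read_seqretry(&base->lock, seq))
				break;
			pp = (cmp == -1) ? &(*pp)->rb_left : &(*pp)->rb_right;
		}
		return NULL;
	}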
153 if (base->total >= peer_threshold) in inet_peer_gc()
156 ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ * in inet_peer_gc()
157 base->total / peer_threshold * HZ; in inet_peer_gc()
164 delta = (__u32)jiffies - READ_ONCE(p->dtime); in inet_peer_gc()
166 if (delta < ttl || !refcount_dec_if_one(&p->refcnt)) in inet_peer_gc()
172 rb_erase(&p->rb_node, &base->rb_root); in inet_peer_gc()
173 base->total--; in inet_peer_gc()
174 call_rcu(&p->rcu, inetpeer_free_rcu); in inet_peer_gc()
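
The ttl expression scales the grace period linearly from peer_maxttl (empty pool) down toward peer_minttl (pool at threshold); at or above the threshold the earlier branch forces ttl to 0. A standalone demonstration of the arithmetic, using sample values chosen to mirror the upstream defaults (minttl 120*HZ, maxttl 600*HZ, threshold on the order of 64K entries):

	#include <stdio.h>

	#define HZ 100	/* illustrative tick rate; any value works here */

	int main(void)
	{
		int peer_minttl = 120 * HZ;
		int peer_maxttl = 600 * HZ;
		int peer_threshold = 65536;

		for (int total = 0; total <= peer_threshold;
		     total += peer_threshold / 4) {
			/* Same expression and evaluation order as the
			 * fragment above; dividing by HZ first keeps the
			 * intermediate product within 32-bit range. */
			int ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
				  total / peer_threshold * HZ;

			printf("total=%5d  ttl=%6d jiffies (%3d s)\n",
			       total, ttl, ttl / HZ);
		}
		return 0;
	}

With these values the ttl steps 600 s, 480 s, 360 s, 240 s, 120 s as the pool fills, i.e. idle entries die sooner under pressure.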
192 seq = read_seqbegin(&base->lock); in inet_getpeer()
194 invalidated = read_seqretry(&base->lock, seq); in inet_getpeer()
208 write_seqlock_bh(&base->lock); in inet_getpeer()
215 p->daddr = *daddr; in inet_getpeer()
216 p->dtime = (__u32)jiffies; in inet_getpeer()
217 refcount_set(&p->refcnt, 2); in inet_getpeer()
218 atomic_set(&p->rid, 0); in inet_getpeer()
219 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; in inet_getpeer()
220 p->rate_tokens = 0; in inet_getpeer()
221 p->n_redirects = 0; in inet_getpeer()
225 p->rate_last = jiffies - 60*HZ; in inet_getpeer()
227 rb_link_node(&p->rb_node, parent, pp); in inet_getpeer()
228 rb_insert_color(&p->rb_node, &base->rb_root); in inet_getpeer()
229 base->total++; in inet_getpeer()
234 write_sequnlock_bh(&base->lock); in inet_getpeer()
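
The creation fragments above all sit inside one write-side critical section of the base seqlock. A condensed sketch of that path, assuming the surrounding inet_getpeer() body (gfp-flag handling and the gc_stack bookkeeping are elided, and the helper signatures are taken from the fragments):

	write_seqlock_bh(&base->lock);
	/* Re-run the lookup under the lock: another CPU may have inserted
	 * the same address while we were reading locklessly. */
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC | __GFP_NOWARN);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			/* Two references: one held by the tree, one
			 * returned to the caller. */
			refcount_set(&p->refcnt, 2);
			/* Backdate rate_last so the first ICMP to this
			 * peer is not rate-limited. */
			p->rate_last = jiffies - 60*HZ;
			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	write_sequnlock_bh(&base->lock);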
245 WRITE_ONCE(p->dtime, (__u32)jiffies); in inet_putpeer()
247 if (refcount_dec_and_test(&p->refcnt)) in inet_putpeer()
248 call_rcu(&p->rcu, inetpeer_free_rcu); in inet_putpeer()
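
inet_putpeer() stamps dtime on every release, so the GC delta above measures time since the last put, and the final reference frees the entry only after an RCU grace period, since lockless readers in lookup() may still hold the pointer. The queued callback is plausibly just a slab free (peer_cachep is the file-local cache assumed here):

	#include <linux/slab.h>
	#include <net/inetpeer.h>

	static void inetpeer_free_rcu(struct rcu_head *head)
	{
		/* No RCU reader can still see this node past the grace
		 * period, so the memory can be returned to the cache. */
		kmem_cache_free(peer_cachep,
				container_of(head, struct inet_peer, rcu));
	}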
261 * for one "ip object" is shared - and these ICMPs are twice limited:
270 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) in inet_peer_xrlim_allow() argument
278 token = peer->rate_tokens; in inet_peer_xrlim_allow()
280 token += now - peer->rate_last; in inet_peer_xrlim_allow()
281 peer->rate_last = now; in inet_peer_xrlim_allow()
282 if (token > XRLIM_BURST_FACTOR * timeout) in inet_peer_xrlim_allow()
283 token = XRLIM_BURST_FACTOR * timeout; in inet_peer_xrlim_allow()
284 if (token >= timeout) { in inet_peer_xrlim_allow()
285 token -= timeout; in inet_peer_xrlim_allow()
288 peer->rate_tokens = token; in inet_peer_xrlim_allow()
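
This is a classic token bucket: one token accrues per jiffy, each transmission spends 'timeout' tokens, and the bucket is capped at XRLIM_BURST_FACTOR * timeout (upstream defines XRLIM_BURST_FACTOR as 6), so a full bucket permits a burst of six packets before settling into one packet per 'timeout'. A userspace re-implementation of the same logic for illustration, with 'now' standing in for jiffies:

	#include <stdbool.h>
	#include <stdio.h>

	#define HZ 100
	#define XRLIM_BURST_FACTOR 6

	static unsigned long rate_tokens;
	static unsigned long rate_last;

	static bool xrlim_allow(unsigned long now, int timeout)
	{
		unsigned long token = rate_tokens;
		bool rc = false;

		token += now - rate_last;	/* one token per tick */
		rate_last = now;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout; /* cap burst */
		if (token >= timeout) {
			token -= timeout;	/* spend tokens to send */
			rc = true;
		}
		rate_tokens = token;
		return rc;
	}

	int main(void)
	{
		int timeout = HZ;	/* one packet per second, steady state */

		rate_last = 0;
		rate_tokens = XRLIM_BURST_FACTOR * timeout; /* start full */
		for (unsigned long now = 0; now < 10; now++)
			printf("t=%lu allow=%d\n", now, xrlim_allow(now, timeout));
		return 0;
	}

Run back-to-back, the first six attempts are allowed and the rest denied until tokens accrue again, which is exactly the "burst then throttle" behavior the factor buys.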
295 struct rb_node *p = rb_first(&base->rb_root); in inetpeer_invalidate_tree()
301 rb_erase(&peer->rb_node, &base->rb_root); in inetpeer_invalidate_tree()
306 base->total = 0; in inetpeer_invalidate_tree()
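
For completeness, a reconstruction of the teardown loop these last fragments come from, assuming the upstream helpers; the iterator is advanced before erasing so it stays valid, and each unlinked node is released through the normal refcount/RCU path:

	#include <linux/rbtree.h>
	#include <linux/sched.h>
	#include <net/inetpeer.h>

	void inetpeer_invalidate_tree_sketch(struct inet_peer_base *base)
	{
		struct rb_node *p = rb_first(&base->rb_root);

		while (p) {
			struct inet_peer *peer = rb_entry(p, struct inet_peer,
							  rb_node);

			/* Advance before erasing so the walk stays valid. */
			p = rb_next(p);
			rb_erase(&peer->rb_node, &base->rb_root);
			/* Drop the tree's reference; the final put frees
			 * the node via RCU. */
			inet_putpeer(peer);
			cond_resched();
		}
		base->total = 0;
	}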