1 // SPDX-License-Identifier: GPL-2.0
4 * change in the future and be a per-client cache.
64 * XXX: these limits are per-container, so memory used will increase
71 unsigned long low_pages = totalram_pages() - totalhigh_pages(); in nfsd_cache_size_limit()
73 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); in nfsd_cache_size_limit()
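
The two lines above come from nfsd_cache_size_limit(): the cap on the number of DRC entries is derived from low memory only, as 16 * sqrt(low-memory pages), shifted by PAGE_SHIFT - 10 to account for the page size. A minimal stand-alone sketch of that arithmetic, assuming 4 KiB pages; cache_size_limit() and int_sqrt_ul() below are illustrative names, and the real kernel helper additionally clamps the result to a hard maximum.

/*
 * Stand-alone sketch of the entry-count heuristic above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12). cache_size_limit() and int_sqrt_ul() are illustrative
 * names only; the kernel uses int_sqrt() and also caps the returned value.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

static unsigned long int_sqrt_ul(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned int cache_size_limit(unsigned long low_pages)
{
	return (16 * (unsigned int)int_sqrt_ul(low_pages)) << (PAGE_SHIFT - 10);
}

int main(void)
{
	/* 1 GiB of low memory = 262144 pages -> sqrt = 512 -> 8192 << 2 = 32768 */
	unsigned long low_pages = (1UL << 30) >> PAGE_SHIFT;

	printf("max DRC entries: %u\n", cache_size_limit(low_pages));
	return 0;
}
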
90 return hash_32(be32_to_cpu(xid), nn->maskbits); in nfsd_cache_hash()
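
nfsd_cache_hash() picks a bucket by converting the on-the-wire XID to host order and hashing it down to nn->maskbits bits. hash_32() is the kernel's multiplicative (Fibonacci) hash; the sketch below approximates the same bucket selection in user space, with ntohl() standing in for be32_to_cpu() and maskbits chosen arbitrarily.

/*
 * User-space approximation of the bucket selection above. The multiplicative
 * constant matches the one used by the kernel's generic hash_32(); maskbits
 * plays the role of nn->maskbits (log2 of the number of buckets).
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu() */

#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t xid_be = htonl(0x12345678);	/* XID as it appears on the wire */
	unsigned int maskbits = 6;		/* 64 buckets, chosen arbitrarily */

	printf("bucket = %u\n", (unsigned int)hash_32(ntohl(xid_be), maskbits));
	return 0;
}
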
101 rp->c_state = RC_UNUSED; in nfsd_reply_cache_alloc()
102 rp->c_type = RC_NOCACHE; in nfsd_reply_cache_alloc()
103 RB_CLEAR_NODE(&rp->c_node); in nfsd_reply_cache_alloc()
104 INIT_LIST_HEAD(&rp->c_lru); in nfsd_reply_cache_alloc()
106 memset(&rp->c_key, 0, sizeof(rp->c_key)); in nfsd_reply_cache_alloc()
107 rp->c_key.k_xid = rqstp->rq_xid; in nfsd_reply_cache_alloc()
108 rp->c_key.k_proc = rqstp->rq_proc; in nfsd_reply_cache_alloc()
109 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp)); in nfsd_reply_cache_alloc()
110 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp))); in nfsd_reply_cache_alloc()
111 rp->c_key.k_prot = rqstp->rq_prot; in nfsd_reply_cache_alloc()
112 rp->c_key.k_vers = rqstp->rq_vers; in nfsd_reply_cache_alloc()
113 rp->c_key.k_len = rqstp->rq_arg.len; in nfsd_reply_cache_alloc()
114 rp->c_key.k_csum = csum; in nfsd_reply_cache_alloc()
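
nfsd_reply_cache_alloc() fills the lookup key that identifies a retransmitted call: XID, procedure, client address and source port, transport protocol, NFS version, argument length, and a checksum over the start of the payload. Below is a simplified user-space model of that key; the field names and types are illustrative rather than the kernel's struct, but they show why the memset() above matters: zeroing the struct first makes a raw memcmp() (as nfsd_cache_key_cmp() does later) safe despite padding bytes.

/*
 * Illustrative model of the lookup key filled in above. Two requests are
 * treated as the same call only when every one of these fields matches.
 * Zeroing the whole struct before filling it keeps padding deterministic,
 * so a plain memcmp() is a valid comparator.
 */
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>

struct drc_key {
	uint32_t		xid;	/* RPC transaction id */
	uint32_t		proc;	/* procedure number */
	struct sockaddr_storage	addr;	/* client address and source port */
	unsigned short		prot;	/* transport protocol */
	unsigned short		vers;	/* NFS version */
	unsigned int		len;	/* length of the RPC argument */
	uint32_t		csum;	/* checksum over the head of the payload */
};

static int keys_equal(const struct drc_key *a, const struct drc_key *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}
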
123 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { in nfsd_reply_cache_free_locked()
124 nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len); in nfsd_reply_cache_free_locked()
125 kfree(rp->c_replvec.iov_base); in nfsd_reply_cache_free_locked()
127 if (rp->c_state != RC_UNUSED) { in nfsd_reply_cache_free_locked()
128 rb_erase(&rp->c_node, &b->rb_head); in nfsd_reply_cache_free_locked()
129 list_del(&rp->c_lru); in nfsd_reply_cache_free_locked()
130 atomic_dec(&nn->num_drc_entries); in nfsd_reply_cache_free_locked()
140 spin_lock(&b->cache_lock); in nfsd_reply_cache_free()
142 spin_unlock(&b->cache_lock); in nfsd_reply_cache_free()
149 return drc_slab ? 0 : -ENOMEM; in nfsd_drc_slab_create()
159 return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); in nfsd_reply_cache_stats_init()
164 nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM); in nfsd_reply_cache_stats_destroy()
173 nn->max_drc_entries = nfsd_cache_size_limit(); in nfsd_reply_cache_init()
174 atomic_set(&nn->num_drc_entries, 0); in nfsd_reply_cache_init()
175 hashsize = nfsd_hashsize(nn->max_drc_entries); in nfsd_reply_cache_init()
176 nn->maskbits = ilog2(hashsize); in nfsd_reply_cache_init()
182 nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan; in nfsd_reply_cache_init()
183 nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count; in nfsd_reply_cache_init()
184 nn->nfsd_reply_cache_shrinker.seeks = 1; in nfsd_reply_cache_init()
185 status = register_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_init()
189 nn->drc_hashtbl = kvzalloc(array_size(hashsize, in nfsd_reply_cache_init()
190 sizeof(*nn->drc_hashtbl)), GFP_KERNEL); in nfsd_reply_cache_init()
191 if (!nn->drc_hashtbl) in nfsd_reply_cache_init()
195 INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head); in nfsd_reply_cache_init()
196 spin_lock_init(&nn->drc_hashtbl[i].cache_lock); in nfsd_reply_cache_init()
198 nn->drc_hashsize = hashsize; in nfsd_reply_cache_init()
202 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_init()
207 return -ENOMEM; in nfsd_reply_cache_init()
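
nfsd_reply_cache_init() sizes the hash table from the entry cap, registers a shrinker, allocates one bucket per slot with kvzalloc(), and gives every bucket its own LRU list head and spinlock; if the table allocation fails it must unregister the shrinker again before returning -ENOMEM. A rough user-space analogue of the bucket-array setup follows; drc_bucket, drc_init_table and the tiny list helper are made-up names, and a pthread mutex stands in for the per-bucket spinlock.

#include <stdlib.h>
#include <pthread.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *h)
{
	h->prev = h->next = h;	/* empty circular list, as INIT_LIST_HEAD does */
}

struct drc_bucket {
	struct list_node lru_head;	/* per-bucket LRU of cache entries */
	pthread_mutex_t  cache_lock;	/* stands in for spinlock_t */
};

static struct drc_bucket *drc_init_table(unsigned int hashsize)
{
	struct drc_bucket *tbl = calloc(hashsize, sizeof(*tbl));
	unsigned int i;

	if (!tbl)
		return NULL;	/* caller turns this into -ENOMEM and unwinds */
	for (i = 0; i < hashsize; i++) {
		list_init(&tbl[i].lru_head);
		pthread_mutex_init(&tbl[i].cache_lock, NULL);
	}
	return tbl;
}
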
216 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_shutdown()
218 for (i = 0; i < nn->drc_hashsize; i++) { in nfsd_reply_cache_shutdown()
219 struct list_head *head = &nn->drc_hashtbl[i].lru_head; in nfsd_reply_cache_shutdown()
222 nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i], in nfsd_reply_cache_shutdown()
227 kvfree(nn->drc_hashtbl); in nfsd_reply_cache_shutdown()
228 nn->drc_hashtbl = NULL; in nfsd_reply_cache_shutdown()
229 nn->drc_hashsize = 0; in nfsd_reply_cache_shutdown()
240 rp->c_timestamp = jiffies; in lru_put_end()
241 list_move_tail(&rp->c_lru, &b->lru_head); in lru_put_end()
250 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) { in prune_bucket()
253 * in-progress, but do keep scanning the list. in prune_bucket()
255 if (rp->c_state == RC_INPROG) in prune_bucket()
257 if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries && in prune_bucket()
258 time_before(jiffies, rp->c_timestamp + RC_EXPIRE)) in prune_bucket()
276 for (i = 0; i < nn->drc_hashsize; i++) { in prune_cache_entries()
277 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i]; in prune_cache_entries()
279 if (list_empty(&b->lru_head)) in prune_cache_entries()
281 spin_lock(&b->cache_lock); in prune_cache_entries()
283 spin_unlock(&b->cache_lock); in prune_cache_entries()
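
lru_put_end() above keeps each bucket's entries in least-recently-used order by moving a touched entry to the tail and refreshing its timestamp, so prune_bucket() can walk from the head (oldest first): entries still marked RC_INPROG are skipped, the walk stops once the cache is at or under its cap and the oldest remaining entry has not yet expired, and everything else is freed. The same policy restated as a small decision helper; the names and the seconds-based expiry are illustrative, whereas the kernel compares jiffies against RC_EXPIRE.

#include <stdbool.h>
#include <time.h>

enum prune_action { PRUNE_SKIP, PRUNE_STOP, PRUNE_FREE };

enum prune_action prune_decision(bool in_progress, time_t stamp, time_t now,
				 unsigned int nr_entries,
				 unsigned int max_entries,
				 time_t expire_secs)
{
	if (in_progress)			/* never free a call still running */
		return PRUNE_SKIP;
	if (nr_entries <= max_entries && now < stamp + expire_secs)
		return PRUNE_STOP;		/* under the cap and not yet expired */
	return PRUNE_FREE;			/* over the cap or too old */
}
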
294 return atomic_read(&nn->num_drc_entries); in nfsd_reply_cache_count()
314 struct xdr_buf *buf = &rqstp->rq_arg; in nfsd_cache_csum()
315 const unsigned char *p = buf->head[0].iov_base; in nfsd_cache_csum()
316 size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, in nfsd_cache_csum()
318 size_t len = min(buf->head[0].iov_len, csum_len); in nfsd_cache_csum()
322 csum_len -= len; in nfsd_cache_csum()
325 idx = buf->page_base / PAGE_SIZE; in nfsd_cache_csum()
326 base = buf->page_base & ~PAGE_MASK; in nfsd_cache_csum()
328 p = page_address(buf->pages[idx]) + base; in nfsd_cache_csum()
329 len = min_t(size_t, PAGE_SIZE - base, csum_len); in nfsd_cache_csum()
331 csum_len -= len; in nfsd_cache_csum()
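
nfsd_cache_csum() checksums only the first RC_CSUMLEN bytes of the request: whatever fits in the head iovec first, then the remainder out of the page array, starting page_base bytes in and re-aligning to the start of each subsequent page. A user-space sketch of the same walk, with a trivial byte sum standing in for csum_partial(); SKETCH_PAGE_SIZE, CSUM_LEN and cache_csum() are illustrative names.

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
#define CSUM_LEN 256u			/* stand-in for RC_CSUMLEN */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint32_t sum_bytes(uint32_t csum, const unsigned char *p, size_t len)
{
	while (len--)
		csum += *p++;
	return csum;
}

uint32_t cache_csum(const unsigned char *head, size_t head_len,
		    unsigned char *const *pages, size_t page_base,
		    size_t page_len)
{
	size_t csum_len = MIN(head_len + page_len, (size_t)CSUM_LEN);
	size_t len = MIN(head_len, csum_len);
	uint32_t csum = sum_bytes(0, head, len);
	size_t idx, base;

	csum_len -= len;

	/* continue into the page array, starting page_base bytes in */
	idx = page_base / SKETCH_PAGE_SIZE;
	base = page_base % SKETCH_PAGE_SIZE;
	while (csum_len) {
		len = MIN(SKETCH_PAGE_SIZE - base, csum_len);
		csum = sum_bytes(csum, pages[idx] + base, len);
		csum_len -= len;
		base = 0;
		idx++;
	}
	return csum;
}
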
342 if (key->c_key.k_xid == rp->c_key.k_xid && in nfsd_cache_key_cmp()
343 key->c_key.k_csum != rp->c_key.k_csum) { in nfsd_cache_key_cmp()
348 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); in nfsd_cache_key_cmp()
361 struct rb_node **p = &b->rb_head.rb_node, in nfsd_cache_insert()
373 p = &parent->rb_left; in nfsd_cache_insert()
375 p = &parent->rb_right; in nfsd_cache_insert()
381 rb_link_node(&key->c_node, parent, p); in nfsd_cache_insert()
382 rb_insert_color(&key->c_node, &b->rb_head); in nfsd_cache_insert()
385 if (entries > nn->longest_chain) { in nfsd_cache_insert()
386 nn->longest_chain = entries; in nfsd_cache_insert()
387 nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries); in nfsd_cache_insert()
388 } else if (entries == nn->longest_chain) { in nfsd_cache_insert()
390 nn->longest_chain_cachesize = min_t(unsigned int, in nfsd_cache_insert()
391 nn->longest_chain_cachesize, in nfsd_cache_insert()
392 atomic_read(&nn->num_drc_entries)); in nfsd_cache_insert()
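
nfsd_cache_insert() descends the bucket's red-black tree, links the new entry where the search bottoms out, and counts how many nodes it walked. The tail of the function records the longest such chain and, on a tie, the smallest total cache size at which it was seen; these are the values reported later as "longest chain len" and "cachesize at longest". Here is that bookkeeping as a stand-alone helper; struct chain_stats and update_chain_stats() are made-up names.

struct chain_stats {
	unsigned int longest_chain;
	unsigned int longest_chain_cachesize;
};

static void update_chain_stats(struct chain_stats *s, unsigned int entries,
			       unsigned int num_entries_now)
{
	if (entries > s->longest_chain) {
		s->longest_chain = entries;
		s->longest_chain_cachesize = num_entries_now;
	} else if (entries == s->longest_chain) {
		/* prefer the smallest cache size that produced this length */
		if (num_entries_now < s->longest_chain_cachesize)
			s->longest_chain_cachesize = num_entries_now;
	}
}
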
400 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
418 __be32 xid = rqstp->rq_xid; in nfsd_cache_lookup()
421 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash]; in nfsd_cache_lookup()
422 int type = rqstp->rq_cachetype; in nfsd_cache_lookup()
425 rqstp->rq_cacherep = NULL; in nfsd_cache_lookup()
441 spin_lock(&b->cache_lock); in nfsd_cache_lookup()
450 rqstp->rq_cacherep = rp; in nfsd_cache_lookup()
451 rp->c_state = RC_INPROG; in nfsd_cache_lookup()
453 atomic_inc(&nn->num_drc_entries); in nfsd_cache_lookup()
460 spin_unlock(&b->cache_lock); in nfsd_cache_lookup()
470 if (rp->c_state == RC_INPROG) in nfsd_cache_lookup()
473 /* From the hall of fame of impractical attacks: in nfsd_cache_lookup()
476 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
480 switch (rp->c_type) { in nfsd_cache_lookup()
484 svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat); in nfsd_cache_lookup()
488 if (!nfsd_cache_append(rqstp, &rp->c_replvec)) in nfsd_cache_lookup()
493 WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); in nfsd_cache_lookup()
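
When nfsd_cache_lookup() finds a finished entry it must decide whether to drop the retransmission, replay the cached reply, or run the call again: an in-progress entry causes a drop, a reply cached for a privileged ("secure") port is not replayed to an unprivileged one, and otherwise the switch replays either the single cached status word (RC_REPLSTAT) or the whole cached reply buffer (RC_REPLBUFF). Below is a condensed restatement of that decision; the RC_* names mirror the kernel constants, but replay_cached_entry() and its boolean parameters are a simplified model, not the kernel interface.

#include <stdbool.h>

enum cache_type { RC_NOCACHE, RC_REPLSTAT, RC_REPLBUFF };
enum lookup_rtn { RC_DROPIT, RC_REPLY, RC_DOIT };

enum lookup_rtn replay_cached_entry(enum cache_type type, bool in_progress,
				    bool req_secure, bool entry_secure,
				    bool appended_ok)
{
	if (in_progress)
		return RC_DROPIT;	/* original call still running: drop the retransmission */
	if (!req_secure && entry_secure)
		return RC_DOIT;		/* don't replay a reply cached for a privileged port */

	switch (type) {
	case RC_NOCACHE:
		return RC_DOIT;		/* nothing was cached; re-execute the call */
	case RC_REPLSTAT:
		return RC_REPLY;	/* cached reply is a single status word: re-emit it */
	case RC_REPLBUFF:
		/* the full encoded reply was cached; appended_ok models whether
		 * it could be copied back into the response buffer */
		return appended_ok ? RC_REPLY : RC_DOIT;
	}
	return RC_DOIT;
}
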
502 * nfsd_cache_update - Update an entry in the duplicate reply cache.
508 * executed and the complete reply is in rqstp->rq_res.
511 * the toplevel loop requires max-sized buffers, which would be a waste
524 struct svc_cacherep *rp = rqstp->rq_cacherep; in nfsd_cache_update()
525 struct kvec *resv = &rqstp->rq_res.head[0], *cachv; in nfsd_cache_update()
534 hash = nfsd_cache_hash(rp->c_key.k_xid, nn); in nfsd_cache_update()
535 b = &nn->drc_hashtbl[hash]; in nfsd_cache_update()
537 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); in nfsd_cache_update()
546 switch (cachetype) { in nfsd_cache_update()
550 rp->c_replstat = *statp; in nfsd_cache_update()
553 cachv = &rp->c_replvec; in nfsd_cache_update()
555 cachv->iov_base = kmalloc(bufsize, GFP_KERNEL); in nfsd_cache_update()
556 if (!cachv->iov_base) { in nfsd_cache_update()
560 cachv->iov_len = bufsize; in nfsd_cache_update()
561 memcpy(cachv->iov_base, statp, bufsize); in nfsd_cache_update()
567 spin_lock(&b->cache_lock); in nfsd_cache_update()
570 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
571 rp->c_type = cachetype; in nfsd_cache_update()
572 rp->c_state = RC_DONE; in nfsd_cache_update()
573 spin_unlock(&b->cache_lock); in nfsd_cache_update()
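
nfsd_cache_update() measures the encoded reply from the NFS status word (statp) to the end of rq_res.head[0] and, for RC_REPLBUFF, duplicates that region into the entry's own buffer before marking the entry RC_DONE; if the allocation fails, caching is simply abandoned. A sketch of that copy, with struct cached_reply standing in for rp->c_replvec (the names are illustrative).

#include <stdlib.h>
#include <string.h>

struct cached_reply {
	void   *base;
	size_t  len;
};

int cache_reply_buffer(struct cached_reply *cache,
		       const void *res_base, size_t res_len,
		       const void *statp)
{
	/* bytes from the status word to the end of the encoded reply,
	 * rounded down to whole 32-bit XDR words, as len >>= 2; len << 2 does */
	size_t len = res_len - (size_t)((const char *)statp - (const char *)res_base);
	size_t bufsize = (len >> 2) << 2;

	cache->base = malloc(bufsize);
	if (!cache->base)
		return -1;	/* caching is best-effort: give up, the reply is still sent */
	memcpy(cache->base, statp, bufsize);
	cache->len = bufsize;
	return 0;
}
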
585 struct kvec *vec = &rqstp->rq_res.head[0]; in nfsd_cache_append()
587 if (vec->iov_len + data->iov_len > PAGE_SIZE) { in nfsd_cache_append()
589 data->iov_len); in nfsd_cache_append()
592 memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); in nfsd_cache_append()
593 vec->iov_len += data->iov_len; in nfsd_cache_append()
604 struct nfsd_net *nn = m->private; in nfsd_reply_cache_stats_show()
606 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); in nfsd_reply_cache_stats_show()
608 atomic_read(&nn->num_drc_entries)); in nfsd_reply_cache_stats_show()
609 seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); in nfsd_reply_cache_stats_show()
611 percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE])); in nfsd_reply_cache_stats_show()
619 percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES])); in nfsd_reply_cache_stats_show()
620 seq_printf(m, "longest chain len: %u\n", nn->longest_chain); in nfsd_reply_cache_stats_show()
621 seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); in nfsd_reply_cache_stats_show()
627 struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info, in nfsd_reply_cache_stats_open()