// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache	*drc_slab;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
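
/*
 * Worked example (assuming 4KB pages, i.e. PAGE_SHIFT == 12): with 1GB
 * of low memory, low_pages = 262144, int_sqrt(262144) = 512, so
 * limit = (16 * 512) << (12 - 10) = 8192 << 2 = 32768 entries, which
 * matches the 1GB row in the table above.
 */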

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
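
/*
 * Continuing the worked example: with a limit of 32768 entries (the 1GB
 * row above), nfsd_hashsize() returns 32768 / 64 = 512 buckets (already
 * a power of two), and nfsd_reply_cache_init() below derives
 * maskbits = ilog2(512) = 9.
 */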

static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
		    struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	kmem_cache_free(drc_slab, rp);
}

static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
	struct nfsd_cacherep *rp;
	unsigned long freed = 0;

	while (!list_empty(dispose)) {
		rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
		list_del(&rp->c_lru);
		nfsd_cacherep_free(rp);
		freed++;
	}
	return freed;
}
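
/*
 * Note on the pattern above: callers (see nfsd_reply_cache_scan() and
 * nfsd_cache_lookup() below) unlink entries onto a private @dispose list
 * while holding the bucket's cache_lock, then call nfsd_cacherep_dispose()
 * after dropping the lock, so kfree() and kmem_cache_free() never run
 * under the spinlock.
 */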

static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			    struct nfsd_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
	}
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
			     struct nfsd_net *nn)
{
	nfsd_cacherep_unlink_locked(nn, b, rp);
	nfsd_cacherep_free(rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
		      struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_cacherep_unlink_locked(nn, b, rp);
	spin_unlock(&b->cache_lock);
	nfsd_cacherep_free(rp);
}

int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct nfsd_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}

/**
 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
 * @nn: nfsd_net being initialized
 *
 * Returns zero on success; otherwise a negative errno is returned.
 */
int nfsd_net_reply_cache_init(struct nfsd_net *nn)
{
	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

/**
 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
 * @nn: nfsd_net being freed
 *
 */
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
{
	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.seeks = 1;
	status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
				   "nfsd-reply:%s", nn->nfsd_name);
	if (status)
		return status;

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		goto out_shrinker;

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct nfsd_cacherep *rp;
	unsigned int i;

	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;

		while (!list_empty(head)) {
			rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
						     rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
	unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

	return &nn->drc_hashtbl[hash];
}
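
/*
 * Illustrative example: hash_32() folds the 32-bit XID down to a
 * maskbits-bit value in [0, 2^maskbits), so with maskbits = 9 each XID
 * selects one of the 512 buckets allocated in nfsd_reply_cache_init().
 */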

/*
 * Remove up to @max expired entries from bucket @b, collecting them on
 * the @dispose list. If @max is zero, do not limit the number of removed
 * entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{
	unsigned long expiry = jiffies - RC_EXPIRE;
	struct nfsd_cacherep *rp, *tmp;
	unsigned int freed = 0;

	lockdep_assert_held(&b->cache_lock);

	/* The bucket LRU is ordered oldest-first. */
	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;

		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(expiry, rp->c_timestamp))
			break;

		nfsd_cacherep_unlink_locked(nn, b, rp);
		list_add(&rp->c_lru, dispose);

		if (max && ++freed > max)
			break;
	}
}

/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (ie, the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}

/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free expired entries on each bucket's LRU list until we've released
 * nr_to_scan freed objects. Nothing will be released if the cache
 * has not exceeded its max_drc_entries limit.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);
	unsigned long freed = 0;
	LIST_HEAD(dispose);
	unsigned int i;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;

		spin_lock(&b->cache_lock);
		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
		spin_unlock(&b->cache_lock);

		freed += nfsd_cacherep_dispose(&dispose);
		if (freed > sc->nr_to_scan)
			break;
	}

	trace_nfsd_drc_gc(nn, freed);
	return freed;
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
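
/*
 * Note: csum_partial() takes the running checksum as its third argument,
 * so the head and each page segment above are accumulated into a single
 * checksum rather than being checksummed independently.
 */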

static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		nfsd_stats_payload_misses_inc(nn);
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * inserts an empty key on failure.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
		  struct nfsd_net *nn)
{
	struct nfsd_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct nfsd_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, a new entry is
 * preallocated before the bucket is searched; if a matching entry is
 * already present, the preallocated one is freed and the cached entry
 * is used instead.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
{
	struct nfsd_net *nn;
	struct nfsd_cacherep *rp, *found;
	__wsum csum;
	struct nfsd_drc_bucket *b;
	int type = rqstp->rq_cachetype;
	unsigned long freed;
	LIST_HEAD(dispose);
	int rtn = RC_DOIT;

	if (type == RC_NOCACHE) {
		nfsd_stats_rc_nocache_inc();
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp)
		goto found_entry;
	*cacherep = rp;
	rp->c_state = RC_INPROG;
	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
	spin_unlock(&b->cache_lock);

	freed = nfsd_cacherep_dispose(&dispose);
	trace_nfsd_drc_gc(nn, freed);

	nfsd_stats_rc_misses_inc();
	atomic_inc(&nn->num_drc_entries);
	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
	goto out;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsd_reply_cache_free_locked(NULL, rp, nn);
	nfsd_stats_rc_hits_inc();
	rtn = RC_DROPIT;
	rp = found;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;
}
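
/*
 * Typical caller flow, simplified from nfsd_dispatch() (a sketch, not
 * the exact upstream code):
 *
 *	switch (nfsd_cache_lookup(rqstp, &rp)) {
 *	case RC_DOIT:
 *		// execute the procedure, encode the reply, then
 *		// cache the result:
 *		nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp);
 *		break;
 *	case RC_REPLY:
 *		// the cached reply was copied into rq_res; just send it
 *		break;
 *	case RC_DROPIT:
 *		// original request still in progress; drop silently
 *		break;
 *	}
 */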

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	struct nfsd_drc_bucket *b;
	int	len;
	size_t	bufsize = 0;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nfsd_stats_drc_mem_usage_add(nn, bufsize);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
					  nfsd_net_id);

	seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage:             %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
	seq_printf(m, "cache hits:            %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
	seq_printf(m, "cache misses:          %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
	seq_printf(m, "not cached:            %lld\n",
		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
	seq_printf(m, "payload misses:        %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
	return 0;
}
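
/*
 * Illustrative /proc/fs/nfsd/reply_cache_stats output (the values are
 * made up; the field labels match the seq_printf() calls above):
 *
 *	max entries:           65536
 *	num entries:           1324
 *	hash buckets:          1024
 *	mem usage:             212992
 *	cache hits:            98
 *	cache misses:          125733
 *	not cached:            21341
 *	payload misses:        0
 *	longest chain len:     3
 *	cachesize at longest:  1102
 */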