/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

#define fscache_cookie_hash_shift 15
static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
					    loff_t object_size);
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object);

static void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
	struct hlist_node *object;
	const u8 *k;
	unsigned loop;

	pr_err("%c-cookie c=%p [p=%p fl=%lx nc=%u na=%u]\n",
	       prefix, cookie, cookie->parent, cookie->flags,
	       atomic_read(&cookie->n_children),
	       atomic_read(&cookie->n_active));
	pr_err("%c-cookie d=%p n=%p\n",
	       prefix, cookie->def, cookie->netfs_data);

	object = READ_ONCE(cookie->backing_objects.first);
	if (object)
		pr_err("%c-cookie o=%p\n",
		       prefix, hlist_entry(object, struct fscache_object, cookie_link));

	pr_err("%c-key=[%u] '", prefix, cookie->key_len);
	k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
		cookie->inline_key : cookie->key;
	for (loop = 0; loop < cookie->key_len; loop++)
		pr_cont("%02x", k[loop]);
	pr_cont("'\n");
}

void fscache_free_cookie(struct fscache_cookie *cookie)
{
	if (cookie) {
		BUG_ON(!hlist_empty(&cookie->backing_objects));
		if (cookie->aux_len > sizeof(cookie->inline_aux))
			kfree(cookie->aux);
		if (cookie->key_len > sizeof(cookie->inline_key))
			kfree(cookie->key);
		kmem_cache_free(fscache_cookie_jar, cookie);
	}
}

/*
 * Set the index key in a cookie.  The cookie struct has space for a 16-byte
 * key plus length and hash, but if that's not big enough, the key is instead
 * copied into a separately allocated buffer, rounded up to a whole number of
 * 32-bit words, and cookie->key points at that.
 */
static int fscache_set_key(struct fscache_cookie *cookie,
			   const void *index_key, size_t index_key_len)
{
	unsigned long long h;
	u32 *buf;
	int bufs;
	int i;

	bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));

	if (index_key_len > sizeof(cookie->inline_key)) {
		buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		cookie->key = buf;
	} else {
		buf = (u32 *)cookie->inline_key;
	}

	memcpy(buf, index_key, index_key_len);

	/* Calculate a hash and combine this with the length in the first word
	 * or first half word
	 */
	h = (unsigned long)cookie->parent;
	h += index_key_len + cookie->type;

	for (i = 0; i < bufs; i++)
		h += buf[i];

	cookie->key_hash = h ^ (h >> 32);
	return 0;
}
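
/* For illustration: with the code above, a 12-byte index key fits in
 * cookie->inline_key and needs no allocation, whereas a 20-byte key is
 * copied into a kcalloc'd buffer of DIV_ROUND_UP(20, 4) = 5 32-bit words
 * that cookie->key then points to.  Either way, the 64-bit running sum h
 * (parent pointer + key length + type + each key word) is folded down with
 * h ^ (h >> 32) to give the 32-bit cookie->key_hash.
 */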

static long fscache_compare_cookie(const struct fscache_cookie *a,
				   const struct fscache_cookie *b)
{
	const void *ka, *kb;

	if (a->key_hash != b->key_hash)
		return (long)a->key_hash - (long)b->key_hash;
	if (a->parent != b->parent)
		return (long)a->parent - (long)b->parent;
	if (a->key_len != b->key_len)
		return (long)a->key_len - (long)b->key_len;
	if (a->type != b->type)
		return (long)a->type - (long)b->type;

	if (a->key_len <= sizeof(a->inline_key)) {
		ka = &a->inline_key;
		kb = &b->inline_key;
	} else {
		ka = a->key;
		kb = b->key;
	}
	return memcmp(ka, kb, a->key_len);
}

/*
 * Allocate a cookie.
 */
struct fscache_cookie *fscache_alloc_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	void *netfs_data,
	loff_t object_size)
{
	struct fscache_cookie *cookie;

	/* allocate and initialise a cookie */
	cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie)
		return NULL;

	cookie->key_len = index_key_len;
	cookie->aux_len = aux_data_len;

	if (fscache_set_key(cookie, index_key, index_key_len) < 0)
		goto nomem;

	if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
		memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
	} else {
		cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
		if (!cookie->aux)
			goto nomem;
	}

	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* We keep the active count elevated until relinquishment to prevent an
	 * attempt to wake up every time the object operations queue quiesces.
	 */
	atomic_set(&cookie->n_active, 1);

	cookie->def = def;
	cookie->parent = parent;
	cookie->netfs_data = netfs_data;
	cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
	cookie->type = def->type;
	spin_lock_init(&cookie->lock);
	spin_lock_init(&cookie->stores_lock);
	INIT_HLIST_HEAD(&cookie->backing_objects);

	/* radix tree insertion won't use the preallocation pool unless it's
	 * told it may not wait */
	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	return cookie;

nomem:
	fscache_free_cookie(cookie);
	return NULL;
}

/*
 * Attempt to insert the new cookie into the hash.  If there's a collision, we
 * return the old cookie if it's not in use and an error otherwise.
 */
struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate)
{
	struct fscache_cookie *cursor;
	struct hlist_bl_head *h;
	struct hlist_bl_node *p;
	unsigned int bucket;

	bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_for_each_entry(cursor, p, h, hash_link) {
		if (fscache_compare_cookie(candidate, cursor) == 0)
			goto collision;
	}

	__set_bit(FSCACHE_COOKIE_ACQUIRED, &candidate->flags);
	fscache_cookie_get(candidate->parent, fscache_cookie_get_acquire_parent);
	atomic_inc(&candidate->parent->n_children);
	hlist_bl_add_head(&candidate->hash_link, h);
	hlist_bl_unlock(h);
	return candidate;

collision:
	if (test_and_set_bit(FSCACHE_COOKIE_ACQUIRED, &cursor->flags)) {
		trace_fscache_cookie(cursor, fscache_cookie_collision,
				     atomic_read(&cursor->usage));
		pr_err("Duplicate cookie detected\n");
		fscache_print_cookie(cursor, 'O');
		fscache_print_cookie(candidate, 'N');
		hlist_bl_unlock(h);
		return NULL;
	}

	fscache_cookie_get(cursor, fscache_cookie_get_reacquire);
	hlist_bl_unlock(h);
	return cursor;
}
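
/* For illustration: the bucket is just the low-order bits of the key hash.
 * With fscache_cookie_hash_shift of 15 there are 32768 buckets, so a cookie
 * whose key_hash is 0x8001abcd hashes to bucket 0x8001abcd & 0x7fff = 0x2bcd.
 */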

/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 * - the top level index cookie for each netfs is stored in the fscache_netfs
 *   struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 * - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	void *netfs_data,
	loff_t object_size,
	bool enable)
{
	struct fscache_cookie *candidate, *cookie;

	BUG_ON(!def);

	_enter("{%s},{%s},%p,%u",
	       parent ? (char *) parent->def->name : "<no-parent>",
	       def->name, netfs_data, enable);

	if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
		return NULL;
	if (!aux_data || !aux_data_len) {
		aux_data = NULL;
		aux_data_len = 0;
	}

	fscache_stat(&fscache_n_acquires);

	/* if there's no parent cookie, then we don't create one here either */
	if (!parent) {
		fscache_stat(&fscache_n_acquires_null);
		_leave(" [no parent]");
		return NULL;
	}

	/* validate the definition */
	BUG_ON(!def->name[0]);

	BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
	       parent->type != FSCACHE_COOKIE_TYPE_INDEX);

	candidate = fscache_alloc_cookie(parent, def,
					 index_key, index_key_len,
					 aux_data, aux_data_len,
					 netfs_data, object_size);
	if (!candidate) {
		fscache_stat(&fscache_n_acquires_oom);
		_leave(" [ENOMEM]");
		return NULL;
	}

	cookie = fscache_hash_cookie(candidate);
	if (!cookie) {
		trace_fscache_cookie(candidate, fscache_cookie_discard, 1);
		goto out;
	}

	if (cookie == candidate)
		candidate = NULL;

	switch (cookie->type) {
	case FSCACHE_COOKIE_TYPE_INDEX:
		fscache_stat(&fscache_n_cookie_index);
		break;
	case FSCACHE_COOKIE_TYPE_DATAFILE:
		fscache_stat(&fscache_n_cookie_data);
		break;
	default:
		fscache_stat(&fscache_n_cookie_special);
		break;
	}

	trace_fscache_acquire(cookie);

	if (enable) {
		/* if the object is an index then we need do nothing more here
		 * - we create indices on disk when we need them as an index
		 * may exist in multiple caches */
		if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
			if (fscache_acquire_non_index_cookie(cookie, object_size) == 0) {
				set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
			} else {
				atomic_dec(&parent->n_children);
				fscache_cookie_put(cookie,
						   fscache_cookie_put_acquire_nobufs);
				fscache_stat(&fscache_n_acquires_nobufs);
				_leave(" = NULL");
				return NULL;
			}
		} else {
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
		}
	}

	fscache_stat(&fscache_n_acquires_ok);

out:
	fscache_free_cookie(candidate);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
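
/* Illustrative sketch of how a netfs reaches this function, normally via the
 * fscache_acquire_cookie() wrapper in include/linux/fscache.h.  The cookie
 * definition, key and auxiliary data shown here are hypothetical, and a NULL
 * return simply leaves the file uncached:
 *
 *	vnode->cache = fscache_acquire_cookie(volume_cookie,
 *					      &my_file_cookie_def,
 *					      &vnode->fid, sizeof(vnode->fid),
 *					      &vnode->aux, sizeof(vnode->aux),
 *					      vnode,
 *					      i_size_read(inode),
 *					      true);
 */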

/*
 * Enable a cookie to permit it to accept new operations.
 */
void __fscache_enable_cookie(struct fscache_cookie *cookie,
			     const void *aux_data,
			     loff_t object_size,
			     bool (*can_enable)(void *data),
			     void *data)
{
	_enter("%p", cookie);

	trace_fscache_enable(cookie);

	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 TASK_UNINTERRUPTIBLE);

	fscache_update_aux(cookie, aux_data);

	if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock;

	if (can_enable && !can_enable(data)) {
		/* The netfs decided it didn't want to enable after all */
	} else if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
		/* Wait for outstanding disablement to complete */
		__fscache_wait_on_invalidate(cookie);

		if (fscache_acquire_non_index_cookie(cookie, object_size) == 0)
			set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	} else {
		set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
	}

out_unlock:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
}
EXPORT_SYMBOL(__fscache_enable_cookie);
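
/* Illustrative sketch: a netfs would normally get here via the
 * fscache_enable_cookie() wrapper, e.g. to re-enable caching on an inode when
 * the last writer closes it.  The predicate and its data are hypothetical:
 *
 *	static bool my_can_enable(void *data)
 *	{
 *		struct inode *inode = data;
 *
 *		return atomic_read(&inode->i_writecount) == 0;
 *	}
 *
 *	fscache_enable_cookie(vnode->cache, &vnode->aux, i_size_read(inode),
 *			      my_can_enable, inode);
 */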

/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
					    loff_t object_size)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	int ret;

	_enter("");

	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

	/* now we need to see whether the backing objects for this cookie exist
	 * yet; if not, there'll be nothing to search */
	down_read(&fscache_addremove_sem);

	if (list_empty(&fscache_cache_list)) {
		up_read(&fscache_addremove_sem);
		_leave(" = 0 [no caches]");
		return 0;
	}

	/* select a cache in which to store the object */
	cache = fscache_select_cache_for_object(cookie->parent);
	if (!cache) {
		up_read(&fscache_addremove_sem);
		fscache_stat(&fscache_n_acquires_no_cache);
		_leave(" = -ENOMEDIUM [no cache]");
		return -ENOMEDIUM;
	}

	_debug("cache %s", cache->tag->name);

	set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);

	/* ask the cache to allocate objects for this cookie and its parent
	 * chain */
	ret = fscache_alloc_object(cache, cookie);
	if (ret < 0) {
		up_read(&fscache_addremove_sem);
		_leave(" = %d", ret);
		return ret;
	}

	spin_lock(&cookie->lock);
	if (hlist_empty(&cookie->backing_objects)) {
		spin_unlock(&cookie->lock);
		goto unavailable;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	fscache_set_store_limit(object, object_size);

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

	spin_unlock(&cookie->lock);

	/* we may be required to wait for lookup to complete at this point */
	if (!fscache_defer_lookup) {
		_debug("non-deferred lookup %p", &cookie->flags);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			    TASK_UNINTERRUPTIBLE);
		_debug("complete");
		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
			goto unavailable;
	}

	up_read(&fscache_addremove_sem);
	_leave(" = 0 [deferred]");
	return 0;

unavailable:
	up_read(&fscache_addremove_sem);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	int ret;

	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);

	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, &cookie->backing_objects,
			     cookie_link) {
		if (object->cache == cache)
			goto object_already_extant;
	}
	spin_unlock(&cookie->lock);

	/* ask the cache to allocate an object (we may end up with duplicate
	 * objects at this stage, but we sort that out later) */
	fscache_stat(&fscache_n_cop_alloc_object);
	object = cache->ops->alloc_object(cache, cookie);
	fscache_stat_d(&fscache_n_cop_alloc_object);
	if (IS_ERR(object)) {
		fscache_stat(&fscache_n_object_no_alloc);
		ret = PTR_ERR(object);
		goto error;
	}

	ASSERTCMP(object->cookie, ==, cookie);
	fscache_stat(&fscache_n_object_alloc);

	object->debug_id = atomic_inc_return(&fscache_object_debug_id);

	_debug("ALLOC OBJ%x: %s {%lx}",
	       object->debug_id, cookie->def->name, object->events);

	ret = fscache_alloc_object(cache, cookie->parent);
	if (ret < 0)
		goto error_put;

	/* only attach if we managed to allocate all we needed, otherwise
	 * discard the object we just allocated and instead use the one
	 * attached to the cookie */
	if (fscache_attach_object(cookie, object) < 0) {
		fscache_stat(&fscache_n_cop_put_object);
		cache->ops->put_object(object, fscache_obj_put_attach_fail);
		fscache_stat_d(&fscache_n_cop_put_object);
	}

	_leave(" = 0");
	return 0;

object_already_extant:
	ret = -ENOBUFS;
	if (fscache_object_is_dying(object) ||
	    fscache_cache_is_broken(object)) {
		spin_unlock(&cookie->lock);
		goto error;
	}
	spin_unlock(&cookie->lock);
	_leave(" = 0 [found]");
	return 0;

error_put:
	fscache_stat(&fscache_n_cop_put_object);
	cache->ops->put_object(object, fscache_obj_put_alloc_fail);
	fscache_stat_d(&fscache_n_cop_put_object);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object)
{
	struct fscache_object *p;
	struct fscache_cache *cache = object->cache;
	int ret;

	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

	ASSERTCMP(object->cookie, ==, cookie);

	spin_lock(&cookie->lock);

	/* there may be multiple initial creations of this object, but we only
	 * want one */
	ret = -EEXIST;
	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p))
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
	}

	/* pin the parent object */
	spin_lock_nested(&cookie->parent->lock, 1);
	hlist_for_each_entry(p, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (fscache_object_is_dying(p)) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
			}
			object->parent = p;
			spin_lock(&p->lock);
			p->n_children++;
			spin_unlock(&p->lock);
			break;
		}
	}
	spin_unlock(&cookie->parent->lock);

	/* attach to the cache's object list */
	if (list_empty(&object->cache_link)) {
		spin_lock(&cache->object_list_lock);
		list_add(&object->cache_link, &cache->object_list);
		spin_unlock(&cache->object_list_lock);
	}

	/* Attach to the cookie.  The object already has a ref on it. */
	hlist_add_head(&object->cookie_link, &cookie->backing_objects);

	fscache_objlist_add(object);
	ret = 0;

cant_attach_object:
	spin_unlock(&cookie->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
	struct fscache_object *object;

	_enter("{%s}", cookie->def->name);

	fscache_stat(&fscache_n_invalidates);

	/* Only permit invalidation of data files.  Invalidating an index will
	 * require the caller to release all its attachments to the tree rooted
	 * there, and if it's doing that, it may as well just retire the
	 * cookie.
	 */
	ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	/* If there's an object, we tell the object state machine to handle the
	 * invalidation on our behalf, otherwise there's nothing to do.
	 */
	if (!hlist_empty(&cookie->backing_objects)) {
		spin_lock(&cookie->lock);

		if (fscache_cookie_enabled(cookie) &&
		    !hlist_empty(&cookie->backing_objects) &&
		    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
				      &cookie->flags)) {
			object = hlist_entry(cookie->backing_objects.first,
					     struct fscache_object,
					     cookie_link);
			if (fscache_object_is_live(object))
				fscache_raise_event(
					object, FSCACHE_OBJECT_EV_INVALIDATE);
		}

		spin_unlock(&cookie->lock);
	}

	_leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
	_enter("%p", cookie);

	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
		    TASK_UNINTERRUPTIBLE);

	_leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
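
/* Illustrative sketch: on noticing that the server's copy of a file has
 * changed, a netfs would typically kick off invalidation via the
 * fscache_invalidate() wrapper and, before its next use of the cache, wait
 * for it to finish:
 *
 *	fscache_invalidate(vnode->cache);
 *	...
 *	fscache_wait_on_invalidate(vnode->cache);
 */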

/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
{
	struct fscache_object *object;

	fscache_stat(&fscache_n_updates);

	if (!cookie) {
		fscache_stat(&fscache_n_updates_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("{%s}", cookie->def->name);

	spin_lock(&cookie->lock);

	fscache_update_aux(cookie, aux_data);

	if (fscache_cookie_enabled(cookie)) {
		/* update the index entry on disk in each cache backing this
		 * cookie.
		 */
		hlist_for_each_entry(object,
				     &cookie->backing_objects, cookie_link) {
			fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
		}
	}

	spin_unlock(&cookie->lock);
	_leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
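
/* Illustrative sketch: when the coherency data changes (say a hypothetical
 * aux structure carrying mtime and a data version), the netfs refreshes what
 * the caches will store via the fscache_update_cookie() wrapper:
 *
 *	vnode->aux.mtime = inode->i_mtime;
 *	vnode->aux.data_version = vnode->status.data_version;
 *	fscache_update_cookie(vnode->cache, &vnode->aux);
 */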

/*
 * Disable a cookie to stop it from accepting new requests from the netfs.
 */
void __fscache_disable_cookie(struct fscache_cookie *cookie,
			      const void *aux_data,
			      bool invalidate)
{
	struct fscache_object *object;
	bool awaken = false;

	_enter("%p,%u", cookie, invalidate);

	trace_fscache_disable(cookie);

	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

	if (atomic_read(&cookie->n_children) != 0) {
		pr_err("Cookie '%s' still has children\n",
		       cookie->def->name);
		BUG();
	}

	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
			 TASK_UNINTERRUPTIBLE);

	fscache_update_aux(cookie, aux_data);

	if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
		goto out_unlock_enable;

	/* If the cookie is being invalidated, wait for that to complete first
	 * so that we can reuse the flag.
	 */
	__fscache_wait_on_invalidate(cookie);

	/* Dispose of the backing objects */
	set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);

	spin_lock(&cookie->lock);
	if (!hlist_empty(&cookie->backing_objects)) {
		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
			if (invalidate)
				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}
	} else {
		if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
			awaken = true;
	}
	spin_unlock(&cookie->lock);
	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

	/* Wait for cessation of activity requiring access to the netfs (when
	 * n_active reaches 0).  This makes sure outstanding reads and writes
	 * have completed.
	 */
	if (!atomic_dec_and_test(&cookie->n_active)) {
		wait_var_event(&cookie->n_active,
			       !atomic_read(&cookie->n_active));
	}

	/* Make sure any pending writes are cancelled. */
	if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX)
		fscache_invalidate_writes(cookie);

	/* Reset the cookie state if it wasn't relinquished */
	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
		atomic_inc(&cookie->n_active);
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
	}

out_unlock_enable:
	clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
	wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
	_leave("");
}
EXPORT_SYMBOL(__fscache_disable_cookie);
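
/* Illustrative sketch: a netfs turns caching off for an inode via the
 * fscache_disable_cookie() wrapper, for instance when a file is opened for
 * writing, passing true to have the stored data discarded as well:
 *
 *	fscache_disable_cookie(vnode->cache, &vnode->aux, true);
 */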

/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
				 const void *aux_data,
				 bool retire)
{
	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	if (!cookie) {
		fscache_stat(&fscache_n_relinquishes_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("%p{%s,%p,%d},%d",
	       cookie, cookie->def->name, cookie->netfs_data,
	       atomic_read(&cookie->n_active), retire);

	trace_fscache_relinquish(cookie, retire);

	/* No further netfs-accessing operations on this cookie permitted */
	if (test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags))
		BUG();

	__fscache_disable_cookie(cookie, aux_data, retire);

	/* Clear pointers back to the netfs */
	cookie->netfs_data = NULL;
	cookie->def = NULL;
	BUG_ON(!radix_tree_empty(&cookie->stores));

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
		ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
		atomic_dec(&cookie->parent->n_children);
	}

	/* Dispose of the netfs's link to the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie, fscache_cookie_put_relinquish);

	_leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
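
/* Illustrative sketch: when an inode is evicted, the netfs hands the cookie
 * back via the fscache_relinquish_cookie() wrapper, retiring the on-disk
 * object if the file was deleted and keeping it otherwise:
 *
 *	fscache_relinquish_cookie(vnode->cache, &vnode->aux, deleted);
 *	vnode->cache = NULL;
 */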

/*
 * Remove a cookie from the hash table.
 */
static void fscache_unhash_cookie(struct fscache_cookie *cookie)
{
	struct hlist_bl_head *h;
	unsigned int bucket;

	bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_del(&cookie->hash_link);
	hlist_bl_unlock(h);
}

/*
 * Drop a reference to a cookie.
 */
void fscache_cookie_put(struct fscache_cookie *cookie,
			enum fscache_cookie_trace where)
{
	struct fscache_cookie *parent;
	int usage;

	_enter("%p", cookie);

	do {
		usage = atomic_dec_return(&cookie->usage);
		trace_fscache_cookie(cookie, where, usage);

		if (usage > 0)
			return;
		BUG_ON(usage < 0);

		parent = cookie->parent;
		fscache_unhash_cookie(cookie);
		fscache_free_cookie(cookie);

		cookie = parent;
		where = fscache_cookie_put_parent;
	} while (cookie);

	_leave("");
}

/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: this only works for non-index (data file) cookies
 */
int __fscache_check_consistency(struct fscache_cookie *cookie,
				const void *aux_data)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,", cookie);

	ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	if (hlist_empty(&cookie->backing_objects))
		return 0;

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(cookie, op, NULL, NULL, NULL);
	op->flags = FSCACHE_OP_MYTHREAD |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_check_consistency);

	spin_lock(&cookie->lock);

	fscache_update_aux(cookie, aux_data);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto inconsistent;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto inconsistent;

	op->debug_id = atomic_inc_return(&fscache_op_debug_id);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, op) < 0)
		goto submit_failed;

	/* the work queue now carries its own ref on the object */
	spin_unlock(&cookie->lock);

	ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
	if (ret == 0) {
		/* ask the cache to honour the operation */
		ret = object->cache->ops->check_consistency(op);
		fscache_op_complete(op, false);
	} else if (ret == -ENOBUFS) {
		ret = 0;
	}

	fscache_put_operation(op);
	_leave(" = %d", ret);
	return ret;

submit_failed:
	wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	kfree(op);
	_leave(" = -ESTALE");
	return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
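
/* Illustrative sketch: a netfs might call this through the
 * fscache_check_consistency() wrapper when reopening a file, invalidating the
 * cookie if the cached copy can no longer be trusted:
 *
 *	if (fscache_check_consistency(vnode->cache, &vnode->aux) != 0)
 *		fscache_invalidate(vnode->cache);
 */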