Lines matching full:cache (fs/nfs/nfs42xattr.c, NFSv4.2 user extended attribute cache)

6  * User extended attribute client side cache functions.
21 * a cache structure attached to NFS inodes. This structure is allocated
22 * when needed, and freed when the cache is zapped.
24 * The cache structure contains a hash table of entries, and a pointer
25 * to a special-cased entry for the listxattr cache.
28 * counting. The cache entries use a similar refcounting scheme.
30 * This makes freeing a cache, both from the shrinker and from the
31 * zap cache path, easy. It also means that, in current use cases,
40 * Two shrinkers deal with the cache entries themselves: one for
45 * The other shrinker frees the cache structures themselves.
64 struct nfs4_xattr_cache *cache; member
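The match above is a back-pointer from a hash bucket to its owning cache. For orientation, here is a minimal sketch of the structures these matches imply; fields named in the matched lines are certain, while the bucket count, field order, and everything marked "assumed" (here and in the sketches below) are reconstructions, not verbatim source.

#define NFS4_XATTR_HASH_SIZE    64      /* assumed; must be a power of two for the bucket mask */

struct nfs4_xattr_bucket {
        spinlock_t lock;
        struct hlist_head hlist;
        struct nfs4_xattr_cache *cache; /* back-pointer, the match above */
        bool draining;                  /* set while the cache is torn down */
};

struct nfs4_xattr_entry {
        struct kref ref;
        struct hlist_node hnode;
        struct list_head lru;
        struct list_head dispose;
        char *xattr_name;               /* NULL for the listxattr entry */
        void *xattr_value;
        size_t xattr_size;
        struct nfs4_xattr_bucket *bucket;
        uint32_t flags;
};

struct nfs4_xattr_cache {
        struct kref ref;
        struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
        struct list_head lru;
        struct list_head dispose;
        atomic_long_t nent;             /* number of hashed entries */
        spinlock_t listxattr_lock;
        struct inode *inode;
        struct nfs4_xattr_entry *listxattr;     /* special-cased entry */
};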
106 nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) in nfs4_xattr_hash_init() argument
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
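Joined together, the matched lines above plausibly form the whole bucket initializer; only the loop header and local variable are filled in here (NFS4_XATTR_HASH_SIZE is the assumed bucket count from the sketch above).

static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
        unsigned int i;

        /* Reset every bucket and point it back at its owning cache. */
        for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&cache->buckets[i].hlist);
                spin_lock_init(&cache->buckets[i].lock);
                cache->buckets[i].cache = cache;
                cache->buckets[i].draining = false;
        }
}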
125 * Wrapper functions to add a cache entry to the right LRU.
150 * This function allocates cache entries. They are the normal
152 * cache. Those allocations use the same entry so that they can be
155 * xattr cache entries are allocated together with names. If the
163 * @name: Name of the extended attribute. NULL for listxattr cache
165 * @value: Value of attribute, or listxattr cache. NULL if the
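The comment fragments describe a combined allocation: the entry struct and name always share one buffer, and the value is stored inline when the whole thing fits in a page. A condensed sketch of that sizing logic, assuming a PAGE_SIZE cutoff and kvmalloc() for oversized values, and omitting the copy-from-pages path; the function shape is an assumption built around the documented behavior.

static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value, size_t len)
{
        struct nfs4_xattr_entry *entry;
        size_t slen = (name != NULL) ? strlen(name) + 1 : 0;
        size_t alloclen = sizeof(*entry) + slen;
        bool inline_val = (alloclen + len <= PAGE_SIZE); /* assumed cutoff */
        char *buf;
        void *valp = NULL;

        if (inline_val)
                alloclen += len;        /* value rides in the same kmalloc */

        buf = kmalloc(alloclen, GFP_KERNEL);
        if (buf == NULL)
                return NULL;
        entry = (struct nfs4_xattr_entry *)buf;

        entry->xattr_name = NULL;
        if (name != NULL) {
                entry->xattr_name = buf + sizeof(*entry);
                memcpy(entry->xattr_name, name, slen);
        }

        if (len != 0) {
                if (inline_val) {
                        valp = buf + sizeof(*entry) + slen;
                } else {
                        /* Large value: separate allocation. */
                        valp = kvmalloc(len, GFP_KERNEL);
                        if (valp == NULL) {
                                kfree(buf);
                                return NULL;
                        }
                }
                if (value != NULL)
                        memcpy(valp, value, len);
        }

        kref_init(&entry->ref);
        entry->xattr_value = valp;
        entry->xattr_size = len;
        entry->bucket = NULL;
        entry->flags = inline_val ? 0 : 1; /* assumed 'external value' flag */
        INIT_LIST_HEAD(&entry->lru);
        INIT_LIST_HEAD(&entry->dispose);
        INIT_HLIST_NODE(&entry->hnode);

        return entry;
}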
270 struct nfs4_xattr_cache *cache; in nfs4_xattr_free_cache_cb() local
273 cache = container_of(kref, struct nfs4_xattr_cache, ref); in nfs4_xattr_free_cache_cb()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
281 cache->listxattr = NULL; in nfs4_xattr_free_cache_cb()
283 kmem_cache_free(nfs4_xattr_cache_cachep, cache); in nfs4_xattr_free_cache_cb()
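Around the matched lines, the kref release callback plausibly reads as follows; the loop bounds and the bare return are assumptions, the WARN_ON and the frees come straight from the matches.

static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
        struct nfs4_xattr_cache *cache;
        int i;

        cache = container_of(kref, struct nfs4_xattr_cache, ref);

        for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
                /* A cache must be fully drained before its last ref drops. */
                if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
                        return;
                cache->buckets[i].draining = false;
        }

        cache->listxattr = NULL;

        kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}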
290 struct nfs4_xattr_cache *cache; in nfs4_xattr_alloc_cache() local
292 cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL); in nfs4_xattr_alloc_cache()
293 if (cache == NULL) in nfs4_xattr_alloc_cache()
296 kref_init(&cache->ref); in nfs4_xattr_alloc_cache()
297 atomic_long_set(&cache->nent, 0); in nfs4_xattr_alloc_cache()
299 return cache; in nfs4_xattr_alloc_cache()
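The allocator is nearly complete in the matches; filled out, it plausibly relies on the slab constructor (nfs4_xattr_cache_init_once(), matched near the end of this listing) for the one-time setup, so only the refcount and entry count need resetting.

static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
        struct nfs4_xattr_cache *cache;

        cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL);
        if (cache == NULL)
                return NULL;

        /* Buckets, locks and list heads were set up by the constructor. */
        kref_init(&cache->ref);
        atomic_long_set(&cache->nent, 0);

        return cache;
}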
303 * Set the listxattr cache, which is a special-cased cache entry.
305 * the cache is being drained - this prevents a new listxattr
306 * cache from being added to what is now a stale cache.
309 nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, in nfs4_xattr_set_listcache() argument
315 spin_lock(&cache->listxattr_lock); in nfs4_xattr_set_listcache()
317 old = cache->listxattr; in nfs4_xattr_set_listcache()
324 cache->listxattr = new; in nfs4_xattr_set_listcache()
333 spin_unlock(&cache->listxattr_lock); in nfs4_xattr_set_listcache()
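Filled out around the matches, the listxattr setter plausibly swaps the special entry under listxattr_lock and refuses the update when the drain marker ERR_PTR(-ESTALE) is present. nfs4_xattr_entry_lru_add()/_del() (the wrappers mentioned at line 125) and nfs4_xattr_free_entry_cb() are assumed names.

static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
                         struct nfs4_xattr_entry *new)
{
        struct nfs4_xattr_entry *old;
        int ret = 1;

        spin_lock(&cache->listxattr_lock);

        old = cache->listxattr;

        /* ERR_PTR(-ESTALE) marks a cache that is being drained. */
        if (old == ERR_PTR(-ESTALE)) {
                ret = 0;
                goto out;
        }

        cache->listxattr = new;
        if (new != NULL && new != ERR_PTR(-ESTALE))
                nfs4_xattr_entry_lru_add(new);

        if (old != NULL) {
                nfs4_xattr_entry_lru_del(old);
                kref_put(&old->ref, nfs4_xattr_free_entry_cb);
        }
out:
        spin_unlock(&cache->listxattr_lock);

        return ret;
}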
339 * Unlink a cache from its parent inode, clearing out an invalid
340 * cache. Must be called with i_lock held.
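No code lines for the unlink helper matched, so this one is purely a sketch of what the description implies: detach under i_lock, take the cache off its LRU, and clear the validity bit. nfs4_xattr_cache_unlink() is an assumed name.

static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_xattr_cache *oldcache = nfsi->xattr_cache;

        if (oldcache != NULL) {
                /* Off the cache LRU, so the shrinker can no longer see it. */
                list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
                oldcache->inode = NULL;
        }
        nfsi->xattr_cache = NULL;
        nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

        return oldcache;
}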
363 * Discard a cache. Called by get_cache() if there was an old,
364 * invalid cache. Can also be called from a shrinker callback.
366 * The cache is dead, it has already been unlinked from its inode,
367 * and no longer appears on the cache LRU list.
375 * any way to 'find' this cache. Then, remove the entries from the hash
378 * At that point, the cache will remain empty and can be freed when the final
384 nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) in nfs4_xattr_discard_cache() argument
391 nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE)); in nfs4_xattr_discard_cache()
394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
406 atomic_long_set(&cache->nent, 0); in nfs4_xattr_discard_cache()
408 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_discard_cache()
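Combining the matched lines with the comment above them, the teardown plausibly proceeds bucket by bucket: mark the listxattr slot stale, set each bucket draining, and unhook every entry. The iteration details and the entry helpers are assumptions.

static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
        unsigned int i;
        struct nfs4_xattr_entry *entry;
        struct nfs4_xattr_bucket *bucket;
        struct hlist_node *n;

        /* Block any new listxattr entry on this now-stale cache. */
        nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

        for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
                bucket = &cache->buckets[i];

                spin_lock(&bucket->lock);
                bucket->draining = true;        /* refuse future adds */
                hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
                        nfs4_xattr_entry_lru_del(entry);
                        hlist_del_init(&entry->hnode);
                        kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
                }
                spin_unlock(&bucket->lock);
        }

        atomic_long_set(&cache->nent, 0);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}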
412 * Get a referenced copy of the cache structure. Avoid doing allocs
416 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
417 * and acts accordingly, replacing the cache when needed. For the read case
418 * (!add), this means that the caller must make sure that the cache
420 * revalidate_inode to do this. The attribute cache timeout (for the
429 struct nfs4_xattr_cache *cache, *oldcache, *newcache; in nfs4_xattr_get_cache() local
433 cache = oldcache = NULL; in nfs4_xattr_get_cache()
440 cache = nfsi->xattr_cache; in nfs4_xattr_get_cache()
442 if (cache != NULL) in nfs4_xattr_get_cache()
443 kref_get(&cache->ref); in nfs4_xattr_get_cache()
447 if (add && cache == NULL) { in nfs4_xattr_get_cache()
450 cache = nfs4_xattr_alloc_cache(); in nfs4_xattr_get_cache()
451 if (cache == NULL) in nfs4_xattr_get_cache()
457 * The cache was invalidated again. Give up, in nfs4_xattr_get_cache()
462 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_get_cache()
463 cache = NULL; in nfs4_xattr_get_cache()
474 kref_get(&cache->ref); in nfs4_xattr_get_cache()
475 nfsi->xattr_cache = cache; in nfs4_xattr_get_cache()
476 cache->inode = inode; in nfs4_xattr_get_cache()
477 list_lru_add(&nfs4_xattr_cache_lru, &cache->lru); in nfs4_xattr_get_cache()
483 * If there was a race, throw away the cache we just in nfs4_xattr_get_cache()
488 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_get_cache()
489 cache = newcache; in nfs4_xattr_get_cache()
495 * Discard the now orphaned old cache. in nfs4_xattr_get_cache()
500 return cache; in nfs4_xattr_get_cache()
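The matches outline the optimistic-allocation dance the comment promises: allocate outside i_lock, retake the lock, and cope with both re-invalidation and a concurrent winner. A condensed sketch that connects the matched lines; the validity-bit checks and exact locking spans are assumptions.

static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_xattr_cache *cache = NULL, *oldcache = NULL, *newcache = NULL;

        spin_lock(&inode->i_lock);
        if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
                oldcache = nfs4_xattr_cache_unlink(inode);
        else
                cache = nfsi->xattr_cache;
        if (cache != NULL)
                kref_get(&cache->ref);
        spin_unlock(&inode->i_lock);

        if (add && cache == NULL) {
                cache = nfs4_xattr_alloc_cache();       /* alloc outside i_lock */
                if (cache == NULL)
                        goto out;

                spin_lock(&inode->i_lock);
                if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
                        /* Invalidated again: what we would add is stale. */
                        spin_unlock(&inode->i_lock);
                        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
                        cache = NULL;
                        goto out;
                }
                if (nfsi->xattr_cache != NULL) {
                        /* Someone beat us to it; take a ref on theirs. */
                        newcache = nfsi->xattr_cache;
                        kref_get(&newcache->ref);
                } else {
                        kref_get(&cache->ref);
                        nfsi->xattr_cache = cache;
                        cache->inode = inode;
                        list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
                }
                spin_unlock(&inode->i_lock);

                if (newcache != NULL) {
                        /* Lost the race: drop ours, use the winner's. */
                        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
                        cache = newcache;
                }
        }
out:
        /* Discard the now-orphaned old cache, outside i_lock. */
        if (oldcache != NULL)
                nfs4_xattr_discard_cache(oldcache);
        return cache;
}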
504 nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_bucket() argument
506 return &cache->buckets[jhash(name, strlen(name), 0) & in nfs4_xattr_hash_bucket()
507 (ARRAY_SIZE(cache->buckets) - 1)]; in nfs4_xattr_hash_bucket()
526 nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, in nfs4_xattr_hash_add() argument
533 bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name); in nfs4_xattr_hash_add()
548 atomic_long_inc(&cache->nent); in nfs4_xattr_hash_add()
564 nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_remove() argument
569 bucket = nfs4_xattr_hash_bucket(cache, name); in nfs4_xattr_hash_remove()
577 atomic_long_dec(&cache->nent); in nfs4_xattr_hash_remove()
587 nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_find() argument
592 bucket = nfs4_xattr_hash_bucket(cache, name); in nfs4_xattr_hash_find()
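Of the three hash-table operations matched above (add, remove, find), removal is the shortest to fill out; nfs4_xattr_get_entry() is an assumed name for the bucket-scan helper all three would share.

static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
        struct nfs4_xattr_bucket *bucket;
        struct nfs4_xattr_entry *entry;

        bucket = nfs4_xattr_hash_bucket(cache, name);

        spin_lock(&bucket->lock);

        entry = nfs4_xattr_get_entry(bucket, name); /* assumed lookup helper */
        if (entry != NULL) {
                hlist_del_init(&entry->hnode);
                nfs4_xattr_entry_lru_del(entry);
                atomic_long_dec(&cache->nent);
        }

        spin_unlock(&bucket->lock);

        /* Drop the hash table's reference outside the bucket lock. */
        if (entry != NULL)
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}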
606 * Entry point to retrieve an entry from the cache.
611 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_get() local
615 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_get()
616 if (cache == NULL) in nfs4_xattr_cache_get()
620 entry = nfs4_xattr_hash_find(cache, name); in nfs4_xattr_cache_get()
623 dprintk("%s: cache hit '%s', len %lu\n", __func__, in nfs4_xattr_cache_get()
636 dprintk("%s: cache miss '%s'\n", __func__, name); in nfs4_xattr_cache_get()
640 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_get()
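Filled out, the read entry point plausibly distinguishes a size probe (buflen == 0), a too-small buffer, and a plain copy; the dprintk lines are verbatim from the matches, the rest is reconstruction. The listxattr read path (next group of matches) is analogous, but reads cache->listxattr under listxattr_lock instead of hashing a name.

ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name,
                             char *buf, ssize_t buflen)
{
        struct nfs4_xattr_cache *cache;
        struct nfs4_xattr_entry *entry;
        ssize_t ret;

        cache = nfs4_xattr_get_cache(inode, 0);
        if (cache == NULL)
                return -ENOENT;

        entry = nfs4_xattr_hash_find(cache, name);
        if (entry != NULL) {
                dprintk("%s: cache hit '%s', len %lu\n", __func__,
                    entry->xattr_name, (unsigned long)entry->xattr_size);
                if (buflen == 0) {
                        ret = entry->xattr_size;        /* size probe only */
                } else if (buflen < entry->xattr_size) {
                        ret = -ERANGE;
                } else {
                        memcpy(buf, entry->xattr_value, entry->xattr_size);
                        ret = entry->xattr_size;
                }
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
        } else {
                dprintk("%s: cache miss '%s'\n", __func__, name);
                ret = -ENOENT;
        }

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
        return ret;
}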
646 * Retrieve a cached list of xattrs from the cache.
650 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_list() local
654 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_list()
655 if (cache == NULL) in nfs4_xattr_cache_list()
658 spin_lock(&cache->listxattr_lock); in nfs4_xattr_cache_list()
660 entry = cache->listxattr; in nfs4_xattr_cache_list()
676 spin_unlock(&cache->listxattr_lock); in nfs4_xattr_cache_list()
678 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_list()
684 * Add an xattr to the cache.
686 * This also invalidates the xattr list cache.
691 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_add() local
697 cache = nfs4_xattr_get_cache(inode, 1); in nfs4_xattr_cache_add()
698 if (cache == NULL) in nfs4_xattr_cache_add()
705 (void)nfs4_xattr_set_listcache(cache, NULL); in nfs4_xattr_cache_add()
707 if (!nfs4_xattr_hash_add(cache, entry)) in nfs4_xattr_cache_add()
711 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_add()
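Filled out around the matches, the add path allocates the new entry first, then invalidates the list cache before inserting, so a reader can never see a fresh attribute missing from a stale list. The nfs4_xattr_alloc_entry() arguments follow the simplified allocator sketched earlier.

void nfs4_xattr_cache_add(struct inode *inode, const char *name,
                          const char *buf, ssize_t buflen)
{
        struct nfs4_xattr_cache *cache;
        struct nfs4_xattr_entry *entry;

        cache = nfs4_xattr_get_cache(inode, 1);
        if (cache == NULL)
                return;

        entry = nfs4_xattr_alloc_entry(name, buf, buflen);
        if (entry == NULL)
                goto out;

        /* A new attribute makes any cached listxattr result stale. */
        (void)nfs4_xattr_set_listcache(cache, NULL);

        if (!nfs4_xattr_hash_add(cache, entry))
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}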
716 * Remove an xattr from the cache.
718 * This also invalidates the xattr list cache.
722 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_remove() local
726 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_remove()
727 if (cache == NULL) in nfs4_xattr_cache_remove()
730 (void)nfs4_xattr_set_listcache(cache, NULL); in nfs4_xattr_cache_remove()
731 nfs4_xattr_hash_remove(cache, name); in nfs4_xattr_cache_remove()
733 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_remove()
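The remove path is almost entirely covered by the matches; joined, it plausibly reads:

void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
        struct nfs4_xattr_cache *cache;

        cache = nfs4_xattr_get_cache(inode, 0);
        if (cache == NULL)
                return;

        /* Invalidate the list cache, then drop the named entry. */
        (void)nfs4_xattr_set_listcache(cache, NULL);
        nfs4_xattr_hash_remove(cache, name);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}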
737 * Cache listxattr output, replacing any possible old one.
742 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_set_list() local
745 cache = nfs4_xattr_get_cache(inode, 1); in nfs4_xattr_cache_set_list()
746 if (cache == NULL) in nfs4_xattr_cache_set_list()
754 * This is just there to be able to get to bucket->cache, in nfs4_xattr_cache_set_list()
758 entry->bucket = &cache->buckets[0]; in nfs4_xattr_cache_set_list()
760 if (!nfs4_xattr_set_listcache(cache, entry)) in nfs4_xattr_cache_set_list()
764 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_set_list()
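The buckets[0] assignment above exists only so that entry->bucket->cache resolves for the listxattr entry, which lives outside the hash table; any bucket would do, since they all point at the same cache. A sketch of the surrounding function (alloc_entry arguments again follow the simplified sketch):

void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
                               ssize_t buflen)
{
        struct nfs4_xattr_cache *cache;
        struct nfs4_xattr_entry *entry;

        cache = nfs4_xattr_get_cache(inode, 1);
        if (cache == NULL)
                return;

        entry = nfs4_xattr_alloc_entry(NULL, buf, buflen);
        if (entry == NULL)
                goto out;

        /* Any bucket works: this only provides a path to bucket->cache. */
        entry->bucket = &cache->buckets[0];

        if (!nfs4_xattr_set_listcache(cache, entry))
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}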
768 * Zap the entire cache. Called when an inode is evicted.
783 * The entry LRU is shrunk more aggressively than the cache LRU,
786 * Cache structures are freed only when they've become empty, after
828 struct nfs4_xattr_cache *cache = container_of(item, in cache_lru_isolate() local
831 if (atomic_long_read(&cache->nent) > 1) in cache_lru_isolate()
835 * If a cache structure is on the LRU list, we know that in cache_lru_isolate()
839 inode = cache->inode; in cache_lru_isolate()
844 kref_get(&cache->ref); in cache_lru_isolate()
846 cache->inode = NULL; in cache_lru_isolate()
849 list_lru_isolate(lru, &cache->lru); in cache_lru_isolate()
853 list_add_tail(&cache->dispose, dispose); in cache_lru_isolate()
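Joined, the cache-LRU isolate callback plausibly reads as below. The trylock is the key move: the shrinker holds the LRU lock and wants i_lock, the reverse of the normal order, so it must not block. The four-argument list_lru walk callback signature is assumed.

static enum lru_status
cache_lru_isolate(struct list_head *item,
        struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *dispose = arg;
        struct inode *inode;
        struct nfs4_xattr_cache *cache = container_of(item,
            struct nfs4_xattr_cache, lru);

        /* Only reclaim caches that are (nearly) empty. */
        if (atomic_long_read(&cache->nent) > 1)
                return LRU_SKIP;

        /* Inverted lock order: only trylock, skip on contention. */
        inode = cache->inode;
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        kref_get(&cache->ref);

        cache->inode = NULL;
        NFS_I(inode)->xattr_cache = NULL;
        NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
        list_lru_isolate(lru, &cache->lru);

        spin_unlock(&inode->i_lock);

        list_add_tail(&cache->dispose, dispose);
        return LRU_REMOVED;
}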
862 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_scan() local
867 cache = list_first_entry(&dispose, struct nfs4_xattr_cache, in nfs4_xattr_cache_scan()
869 list_del_init(&cache->dispose); in nfs4_xattr_cache_scan()
870 nfs4_xattr_discard_cache(cache); in nfs4_xattr_cache_scan()
871 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_scan()
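The scan side then walks the LRU and disposes of whatever was isolated; list_lru_shrink_walk() and the shrinker signature are standard kernel interfaces, but their use here is reconstructed around the matched loop body.

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;
        struct nfs4_xattr_cache *cache;

        freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
            cache_lru_isolate, &dispose);
        while (!list_empty(&dispose)) {
                cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
                    dispose);
                list_del_init(&cache->dispose);
                nfs4_xattr_discard_cache(cache);
                kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
        }

        return freed;
}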
893 struct nfs4_xattr_cache *cache; in entry_lru_isolate() local
898 cache = bucket->cache; in entry_lru_isolate()
901 * Unhook the entry from its parent (either a cache bucket in entry_lru_isolate()
902 * or a cache structure if it's a listxattr buf), so that in entry_lru_isolate()
910 /* Regular cache entry */ in entry_lru_isolate()
917 atomic_long_dec(&cache->nent); in entry_lru_isolate()
922 /* Listxattr cache entry */ in entry_lru_isolate()
923 if (!spin_trylock(&cache->listxattr_lock)) in entry_lru_isolate()
928 cache->listxattr = NULL; in entry_lru_isolate()
931 spin_unlock(&cache->listxattr_lock); in entry_lru_isolate()
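The entry isolate callback handles both parents the comment describes: a hash bucket for named entries, or the cache itself for the listxattr entry (recognizable by its NULL name). Joined from the matches, with the regular-entry unhooking partly assumed.

static enum lru_status
entry_lru_isolate(struct list_head *item,
        struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *dispose = arg;
        struct nfs4_xattr_bucket *bucket;
        struct nfs4_xattr_cache *cache;
        struct nfs4_xattr_entry *entry = container_of(item,
            struct nfs4_xattr_entry, lru);

        bucket = entry->bucket;
        cache = bucket->cache;

        /* Inverted lock order again: only trylock, skip on contention. */
        if (entry->xattr_name != NULL) {
                /* Regular cache entry */
                if (!spin_trylock(&bucket->lock))
                        return LRU_SKIP;

                kref_get(&entry->ref);

                hlist_del_init(&entry->hnode);
                atomic_long_dec(&cache->nent);
                list_lru_isolate(lru, &entry->lru);

                spin_unlock(&bucket->lock);
        } else {
                /* Listxattr cache entry */
                if (!spin_trylock(&cache->listxattr_lock))
                        return LRU_SKIP;

                kref_get(&entry->ref);

                cache->listxattr = NULL;
                list_lru_isolate(lru, &entry->lru);

                spin_unlock(&cache->listxattr_lock);
        }

        list_add_tail(&entry->dispose, dispose);
        return LRU_REMOVED;
}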
984 struct nfs4_xattr_cache *cache = p; in nfs4_xattr_cache_init_once() local
986 spin_lock_init(&cache->listxattr_lock); in nfs4_xattr_cache_init_once()
987 atomic_long_set(&cache->nent, 0); in nfs4_xattr_cache_init_once()
988 nfs4_xattr_hash_init(cache); in nfs4_xattr_cache_init_once()
989 cache->listxattr = NULL; in nfs4_xattr_cache_init_once()
990 INIT_LIST_HEAD(&cache->lru); in nfs4_xattr_cache_init_once()
991 INIT_LIST_HEAD(&cache->dispose); in nfs4_xattr_cache_init_once()
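Joined, the slab constructor is complete except for its signature. A kmem_cache constructor runs once per object lifetime rather than once per allocation, which is why nfs4_xattr_alloc_cache() above only resets the refcount and entry count.

static void nfs4_xattr_cache_init_once(void *p)
{
        struct nfs4_xattr_cache *cache = p;

        /* Runs once per slab object, not on every allocation. */
        spin_lock_init(&cache->listxattr_lock);
        atomic_long_set(&cache->nent, 0);
        nfs4_xattr_hash_init(cache);
        cache->listxattr = NULL;
        INIT_LIST_HEAD(&cache->lru);
        INIT_LIST_HEAD(&cache->dispose);
}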