Lines matching +full:entry +full:-name: a fragmentary grep view of fs/nfs/nfs42xattr.c (the NFSv4.2 user extended attribute cache). Non-matching lines are elided; "..." marks those elisions below.

// SPDX-License-Identifier: GPL-2.0

/*
 * ... to a special-cased entry for the listxattr cache.
 */

/*
 * ... You can certainly add a lot more - but you get what you ask for
 * in those circumstances.
 */
	/* in nfs4_xattr_hash_init() */
	INIT_HLIST_HEAD(&cache->buckets[i].hlist);
	spin_lock_init(&cache->buckets[i].lock);
	cache->buckets[i].cache = cache;
	cache->buckets[i].draining = false;
/*
 * Wrapper functions to add a cache entry to, or remove it from, the
 * right LRU.
 */
static bool nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}
static bool nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}
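/*
 * The LRU declarations themselves are elided from this view. The wrappers
 * above choose between two global list_lru objects; nfs4_xattr_cache_lru is
 * confirmed by its uses further down, while the two entry LRU names below
 * are inferred from the shrinker names registered at the end of the file:
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;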
/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * treated as one by the memory shrinker.
 *
 * ... If the value fits into one page with the entry structure and the
 * name, it is part of the same allocation; larger values get a separate
 * kvmalloc() buffer (flagged NFS4_XATTR_ENTRY_EXTVAL).
 *
 * @name: Name of the extended attribute. NULL for a listxattr cache entry.
 * @len:  Length of the value. Can be 0 for zero-length attributes.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	...
	if (name != NULL) {
		slen = strlen(name) + 1;
		...
	}
	...
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	}
	...
	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}
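/*
 * A small sketch of the sizing rule from the comment above (hypothetical
 * helper name, not the kernel code itself): entry, name, and value share
 * one buffer when everything fits in a page; otherwise the value gets a
 * separate kvmalloc() buffer and the entry is flagged so that
 * nfs4_xattr_free_entry() knows to kvfree() it.
 */
static u32 nfs4_xattr_pick_entry_flags(size_t slen, size_t len)
{
	if (sizeof(struct nfs4_xattr_entry) + slen + len <= PAGE_SIZE)
		return 0;			/* value stored inline */
	return NFS4_XATTR_ENTRY_EXTVAL;		/* value via kvmalloc() */
}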
static void nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}
static void nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);
	if (WARN_ON(!list_empty(&entry->lru)))
		return;
	nfs4_xattr_free_entry(entry);
}
	/* in nfs4_xattr_free_cache_cb() */
	if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
		return;
	cache->buckets[i].draining = false;
	...
	cache->listxattr = NULL;
	/* in nfs4_xattr_alloc_cache() */
	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);
/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
	/* in nfs4_xattr_set_listcache() */
	spin_lock(&cache->listxattr_lock);
	old = cache->listxattr;
	if (old == ERR_PTR(-ESTALE)) {
		/* ... draining; refuse the update ... */
	}
	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);
	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
	spin_unlock(&cache->listxattr_lock);
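/*
 * Minimal illustration of the ERR_PTR() sentinel idiom used above (a
 * sketch with hypothetical types, not kernel code): one pointer-sized
 * field distinguishes "no entry" (NULL), "draining" (ERR_PTR(-ESTALE)),
 * and a real entry.
 */
struct demo_slot {
	spinlock_t lock;
	void *ptr;		/* NULL, ERR_PTR(-ESTALE), or an entry */
};

static bool demo_slot_install(struct demo_slot *s, void *new)
{
	bool ok = true;

	spin_lock(&s->lock);
	if (s->ptr == ERR_PTR(-ESTALE))
		ok = false;	/* draining: refuse new installs */
	else
		s->ptr = new;
	spin_unlock(&s->lock);
	return ok;
}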
	/* in nfs4_xattr_cache_unlink() */
	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;
	/* in nfs4_xattr_discard_cache() */
	struct nfs4_xattr_entry *entry;
	...
	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	/* ... for each hash bucket: ... */
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
 * ... The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * routines.
	/* in nfs4_xattr_get_cache() */
	spin_lock(&inode->i_lock);
	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		/* ... unlink the now-invalid cache ... */
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	/* ... optimistically allocate a new cache, then re-check: ... */
	spin_lock(&inode->i_lock);
	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
		/* invalidated again while unlocked; give up */
		spin_unlock(&inode->i_lock);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
		...
	}

	/* did someone beat us to it? */
	if (nfsi->xattr_cache != NULL) {
		newcache = nfsi->xattr_cache;
		kref_get(&newcache->ref);
	} else {
		kref_get(&cache->ref);
		nfsi->xattr_cache = cache;
		cache->inode = inode;
		list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
	}

	spin_unlock(&inode->i_lock);

	/* ... on a lost race, drop ours and use the winner's cache ... */
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
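/*
 * The shape above is the common optimistic-allocation pattern: allocate
 * without holding the spinlock, retake the lock, and either install the
 * new object or discard it when another thread won the race. A generic
 * sketch with hypothetical types (not kernel code):
 */
struct demo_obj { struct kref ref; };
struct demo_container { spinlock_t lock; struct demo_obj *obj; };

static struct demo_obj *demo_get_or_install(struct demo_container *c,
					    struct demo_obj *new)
{
	struct demo_obj *winner = NULL;

	spin_lock(&c->lock);
	if (c->obj != NULL) {		/* somebody beat us to it */
		winner = c->obj;
		kref_get(&winner->ref);
	} else {
		c->obj = new;		/* we won: install our copy */
	}
	spin_unlock(&c->lock);

	/* Caller drops its reference to 'new' when 'winner' is returned. */
	return winner != NULL ? winner : new;
}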
static struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}
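/*
 * The "& (ARRAY_SIZE(cache->buckets) - 1)" mask above only yields a
 * valid index because the bucket count is a power of two. A compile-time
 * guard one could add (a sketch, not among the matched lines):
 */
static inline void nfs4_xattr_hash_size_check(struct nfs4_xattr_cache *cache)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(ARRAY_SIZE(cache->buckets));
}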
static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}
	return entry;
}
static bool
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	...
	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);
	if (bucket->draining) {
		/* ... refuse: the cache is being torn down ... */
	}
	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL)
		hlist_del_init(&oldentry->hnode);
	else
		atomic_long_inc(&cache->nent);

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);
	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}
	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}
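/*
 * Note that in both nfs4_xattr_hash_add() and nfs4_xattr_hash_remove(),
 * the final kref_put() runs only after bucket->lock has been dropped:
 * if it releases the last reference, the callback may kvfree() a large
 * external value, and kvfree() must not be called from atomic context.
 */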
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);
	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);
	spin_unlock(&bucket->lock);

	return entry;
}
/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			     ssize_t buflen)
{
	/* ... look up the inode's cache; bail out if there is none ... */
	if (cache == NULL)
		return -ENOENT;

	entry = nfs4_xattr_hash_find(cache, name);
	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
			entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size) {
			ret = -ERANGE;
		} else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	return ret;
}
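/*
 * Typical caller pattern (a sketch, not from the matched lines): probe
 * with buflen == 0 to learn the cached size, then fetch into a buffer of
 * that size. A concurrent update between the two calls surfaces as
 * -ERANGE, which a caller would retry.
 */
static ssize_t demo_fetch_xattr(struct inode *inode, const char *name,
				char **bufp)
{
	ssize_t len;
	char *buf;

	len = nfs4_xattr_cache_get(inode, name, NULL, 0);  /* length probe */
	if (len < 0)
		return len;		/* e.g. -ENOENT: not cached */

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	len = nfs4_xattr_cache_get(inode, name, buf, len); /* real copy */
	if (len < 0)
		kfree(buf);
	else
		*bufp = buf;
	return len;
}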
	/* in nfs4_xattr_cache_list() */
	...
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);
	entry = cache->listxattr;
	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen) {
			ret = -ERANGE;
		} else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_entry *entry;
	...
	dprintk("%s: add '%s' len %lu\n", __func__,
		name, (unsigned long)buflen);
	...
	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;
	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	dprintk("%s: remove '%s'\n", __func__, name);
	...
	nfs4_xattr_hash_remove(cache, name);
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
	/* in nfs4_xattr_cache_set_list() */
	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is the same for all buckets, so just use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	/* in nfs4_xattr_cache_zap() */
	spin_lock(&inode->i_lock);
	/* ... unlink the cache, then discard it ... */
	spin_unlock(&inode->i_lock);
/*
 * The entry LRU is shrunk more aggressively than the cache LRU,
 * by setting @seeks to 1.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */
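/*
 * A sketch of how that aggressiveness is expressed. The field values and
 * the count callback name here are assumptions; only nfs4_xattr_entry_scan()
 * and the shrinker variable names are confirmed by the matched lines:
 */
static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,	/* assumed name */
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= 1,	/* cheapest to recreate: pruned first */
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};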
	/* in cache_lru_isolate() */
	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;
	...
	inode = cache->inode;
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);
	/* in nfs4_xattr_cache_scan() */
	list_del_init(&cache->dispose);
	...
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
static enum lru_status
entry_lru_isolate(struct list_head *item,
		  struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);
	...
	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket or,
	 * for a listxattr entry, the cache itself) so that it is no
	 * longer found, then put it on the isolate list to be freed
	 * later. This inverts the usual lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;
		kref_get(&entry->ref);
		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);
		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;
		kref_get(&entry->ref);
		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);
		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}
	/* in nfs4_xattr_entry_scan() */
	entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
				 dispose);
	list_del_init(&entry->dispose);
	/*
	 * Drop two references: the one taken for the isolate list
	 * above, and the one that was set when the entry was first
	 * allocated.
	 */
	kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
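/*
 * Reference lifecycle: an entry starts at a count of one from the
 * kref_init() in nfs4_xattr_alloc_entry(), and the isolate callbacks
 * above take a second reference (kref_get()) before moving the entry to
 * the dispose list. The two kref_put() calls therefore drop the count to
 * zero and free the entry through nfs4_xattr_free_entry_cb().
 */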
	/* in nfs4_xattr_cache_init_once() */
	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	...
	cache->listxattr = NULL;
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
	/* in nfs4_xattr_cache_init() */
	...
		return -ENOMEM;
	...
	ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
	...
	ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
	...
	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
				"nfs-xattr_large_entry");