Lines matching refs:ac
(Identifier cross-reference for "ac", the struct array_cache pointer used throughout the Linux SLAB allocator, mm/slab.c. The leading number on each match is the mm/slab.c line; the trailing annotation names the enclosing function and whether the hit is a struct member, a function argument, or a local variable.)
198 struct array_cache ac; member
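All of these matches revolve around the SLAB allocator's per-CPU object cache. For orientation, here is a sketch of the two structures involved, inferred from the field accesses in the matches below and from my reading of mm/slab.c; exact layout and comments may differ between kernel versions:

    struct array_cache {
        unsigned int avail;      /* number of cached object pointers in entry[] */
        unsigned int limit;      /* capacity of entry[] */
        unsigned int batchcount; /* objects moved per refill/flush trip */
        unsigned int touched;    /* set on use; tested and cleared by the reaper */
        void *entry[];           /* LIFO stack of free object pointers */
    };

    /* Per-remote-node container for "alien" frees; the member at line 198. */
    struct alien_cache {
        spinlock_t lock;
        struct array_cache ac;
    };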
522 static void init_arraycache(struct array_cache *ac, int limit, int batch) in init_arraycache() argument
524 if (ac) { in init_arraycache()
525 ac->avail = 0; in init_arraycache()
526 ac->limit = limit; in init_arraycache()
527 ac->batchcount = batch; in init_arraycache()
528 ac->touched = 0; in init_arraycache()
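The matched lines 524 through 528 give essentially the whole function; reassembled around the signature at line 522:

    static void init_arraycache(struct array_cache *ac, int limit, int batch)
    {
        if (ac) {
            ac->avail = 0;           /* a fresh cache starts empty */
            ac->limit = limit;
            ac->batchcount = batch;
            ac->touched = 0;         /* and unreferenced, as far as the reaper knows */
        }
    }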
536 struct array_cache *ac = NULL; in alloc_arraycache() local
538 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
546 kmemleak_no_scan(ac); in alloc_arraycache()
547 init_arraycache(ac, entries, batchcount); in alloc_arraycache()
548 return ac; in alloc_arraycache()
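A plausible reconstruction of the surrounding allocator, assuming the usual mm/slab.c size computation (header plus the flexible entry[] array); the kmemleak_no_scan() call at line 546 keeps kmemleak from treating stale entry[] contents as live references:

    static struct array_cache *alloc_arraycache(int node, int entries,
                                                int batchcount, gfp_t gfp)
    {
        /* Header and the trailing entry[] array come from one allocation. */
        size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
        struct array_cache *ac = NULL;

        ac = kmalloc_node(memsize, gfp, node);
        /*
         * entry[] holds pointers to free objects; if kmemleak scanned it,
         * stale pointers would count as references and hide real leaks.
         */
        kmemleak_no_scan(ac);
        init_arraycache(ac, entries, batchcount);
        return ac;
    }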
642 init_arraycache(&alc->ac, entries, batch); in __alloc_alien_cache()
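The alien (remote-node) variant wraps an array_cache in a spinlock. A hedged sketch of __alloc_alien_cache(); everything outside the matched line 642 is reconstructed from memory of mm/slab.c:

    static struct alien_cache *__alloc_alien_cache(int node, int entries,
                                                   int batch, gfp_t gfp)
    {
        size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
        struct alien_cache *alc = NULL;

        alc = kmalloc_node(memsize, gfp, node);
        if (alc) {
            kmemleak_no_scan(alc);
            init_arraycache(&alc->ac, entries, batch);  /* line 642 */
            spin_lock_init(&alc->lock);
        }
        return alc;
    }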
685 struct array_cache *ac, int node, in __drain_alien_cache() argument
690 if (ac->avail) { in __drain_alien_cache()
698 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
700 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
701 ac->avail = 0; in __drain_alien_cache()
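Lines 685 through 701 show the drain path for remote frees: everything queued in an alien array is pushed back to its home node, preferably into that node's shared array rather than all the way onto the slab freelists. Reassembled; get_node() and the list_lock handling are my reconstruction:

    static void __drain_alien_cache(struct kmem_cache *cachep,
                                    struct array_cache *ac, int node,
                                    struct list_head *list)
    {
        struct kmem_cache_node *n = get_node(cachep, node);

        if (ac->avail) {
            spin_lock(&n->list_lock);
            /*
             * Stuff as many pointers as fit into the home node's shared
             * array first; that avoids the cost of pushing them onto the
             * slab freelists and pulling them out again later.
             */
            if (n->shared)
                transfer_objects(n->shared, ac, ac->limit);

            /* Whatever did not fit goes back to the slabs proper. */
            free_block(cachep, ac->entry, ac->avail, node, list);
            ac->avail = 0;
            spin_unlock(&n->list_lock);
        }
    }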
715 struct array_cache *ac; in reap_alien() local
718 ac = &alc->ac; in reap_alien()
719 if (ac->avail && spin_trylock_irq(&alc->lock)) { in reap_alien()
722 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
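reap_alien() runs from the periodic cache reaper, so it uses spin_trylock_irq() and simply skips a contended alien cache rather than stalling. A sketch built around lines 715 through 722, with the surrounding declarations reconstructed:

    static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
    {
        int node = numa_mem_id();
        struct alien_cache *alc = n->alien[node];
        struct array_cache *ac;

        if (alc) {
            ac = &alc->ac;
            /* Best effort only: skip if someone else holds the lock. */
            if (ac->avail && spin_trylock_irq(&alc->lock)) {
                LIST_HEAD(list);

                __drain_alien_cache(cachep, ac, node, &list);
                spin_unlock_irq(&alc->lock);
                slabs_destroy(cachep, &list);
            }
        }
    }

drain_alien_cache() (lines 735 through 745) is the unconditional sibling: it walks every node's alien_cache, takes each lock outright, and funnels into the same __drain_alien_cache() helper.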
735 struct array_cache *ac; in drain_alien_cache() local
743 ac = &alc->ac; in drain_alien_cache()
745 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
757 struct array_cache *ac; in __cache_free_alien() local
764 ac = &alien->ac; in __cache_free_alien()
766 if (unlikely(ac->avail == ac->limit)) { in __cache_free_alien()
768 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
770 ac->entry[ac->avail++] = objp; in __cache_free_alien()
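The free side of the alien machinery: a cross-node free is parked in the alien array for the page's home node, and the array is drained in bulk only when it fills up. A reconstruction around lines 757 through 770; the fallback branch and STATS accounting are from memory of mm/slab.c:

    static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
                                  int node, int page_node)
    {
        struct kmem_cache_node *n;
        struct alien_cache *alien = NULL;
        struct array_cache *ac;
        LIST_HEAD(list);

        n = get_node(cachep, node);
        if (n->alien && n->alien[page_node]) {
            alien = n->alien[page_node];
            ac = &alien->ac;
            spin_lock(&alien->lock);
            if (unlikely(ac->avail == ac->limit)) {
                /* Array full: flush the whole batch home first. */
                STATS_INC_ACOVERFLOW(cachep);
                __drain_alien_cache(cachep, ac, page_node, &list);
            }
            ac->entry[ac->avail++] = objp;   /* line 770: LIFO push */
            spin_unlock(&alien->lock);
            slabs_destroy(cachep, &list);
        } else {
            /* No alien array for that node: free directly to its slabs. */
            n = get_node(cachep, page_node);
            spin_lock(&n->list_lock);
            free_block(cachep, &objp, 1, page_node, &list);
            spin_unlock(&n->list_lock);
            slabs_destroy(cachep, &list);
        }
        return 1;
    }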
2119 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, in drain_array_locked() argument
2124 if (!ac || !ac->avail) in drain_array_locked()
2127 tofree = free_all ? ac->avail : (ac->limit + 4) / 5; in drain_array_locked()
2128 if (tofree > ac->avail) in drain_array_locked()
2129 tofree = (ac->avail + 1) / 2; in drain_array_locked()
2131 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2132 ac->avail -= tofree; in drain_array_locked()
2133 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); in drain_array_locked()
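The drain heuristic frees about a fifth of the array's capacity per pass, capped at half of what is actually cached: with limit 120 that is (120 + 4) / 5 = 24 pointers, but if only 10 are available the cap yields (10 + 1) / 2 = 5. Reassembled from lines 2119 through 2133:

    static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
                                   int node, bool free_all, struct list_head *list)
    {
        int tofree;

        if (!ac || !ac->avail)
            return;

        /* Roughly a fifth of the capacity, but at most half of avail. */
        tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
        if (tofree > ac->avail)
            tofree = (ac->avail + 1) / 2;

        /*
         * entry[] is pushed and popped at the tail, so entry[0..tofree)
         * holds the coldest pointers; free those, slide the rest down.
         */
        free_block(cachep, ac->entry, tofree, node, list);
        ac->avail -= tofree;
        memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
    }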
2139 struct array_cache *ac; in do_drain() local
2145 ac = cpu_cache_get(cachep); in do_drain()
2148 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2151 ac->avail = 0; in do_drain()
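do_drain() is the per-CPU callback that empties a CPU's array completely, used when a cache is shrunk or destroyed. A sketch; the arg cast and node lookup outside the matched lines are reconstruction:

    static void do_drain(void *arg)
    {
        struct kmem_cache *cachep = arg;
        struct array_cache *ac;
        int node = numa_mem_id();
        struct kmem_cache_node *n;
        LIST_HEAD(list);

        check_irq_off();
        ac = cpu_cache_get(cachep);                             /* line 2145 */
        n = get_node(cachep, node);
        spin_lock(&n->list_lock);
        free_block(cachep, ac->entry, ac->avail, node, &list);  /* line 2148 */
        spin_unlock(&n->list_lock);
        ac->avail = 0;                                          /* line 2151 */
        slabs_destroy(cachep, &list);
    }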
2886 struct array_cache *ac, struct page *page, int batchcount) in alloc_block() argument
2899 ac->entry[ac->avail++] = slab_get_obj(cachep, page); in alloc_block()
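alloc_block() is the refill helper that pops objects off one slab page into the per-CPU array. A sketch around lines 2886 and 2899; the STATS_* macros are no-ops on non-debug builds:

    static __always_inline int alloc_block(struct kmem_cache *cachep,
                    struct array_cache *ac, struct page *page, int batchcount)
    {
        /* There must be at least one object available for allocation. */
        BUG_ON(page->active >= cachep->num);

        while (page->active < cachep->num && batchcount--) {
            STATS_INC_ALLOCED(cachep);
            STATS_INC_ACTIVE(cachep);
            STATS_SET_HIGH(cachep);

            ac->entry[ac->avail++] = slab_get_obj(cachep, page);
        }

        return batchcount;   /* objects still wanted by the caller */
    }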
2909 struct array_cache *ac, *shared; in cache_alloc_refill() local
2917 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2918 batchcount = ac->batchcount; in cache_alloc_refill()
2919 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { in cache_alloc_refill()
2929 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2938 if (shared && transfer_objects(ac, shared, batchcount)) { in cache_alloc_refill()
2951 batchcount = alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
2956 n->free_objects -= ac->avail; in cache_alloc_refill()
2962 if (unlikely(!ac->avail)) { in cache_alloc_refill()
2977 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2978 if (!ac->avail && page) in cache_alloc_refill()
2979 alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
2982 if (!ac->avail) in cache_alloc_refill()
2985 ac->touched = 1; in cache_alloc_refill()
2987 return ac->entry[--ac->avail]; in cache_alloc_refill()
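cache_alloc_refill() is the allocation slow path: refill the empty per-CPU array from the node's shared array if possible, otherwise from partial slabs, and grow the cache as a last resort. A condensed reconstruction of lines 2909 through 2987; pfmemalloc handling and debug-only checks are omitted, and helper names such as get_first_slab() and fixup_slab_list() are from my reading of mm/slab.c, so they may vary by version:

    static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
    {
        int batchcount;
        struct kmem_cache_node *n;
        struct array_cache *ac, *shared;
        int node = numa_mem_id();
        void *list = NULL;
        struct page *page;

        check_irq_off();
        ac = cpu_cache_get(cachep);
        batchcount = ac->batchcount;
        if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
            /* Little recent activity: partial refill to avoid bouncing. */
            batchcount = BATCHREFILL_LIMIT;
        }
        n = get_node(cachep, node);

        BUG_ON(ac->avail > 0 || !n);
        shared = READ_ONCE(n->shared);
        if (!n->free_objects && (!shared || !shared->avail))
            goto direct_grow;

        spin_lock(&n->list_lock);
        shared = READ_ONCE(n->shared);

        /* Cheapest source first: bulk-copy from the shared array. */
        if (shared && transfer_objects(ac, shared, batchcount)) {
            shared->touched = 1;
            goto alloc_done;
        }

        while (batchcount > 0) {
            page = get_first_slab(n, false);   /* partial, then free slabs */
            if (!page)
                goto must_grow;

            batchcount = alloc_block(cachep, ac, page, batchcount);
            fixup_slab_list(cachep, n, page, &list);
        }

    must_grow:
        n->free_objects -= ac->avail;
    alloc_done:
        spin_unlock(&n->list_lock);
        fixup_objfreelist_debug(cachep, &list);

    direct_grow:
        if (unlikely(!ac->avail)) {
            page = cache_grow_begin(cachep, gfp_exact_node(flags), node);

            /* cache_grow_begin() can reenable interrupts; ac may change. */
            ac = cpu_cache_get(cachep);
            if (!ac->avail && page)
                alloc_block(cachep, ac, page, batchcount);
            cache_grow_end(cachep, page);

            if (!ac->avail)
                return NULL;
        }
        ac->touched = 1;

        return ac->entry[--ac->avail];
    }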
3040 struct array_cache *ac; in ____cache_alloc() local
3044 ac = cpu_cache_get(cachep); in ____cache_alloc()
3045 if (likely(ac->avail)) { in ____cache_alloc()
3046 ac->touched = 1; in ____cache_alloc()
3047 objp = ac->entry[--ac->avail]; in ____cache_alloc()
3059 ac = cpu_cache_get(cachep); in ____cache_alloc()
3068 kmemleak_erase(&ac->entry[ac->avail]); in ____cache_alloc()
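____cache_alloc() is the allocation fast path: pop the top pointer if the array has one, otherwise fall into the refill above. A reconstruction of lines 3040 through 3068; note the re-read of ac after refill, since refilling can reenable interrupts and replace the per-CPU cache:

    static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
        void *objp;
        struct array_cache *ac;

        check_irq_off();

        ac = cpu_cache_get(cachep);
        if (likely(ac->avail)) {
            ac->touched = 1;
            objp = ac->entry[--ac->avail];   /* LIFO pop: cache-hot object */

            STATS_INC_ALLOCHIT(cachep);
            goto out;
        }

        STATS_INC_ALLOCMISS(cachep);
        objp = cache_alloc_refill(cachep, flags);
        /* ac may have been updated by cache_alloc_refill() */
        ac = cpu_cache_get(cachep);

    out:
        /*
         * Clear the stale slot so kmemleak does not see a lingering
         * reference to the object we just handed out.
         */
        if (objp)
            kmemleak_erase(&ac->entry[ac->avail]);
        return objp;
    }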
3371 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3378 batchcount = ac->batchcount; in cache_flusharray()
3390 ac->entry, sizeof(void *) * batchcount); in cache_flusharray()
3396 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3413 ac->avail -= batchcount; in cache_flusharray()
3414 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); in cache_flusharray()
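cache_flusharray() empties one batch from a full per-CPU array on the free path, preferring the node's shared array over the slab freelists, then compacts the survivors to the front. A sketch of lines 3371 through 3414 (a STATS-only accounting block is omitted):

    static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
    {
        int batchcount;
        struct kmem_cache_node *n;
        int node = numa_mem_id();
        LIST_HEAD(list);

        batchcount = ac->batchcount;
        check_irq_off();
        n = get_node(cachep, node);
        spin_lock(&n->list_lock);
        if (n->shared) {
            struct array_cache *shared_array = n->shared;
            int max = shared_array->limit - shared_array->avail;

            if (max) {
                /* Oldest pointers sit at entry[0]; hand those over. */
                if (batchcount > max)
                    batchcount = max;
                memcpy(&(shared_array->entry[shared_array->avail]),
                       ac->entry, sizeof(void *) * batchcount);
                shared_array->avail += batchcount;
                goto free_done;
            }
        }

        free_block(cachep, ac->entry, batchcount, node, &list);
    free_done:
        spin_unlock(&n->list_lock);
        slabs_destroy(cachep, &list);
        ac->avail -= batchcount;
        memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *) * ac->avail);
    }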
3434 struct array_cache *ac = cpu_cache_get(cachep); in ___cache_free() local
3452 if (ac->avail < ac->limit) { in ___cache_free()
3456 cache_flusharray(cachep, ac); in ___cache_free()
3468 ac->entry[ac->avail++] = objp; in ___cache_free()
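___cache_free() is the free-path mirror of ____cache_alloc(): push onto the per-CPU array if there is room, otherwise flush a batch first. A condensed sketch around lines 3434 through 3468; the elided hooks (debug checks, NUMA alien handling, pfmemalloc) are summarized as comments:

    void ___cache_free(struct kmem_cache *cachep, void *objp,
                       unsigned long caller)
    {
        struct array_cache *ac = cpu_cache_get(cachep);

        check_irq_off();
        /* ... debug hooks, then cache_free_alien() for remote-node objects ... */

        if (ac->avail < ac->limit) {
            STATS_INC_FREEHIT(cachep);
        } else {
            STATS_INC_FREEMISS(cachep);
            cache_flusharray(cachep, ac);   /* makes room for a batch of frees */
        }

        /* ... pfmemalloc special case elided ... */

        ac->entry[ac->avail++] = objp;      /* LIFO push: reused while cache-hot */
    }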
3829 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache() local
3834 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
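When limits are retuned, __do_tune_cpucache() installs fresh per-CPU arrays and then drains the old ones back to their home nodes. The drain loop, reconstructed around lines 3829 and 3834 (prev is assumed to be the previous percpu array_cache allocation):

    /* Old arrays are gone from cachep->cpu_cache; flush their contents. */
    for_each_online_cpu(cpu) {
        LIST_HEAD(list);
        int node;
        struct kmem_cache_node *n;
        struct array_cache *ac = per_cpu_ptr(prev, cpu);

        node = cpu_to_mem(cpu);
        n = get_node(cachep, node);
        spin_lock_irq(&n->list_lock);
        free_block(cachep, ac->entry, ac->avail, node, &list);
        spin_unlock_irq(&n->list_lock);
        slabs_destroy(cachep, &list);
    }
    free_percpu(prev);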
3945 struct array_cache *ac, int node) in drain_array() argument
3952 if (!ac || !ac->avail) in drain_array()
3955 if (ac->touched) { in drain_array()
3956 ac->touched = 0; in drain_array()
3961 drain_array_locked(cachep, ac, node, false, &list); in drain_array()
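Finally, drain_array() is the reaper-side entry point; the touched flag implements one period of aging, so an array in active use gets a grace period before being trimmed. A reconstruction of lines 3945 through 3961:

    static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
                            struct array_cache *ac, int node)
    {
        LIST_HEAD(list);

        /* ac from n->shared can be freed if we don't hold the slab_mutex. */
        check_mutex_acquired();

        if (!ac || !ac->avail)
            return;

        /* Recently used: clear the flag and spare it this round. */
        if (ac->touched) {
            ac->touched = 0;
            return;
        }

        spin_lock_irq(&n->list_lock);
        drain_array_locked(cachep, ac, node, false, &list);
        spin_unlock_irq(&n->list_lock);

        slabs_destroy(cachep, &list);
    }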