Lines Matching refs:krcp (all references to the per-CPU struct kfree_rcu_cpu pointer used by the kvfree_rcu() batching machinery)
2876 struct kfree_rcu_cpu *krcp; member
2940 struct kfree_rcu_cpu *krcp; in krc_this_cpu_lock() local
2943 krcp = this_cpu_ptr(&krc); in krc_this_cpu_lock()
2944 raw_spin_lock(&krcp->lock); in krc_this_cpu_lock()
2946 return krcp; in krc_this_cpu_lock()
2950 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) in krc_this_cpu_unlock() argument
2952 raw_spin_unlock_irqrestore(&krcp->lock, flags); in krc_this_cpu_unlock()
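
The krc_this_cpu_lock()/krc_this_cpu_unlock() pair brackets every access to the per-CPU kfree_rcu_cpu state: the lock side saves and disables interrupts, resolves this CPU's krc instance with this_cpu_ptr(), and takes its raw spinlock, while the unlock side drops the lock and restores the saved flags via raw_spin_unlock_irqrestore(). A minimal user-space sketch of the same "lock and return the per-CPU slot" idiom, with a pthread mutex standing in for the raw spinlock and sched_getcpu() standing in for this_cpu_ptr(); all names below are illustrative, not the kernel API:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_SLOTS 64

/* Illustrative stand-in for struct kfree_rcu_cpu. */
struct krc_slot {
	pthread_mutex_t lock;
	unsigned long count;
};

static struct krc_slot slots[NR_SLOTS];

/* Lock and return "this CPU's" slot, as krc_this_cpu_lock() does. */
static struct krc_slot *slot_this_cpu_lock(void)
{
	int cpu = sched_getcpu();
	struct krc_slot *s = &slots[(cpu >= 0 && cpu < NR_SLOTS) ? cpu : 0];

	/* The kernel also disables interrupts here, which keeps the task on this CPU. */
	pthread_mutex_lock(&s->lock);
	return s;
}

static void slot_this_cpu_unlock(struct krc_slot *s)
{
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	for (int i = 0; i < NR_SLOTS; i++)
		pthread_mutex_init(&slots[i].lock, NULL);

	struct krc_slot *s = slot_this_cpu_lock();
	s->count++;		/* per-CPU state is only touched under the lock */
	slot_this_cpu_unlock(s);
	printf("count=%lu\n", s->count);
	return 0;
}
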
2956 get_cached_bnode(struct kfree_rcu_cpu *krcp) in get_cached_bnode() argument
2958 if (!krcp->nr_bkv_objs) in get_cached_bnode()
2961 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); in get_cached_bnode()
2963 llist_del_first(&krcp->bkvcache); in get_cached_bnode()
2967 put_cached_bnode(struct kfree_rcu_cpu *krcp, in put_cached_bnode() argument
2971 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) in put_cached_bnode()
2974 llist_add((struct llist_node *) bnode, &krcp->bkvcache); in put_cached_bnode()
2975 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); in put_cached_bnode()
2980 drain_page_cache(struct kfree_rcu_cpu *krcp) in drain_page_cache() argument
2986 raw_spin_lock_irqsave(&krcp->lock, flags); in drain_page_cache()
2987 page_list = llist_del_all(&krcp->bkvcache); in drain_page_cache()
2988 WRITE_ONCE(krcp->nr_bkv_objs, 0); in drain_page_cache()
2989 raw_spin_unlock_irqrestore(&krcp->lock, flags); in drain_page_cache()
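
get_cached_bnode(), put_cached_bnode() and drain_page_cache() together form a small per-CPU cache of pre-allocated bulk pages: put refuses to grow the cache past rcu_min_cached_objs, get pops one entry and updates nr_bkv_objs with WRITE_ONCE() so the counter can be read locklessly (the shrinker does exactly that), and drain detaches the whole llist and frees every page. A compact user-space sketch of the same bounded free-list idea, with a plain singly linked list in place of the llist and illustrative names throughout:

#include <stdlib.h>
#include <stdio.h>

#define CACHE_CAP 5	/* illustrative stand-in for rcu_min_cached_objs */

struct cache_node { struct cache_node *next; };

struct page_cache {
	struct cache_node *head;
	unsigned int nr_objs;
};

/* Pop one cached object, or NULL when the cache is empty. */
static struct cache_node *cache_get(struct page_cache *c)
{
	struct cache_node *n = c->head;

	if (!n)
		return NULL;
	c->head = n->next;
	c->nr_objs--;
	return n;
}

/* Stash an object unless the cache already holds CACHE_CAP entries. */
static int cache_put(struct page_cache *c, struct cache_node *n)
{
	if (c->nr_objs >= CACHE_CAP)
		return 0;
	n->next = c->head;
	c->head = n;
	c->nr_objs++;
	return 1;
}

/* Detach and free everything, returning how many objects were released. */
static int cache_drain(struct page_cache *c)
{
	struct cache_node *n = c->head;
	int freed = 0;

	c->head = NULL;
	c->nr_objs = 0;
	while (n) {
		struct cache_node *next = n->next;
		free(n);
		n = next;
		freed++;
	}
	return freed;
}

int main(void)
{
	struct page_cache c = { 0 };

	for (int i = 0; i < 8; i++) {
		struct cache_node *n = malloc(sizeof(*n));
		if (!cache_put(&c, n))
			free(n);	/* over the cap: the caller frees, as the kernel does */
	}
	printf("cached=%u drained=%d\n", c.nr_objs, cache_drain(&c));
	return 0;
}
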
3008 struct kfree_rcu_cpu *krcp; in kfree_rcu_work() local
3014 krcp = krwp->krcp; in kfree_rcu_work()
3016 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3026 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3053 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3054 if (put_cached_bnode(krcp, bkvhead[i])) in kfree_rcu_work()
3056 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3090 need_offload_krc(struct kfree_rcu_cpu *krcp) in need_offload_krc() argument
3095 if (krcp->bkvhead[i]) in need_offload_krc()
3098 return !!krcp->head; in need_offload_krc()
3102 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) in schedule_delayed_monitor_work() argument
3106 delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES; in schedule_delayed_monitor_work()
3107 if (delayed_work_pending(&krcp->monitor_work)) { in schedule_delayed_monitor_work()
3108 delay_left = krcp->monitor_work.timer.expires - jiffies; in schedule_delayed_monitor_work()
3110 mod_delayed_work(system_wq, &krcp->monitor_work, delay); in schedule_delayed_monitor_work()
3113 queue_delayed_work(system_wq, &krcp->monitor_work, delay); in schedule_delayed_monitor_work()
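
schedule_delayed_monitor_work() arms the per-CPU monitor with a one-jiffy delay once the backlog reaches KVFREE_BULK_MAX_ENTR and with the normal KFREE_DRAIN_JIFFIES interval otherwise; if the monitor is already pending, the timer is only shortened via mod_delayed_work(), never pushed further out. The delay selection reduces to the arithmetic below; the constants are illustrative placeholders, not the kernel's values:

#include <stdio.h>
#include <stdbool.h>

#define KFREE_DRAIN_JIFFIES	1280	/* illustrative placeholder, not the kernel's value */
#define KVFREE_BULK_MAX_ENTR	256	/* illustrative placeholder as well */

/*
 * Return the delay (in jiffies) the monitor should be (re)armed with, or 0
 * when an already-pending timer should be left alone because it will fire
 * no later than the newly computed delay.
 */
static unsigned long monitor_delay(unsigned long count, bool pending,
				   unsigned long expires, unsigned long jiffies)
{
	unsigned long delay = count >= KVFREE_BULK_MAX_ENTR ? 1 : KFREE_DRAIN_JIFFIES;

	if (pending) {
		unsigned long delay_left = expires - jiffies;

		if (delay < delay_left)
			return delay;	/* shorten the pending timer (mod_delayed_work) */
		return 0;		/* keep the sooner, already-armed expiry */
	}
	return delay;			/* nothing pending: queue_delayed_work */
}

int main(void)
{
	/* A heavy backlog shortens a pending timer down to one jiffy. */
	printf("%lu\n", monitor_delay(300, true, 2200, 1000));
	/* A light backlog leaves an already-sooner timer untouched. */
	printf("%lu\n", monitor_delay(10, true, 1100, 1000));
	return 0;
}
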
3121 struct kfree_rcu_cpu *krcp = container_of(work, in kfree_rcu_monitor() local
3126 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_monitor()
3130 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); in kfree_rcu_monitor()
3137 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || in kfree_rcu_monitor()
3138 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || in kfree_rcu_monitor()
3139 (krcp->head && !krwp->head_free)) { in kfree_rcu_monitor()
3144 krwp->bkvhead_free[j] = krcp->bkvhead[j]; in kfree_rcu_monitor()
3145 krcp->bkvhead[j] = NULL; in kfree_rcu_monitor()
3152 krwp->head_free = krcp->head; in kfree_rcu_monitor()
3153 krcp->head = NULL; in kfree_rcu_monitor()
3156 WRITE_ONCE(krcp->count, 0); in kfree_rcu_monitor()
3172 if (need_offload_krc(krcp)) in kfree_rcu_monitor()
3173 schedule_delayed_monitor_work(krcp); in kfree_rcu_monitor()
3175 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_monitor()
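
kfree_rcu_monitor() runs under krcp->lock and tries to hand the three gathered channels (the two bulk-array lists in bkvhead[] plus the rcu_head fallback list in head) over to a kfree_rcu_cpu_work slot whose corresponding free channel is still empty; each channel that moves is NULLed on the gathering side, krcp->count is reset, and if anything remains queued the monitor rearms itself through schedule_delayed_monitor_work(). A stripped-down sketch of that per-channel handoff, with illustrative types rather than the kernel structures:

#include <stdio.h>

struct node { struct node *next; };

/* Gathering side (per CPU) and in-flight side (per work item); illustrative. */
struct gather { struct node *chan[3]; unsigned long count; };
struct work   { struct node *chan[3]; };

/*
 * Move every gathered channel whose in-flight slot is free, the way
 * kfree_rcu_monitor() detaches bkvhead[0], bkvhead[1] and head into a
 * kfree_rcu_cpu_work slot.  Returns 1 when at least one channel moved.
 */
static int hand_off(struct gather *g, struct work *w)
{
	int moved = 0;

	for (int j = 0; j < 3; j++) {
		if (g->chan[j] && !w->chan[j]) {
			w->chan[j] = g->chan[j];	/* in flight from now on */
			g->chan[j] = NULL;		/* gathering side starts over */
			moved = 1;
		}
	}
	if (moved)
		g->count = 0;	/* the moved objects are no longer pending here */
	return moved;
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct gather g = { .chan = { &a, NULL, &b }, .count = 2 };
	struct work w = { { 0 } };

	printf("moved=%d remaining=%lu\n", hand_off(&g, &w), g.count);
	return 0;
}
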
3181 struct kfree_rcu_cpu *krcp = in schedule_page_work_fn() local
3184 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); in schedule_page_work_fn()
3191 struct kfree_rcu_cpu *krcp = in fill_page_cache_func() local
3199 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? in fill_page_cache_func()
3209 raw_spin_lock_irqsave(&krcp->lock, flags); in fill_page_cache_func()
3210 pushed = put_cached_bnode(krcp, bnode); in fill_page_cache_func()
3211 raw_spin_unlock_irqrestore(&krcp->lock, flags); in fill_page_cache_func()
3219 atomic_set(&krcp->work_in_progress, 0); in fill_page_cache_func()
3220 atomic_set(&krcp->backoff_page_cache_fill, 0); in fill_page_cache_func()
3224 run_page_cache_worker(struct kfree_rcu_cpu *krcp) in run_page_cache_worker() argument
3227 !atomic_xchg(&krcp->work_in_progress, 1)) { in run_page_cache_worker()
3228 if (atomic_read(&krcp->backoff_page_cache_fill)) { in run_page_cache_worker()
3230 &krcp->page_cache_work, in run_page_cache_worker()
3233 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in run_page_cache_worker()
3234 krcp->hrtimer.function = schedule_page_work_fn; in run_page_cache_worker()
3235 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); in run_page_cache_worker()
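
run_page_cache_worker() makes sure only one page-cache refill is in flight per CPU: atomic_xchg(&krcp->work_in_progress, 1) acts as a test-and-set gate, and fill_page_cache_func() reopens the gate (and clears the backoff flag) when it finishes. When the shrinker has requested backoff, the refill is queued with a delay instead of through the immediate hrtimer. The single-flight idiom in portable C11 atomics, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for krcp->work_in_progress. */
static atomic_int work_in_progress;

/* Test-and-set gate: only the caller that flips 0 -> 1 may queue the refill. */
static int try_start_refill(void)
{
	return !atomic_exchange(&work_in_progress, 1);
}

/* Last step of the refill worker, like fill_page_cache_func() clearing the flag. */
static void finish_refill(void)
{
	atomic_store(&work_in_progress, 0);
}

int main(void)
{
	printf("%d ", try_start_refill());	/* 1: gate acquired, refill queued */
	printf("%d ", try_start_refill());	/* 0: a refill is already in flight */
	finish_refill();
	printf("%d\n", try_start_refill());	/* 1: gate reopened */
	return 0;
}
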
3247 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, in add_ptr_to_bulk_krc_lock() argument
3253 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3254 if (unlikely(!(*krcp)->initialized)) in add_ptr_to_bulk_krc_lock()
3260 if (!(*krcp)->bkvhead[idx] || in add_ptr_to_bulk_krc_lock()
3261 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) { in add_ptr_to_bulk_krc_lock()
3262 bnode = get_cached_bnode(*krcp); in add_ptr_to_bulk_krc_lock()
3264 krc_this_cpu_unlock(*krcp, *flags); in add_ptr_to_bulk_krc_lock()
3279 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3287 bnode->next = (*krcp)->bkvhead[idx]; in add_ptr_to_bulk_krc_lock()
3290 (*krcp)->bkvhead[idx] = bnode; in add_ptr_to_bulk_krc_lock()
3294 (*krcp)->bkvhead[idx]->records in add_ptr_to_bulk_krc_lock()
3295 [(*krcp)->bkvhead[idx]->nr_records++] = ptr; in add_ptr_to_bulk_krc_lock()
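
add_ptr_to_bulk_krc_lock() appends the pointer to the current bulk block; when that block is missing or already holds KVFREE_BULK_MAX_ENTR records, it first asks the per-CPU page cache for a new block and, only if the cache is empty and allocation is allowed, drops the lock, allocates a fresh page, and re-takes the lock before linking the block in (which is why the krcp pointer is re-resolved through krc_this_cpu_lock() afterwards). A condensed user-space sketch of that unlock/allocate/relock shape, with a pthread mutex and calloc() standing in for the kernel primitives and all names illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define BULK_MAX 4	/* illustrative stand-in for KVFREE_BULK_MAX_ENTR */

struct bulk_block {
	struct bulk_block *next;
	unsigned int nr_records;
	void *records[BULK_MAX];
};

struct slot {
	pthread_mutex_t lock;
	struct bulk_block *bkvhead;
};

/*
 * Queue ptr into the slot's bulk list.  Called with slot->lock held; may
 * drop and re-take the lock around the allocation, like
 * add_ptr_to_bulk_krc_lock().  Returns false when no block could be
 * obtained, in which case the caller falls back to another path.
 */
static bool add_ptr_bulk_locked(struct slot *s, void *ptr)
{
	if (!s->bkvhead || s->bkvhead->nr_records == BULK_MAX) {
		struct bulk_block *b;

		/* The kernel first tries get_cached_bnode() here. */
		pthread_mutex_unlock(&s->lock);		/* never allocate under the lock */
		b = calloc(1, sizeof(*b));
		pthread_mutex_lock(&s->lock);
		if (!b)
			return false;
		b->next = s->bkvhead;			/* link the fresh block in front */
		s->bkvhead = b;
	}
	s->bkvhead->records[s->bkvhead->nr_records++] = ptr;
	return true;
}

int main(void)
{
	struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int obj;

	pthread_mutex_lock(&s.lock);
	bool ok = add_ptr_bulk_locked(&s, &obj);
	pthread_mutex_unlock(&s.lock);
	free(s.bkvhead);
	return ok ? 0 : 1;
}
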
3315 struct kfree_rcu_cpu *krcp; in kvfree_call_rcu() local
3344 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); in kvfree_call_rcu()
3346 run_page_cache_worker(krcp); in kvfree_call_rcu()
3353 head->next = krcp->head; in kvfree_call_rcu()
3354 krcp->head = head; in kvfree_call_rcu()
3358 WRITE_ONCE(krcp->count, krcp->count + 1); in kvfree_call_rcu()
3362 schedule_delayed_monitor_work(krcp); in kvfree_call_rcu()
3365 krc_this_cpu_unlock(krcp, flags); in kvfree_call_rcu()
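
kvfree_call_rcu() first tries the bulk path above; only when that fails does it fall back to chaining the object onto krcp->head through its embedded rcu_head, and a headless object (the single-argument kvfree_rcu(ptr) form) that cannot be queued is instead freed synchronously after a grace period. The successful paths bump krcp->count and arm the monitor before krc_this_cpu_unlock(). A skeleton of that queueing decision, with a stubbed-out bulk path and illustrative types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cb_head { struct cb_head *next; };

struct krc_sketch {
	struct cb_head *head;	/* fallback channel, like krcp->head */
	unsigned long count;
};

/* Stub for the bulk-array path; pretend no page could be found. */
static bool add_ptr_bulk(struct krc_sketch *krc, void *ptr, bool headless)
{
	(void)krc; (void)ptr; (void)headless;
	return false;
}

/*
 * Skeleton of the queueing decision in kvfree_call_rcu(): prefer the bulk
 * array, otherwise chain the object through its callback head.  A headless
 * object that cannot be queued is the case the kernel resolves by waiting
 * for a grace period and freeing inline.
 */
static bool queue_for_free(struct krc_sketch *krc, struct cb_head *head, void *ptr)
{
	if (!add_ptr_bulk(krc, ptr, head == NULL)) {
		if (!head)
			return false;	/* headless and no page: synchronous fallback */
		head->next = krc->head;
		krc->head = head;
	}
	krc->count++;
	return true;
}

int main(void)
{
	struct krc_sketch krc = { 0 };
	struct cb_head h = { 0 };

	printf("queued=%d count=%lu\n", queue_for_free(&krc, &h, &h), krc.count);
	return 0;
}
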
3388 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_count() local
3390 count += READ_ONCE(krcp->count); in kfree_rcu_shrink_count()
3391 count += READ_ONCE(krcp->nr_bkv_objs); in kfree_rcu_shrink_count()
3392 atomic_set(&krcp->backoff_page_cache_fill, 1); in kfree_rcu_shrink_count()
3405 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_scan() local
3407 count = krcp->count; in kfree_rcu_shrink_scan()
3408 count += drain_page_cache(krcp); in kfree_rcu_shrink_scan()
3409 kfree_rcu_monitor(&krcp->monitor_work.work); in kfree_rcu_shrink_scan()
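
The shrinker hooks treat both queued objects and cached pages as reclaimable: kfree_rcu_shrink_count() sums krcp->count and nr_bkv_objs across all CPUs and sets backoff_page_cache_fill so upcoming refills are deferred, while kfree_rcu_shrink_scan() drains each CPU's page cache and runs the monitor right away to push pending objects toward a grace period. The counting side is essentially the per-CPU reduction sketched here, with illustrative names and CPU count:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4	/* illustrative */

struct krc_stat {
	unsigned long count;		/* objects waiting for a grace period */
	unsigned long nr_cached;	/* pages sitting in the per-CPU cache */
	atomic_int backoff_fill;	/* ask the refill worker to back off */
};

static struct krc_stat stats[NR_CPUS];

/* Mirror of kfree_rcu_shrink_count(): report everything as reclaimable. */
static unsigned long shrink_count(void)
{
	unsigned long total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		total += stats[cpu].count;
		total += stats[cpu].nr_cached;
		atomic_store(&stats[cpu].backoff_fill, 1);
	}
	return total;
}

int main(void)
{
	stats[0].count = 7;
	stats[2].nr_cached = 3;
	printf("reclaimable=%lu\n", shrink_count());
	return 0;
}
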
3434 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_scheduler_running() local
3436 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_scheduler_running()
3437 if (need_offload_krc(krcp)) in kfree_rcu_scheduler_running()
3438 schedule_delayed_monitor_work(krcp); in kfree_rcu_scheduler_running()
3439 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_scheduler_running()
4742 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_batch_init() local
4745 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); in kfree_rcu_batch_init()
4746 krcp->krw_arr[i].krcp = krcp; in kfree_rcu_batch_init()
4749 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); in kfree_rcu_batch_init()
4750 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); in kfree_rcu_batch_init()
4751 krcp->initialized = true; in kfree_rcu_batch_init()