Lines matching refs: krcp — identifier cross-reference for the symbol krcp, the per-CPU state of the kernel's kfree_rcu()/kvfree_rcu() batching machinery (kernel/rcu/tree.c). Each hit shows the source line number, the matching line, and the enclosing function, tagged with how krcp appears there (member, local, or argument).
3109 struct kfree_rcu_cpu *krcp; member
3175 struct kfree_rcu_cpu *krcp; in krc_this_cpu_lock() local
3178 krcp = this_cpu_ptr(&krc); in krc_this_cpu_lock()
3179 raw_spin_lock(&krcp->lock); in krc_this_cpu_lock()
3181 return krcp; in krc_this_cpu_lock()
3185 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) in krc_this_cpu_unlock() argument
3187 raw_spin_unlock_irqrestore(&krcp->lock, flags); in krc_this_cpu_unlock()
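
The pair above is the per-CPU lock discipline for all of this machinery. A minimal reconstruction sketch (not verbatim source): krc is assumed to be the per-CPU struct kfree_rcu_cpu variable visible in the later per_cpu_ptr(&krc, cpu) hits, and the local_irq_save() step is inferred from the flags argument that krc_this_cpu_unlock() restores.

    static inline struct kfree_rcu_cpu *
    krc_this_cpu_lock(unsigned long *flags)
    {
        struct kfree_rcu_cpu *krcp;

        local_irq_save(*flags);    /* Pin the CPU so this_cpu_ptr() stays valid. */
        krcp = this_cpu_ptr(&krc);
        raw_spin_lock(&krcp->lock);

        return krcp;
    }

    static inline void
    krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
    {
        raw_spin_unlock_irqrestore(&krcp->lock, flags);
    }

Disabling interrupts before this_cpu_ptr() prevents migration between looking up the per-CPU pointer and taking its lock.
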
3191 get_cached_bnode(struct kfree_rcu_cpu *krcp) in get_cached_bnode() argument
3193 if (!krcp->nr_bkv_objs) in get_cached_bnode()
3196 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); in get_cached_bnode()
3198 llist_del_first(&krcp->bkvcache); in get_cached_bnode()
3202 put_cached_bnode(struct kfree_rcu_cpu *krcp, in put_cached_bnode() argument
3206 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) in put_cached_bnode()
3209 llist_add((struct llist_node *) bnode, &krcp->bkvcache); in put_cached_bnode()
3210 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); in put_cached_bnode()
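
These two helpers maintain a small per-CPU cache of page-sized blocks on an llist, bounded by the rcu_min_cached_objs parameter. A sketch under the assumption that the block type is struct kvfree_rcu_bulk_data (that name does not appear in the hits above); nr_bkv_objs is updated with WRITE_ONCE() because the shrinker reads it without the lock (line 3603):

    /* Both helpers are called with krcp->lock held. */
    static inline struct kvfree_rcu_bulk_data *
    get_cached_bnode(struct kfree_rcu_cpu *krcp)
    {
        if (!krcp->nr_bkv_objs)
            return NULL;

        WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
        return (struct kvfree_rcu_bulk_data *)
            llist_del_first(&krcp->bkvcache);
    }

    static inline bool
    put_cached_bnode(struct kfree_rcu_cpu *krcp,
                     struct kvfree_rcu_bulk_data *bnode)
    {
        /* Refuse once the cache holds rcu_min_cached_objs blocks. */
        if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
            return false;

        llist_add((struct llist_node *) bnode, &krcp->bkvcache);
        WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
        return true;
    }
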
3215 drain_page_cache(struct kfree_rcu_cpu *krcp) in drain_page_cache() argument
3221 raw_spin_lock_irqsave(&krcp->lock, flags); in drain_page_cache()
3222 page_list = llist_del_all(&krcp->bkvcache); in drain_page_cache()
3223 WRITE_ONCE(krcp->nr_bkv_objs, 0); in drain_page_cache()
3224 raw_spin_unlock_irqrestore(&krcp->lock, flags); in drain_page_cache()
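
drain_page_cache() empties that cache for the shrinker. The hits show the detach under the lock; the free loop and the returned count are inferred from the count += drain_page_cache(krcp) caller at line 3620. A sketch:

    static int
    drain_page_cache(struct kfree_rcu_cpu *krcp)
    {
        struct llist_node *page_list, *pos, *n;
        unsigned long flags;
        int freed = 0;

        /* Detach the whole cache atomically... */
        raw_spin_lock_irqsave(&krcp->lock, flags);
        page_list = llist_del_all(&krcp->bkvcache);
        WRITE_ONCE(krcp->nr_bkv_objs, 0);
        raw_spin_unlock_irqrestore(&krcp->lock, flags);

        /* ...then free the pages without holding the lock. */
        llist_for_each_safe(pos, n, page_list) {
            free_page((unsigned long) pos);
            freed++;
        }

        return freed;
    }
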
3243 struct kfree_rcu_cpu *krcp; in kfree_rcu_work() local
3249 krcp = krwp->krcp; in kfree_rcu_work()
3251 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3261 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3288 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3289 if (put_cached_bnode(krcp, bkvhead[i])) in kfree_rcu_work()
3291 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
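
kfree_rcu_work() is the post-grace-period handler: it locks krcp to detach the batch's channels from its kfree_rcu_cpu_work (lines 3251-3261), bulk-frees the recorded pointers, then re-locks to recycle each emptied block (lines 3288-3291). A sketch of that recycle step; the free_page() fallback for a full cache is an inference:

    /* Inside kfree_rcu_work(), after the block's records are freed: */
    raw_spin_lock_irqsave(&krcp->lock, flags);
    if (put_cached_bnode(krcp, bkvhead[i]))
        bkvhead[i] = NULL;            /* Page now owned by the cache. */
    raw_spin_unlock_irqrestore(&krcp->lock, flags);

    if (bkvhead[i])                   /* Cache full: return the page. */
        free_page((unsigned long) bkvhead[i]);
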
3329 struct kfree_rcu_cpu *krcp = container_of(work, in kfree_rcu_monitor() local
3334 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_monitor()
3338 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); in kfree_rcu_monitor()
3345 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || in kfree_rcu_monitor()
3346 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || in kfree_rcu_monitor()
3347 (krcp->head && !krwp->head_free)) { in kfree_rcu_monitor()
3352 krwp->bkvhead_free[j] = krcp->bkvhead[j]; in kfree_rcu_monitor()
3353 krcp->bkvhead[j] = NULL; in kfree_rcu_monitor()
3360 krwp->head_free = krcp->head; in kfree_rcu_monitor()
3361 krcp->head = NULL; in kfree_rcu_monitor()
3364 WRITE_ONCE(krcp->count, 0); in kfree_rcu_monitor()
3380 if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) in kfree_rcu_monitor()
3381 krcp->monitor_todo = false; in kfree_rcu_monitor()
3383 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); in kfree_rcu_monitor()
3385 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_monitor()
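
kfree_rcu_monitor() is the delayed work that converts accumulated objects into RCU batches. The hits cover almost all of it; only the hand-off of a detached batch is missing, assumed here to be queue_rcu_work() on the rcu_work initialized at line 4690. A sketch of the body, with KFREE_N_BATCHES and FREE_N_CHANNELS taken as the batch-slot and bulk-channel counts:

    static void kfree_rcu_monitor(struct work_struct *work)
    {
        struct kfree_rcu_cpu *krcp = container_of(work,
            struct kfree_rcu_cpu, monitor_work.work);
        unsigned long flags;
        int i, j;

        raw_spin_lock_irqsave(&krcp->lock, flags);

        /* Move ready channels from krcp into any idle batch slot. */
        for (i = 0; i < KFREE_N_BATCHES; i++) {
            struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);

            if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
                (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
                (krcp->head && !krwp->head_free)) {
                /* Channels 1/2: bulk blocks (slab and vmalloc). */
                for (j = 0; j < FREE_N_CHANNELS; j++) {
                    if (!krwp->bkvhead_free[j]) {
                        krwp->bkvhead_free[j] = krcp->bkvhead[j];
                        krcp->bkvhead[j] = NULL;
                    }
                }
                /* Channel 3: the plain rcu_head list. */
                if (!krwp->head_free) {
                    krwp->head_free = krcp->head;
                    krcp->head = NULL;
                }

                WRITE_ONCE(krcp->count, 0);

                /* Assumed hand-off: free after a grace period. */
                queue_rcu_work(system_wq, &krwp->rcu_work);
            }
        }

        /* Re-arm only if some channel could not be detached. */
        if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
            krcp->monitor_todo = false;
        else
            schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);

        raw_spin_unlock_irqrestore(&krcp->lock, flags);
    }
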
3391 struct kfree_rcu_cpu *krcp = in schedule_page_work_fn() local
3394 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); in schedule_page_work_fn()
3401 struct kfree_rcu_cpu *krcp = in fill_page_cache_func() local
3409 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? in fill_page_cache_func()
3417 raw_spin_lock_irqsave(&krcp->lock, flags); in fill_page_cache_func()
3418 pushed = put_cached_bnode(krcp, bnode); in fill_page_cache_func()
3419 raw_spin_unlock_irqrestore(&krcp->lock, flags); in fill_page_cache_func()
3428 atomic_set(&krcp->work_in_progress, 0); in fill_page_cache_func()
3429 atomic_set(&krcp->backoff_page_cache_fill, 0); in fill_page_cache_func()
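
fill_page_cache_func() is the worker behind page_cache_work. Per line 3409, a set backoff_page_cache_fill flag (the shrinker sets it at line 3604) drops the refill target to a single page. A sketch; the exact GFP flags are an assumption:

    static void fill_page_cache_func(struct work_struct *work)
    {
        struct kfree_rcu_cpu *krcp = container_of(work,
            struct kfree_rcu_cpu, page_cache_work.work);
        struct kvfree_rcu_bulk_data *bnode;
        unsigned long flags;
        int i, nr_pages;
        bool pushed;

        /* Under shrinker pressure, refill just one page. */
        nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
            1 : rcu_min_cached_objs;

        for (i = 0; i < nr_pages; i++) {
            bnode = (struct kvfree_rcu_bulk_data *)
                __get_free_page(GFP_KERNEL | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN);
            if (!bnode)
                break;

            raw_spin_lock_irqsave(&krcp->lock, flags);
            pushed = put_cached_bnode(krcp, bnode);
            raw_spin_unlock_irqrestore(&krcp->lock, flags);

            if (!pushed) {            /* Cache already full. */
                free_page((unsigned long) bnode);
                break;
            }
        }

        /* Allow the next refill request and clear the backoff. */
        atomic_set(&krcp->work_in_progress, 0);
        atomic_set(&krcp->backoff_page_cache_fill, 0);
    }
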
3433 run_page_cache_worker(struct kfree_rcu_cpu *krcp) in run_page_cache_worker() argument
3436 !atomic_xchg(&krcp->work_in_progress, 1)) { in run_page_cache_worker()
3437 if (atomic_read(&krcp->backoff_page_cache_fill)) { in run_page_cache_worker()
3439 &krcp->page_cache_work, in run_page_cache_worker()
3442 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in run_page_cache_worker()
3443 krcp->hrtimer.function = schedule_page_work_fn; in run_page_cache_worker()
3444 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); in run_page_cache_worker()
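
run_page_cache_worker() kicks that refill from the kvfree_call_rcu() hot path, where krcp->lock is held with interrupts off, so it cannot queue work directly: the normal path arms a zero-delay hrtimer whose handler is schedule_page_work_fn() (line 3391), which then queues the fill work on system_highpri_wq. The atomic_xchg() allows only one refill in flight. A sketch; the rcu_scheduler_active gate and the backoff delay value are assumptions:

    static void
    run_page_cache_worker(struct kfree_rcu_cpu *krcp)
    {
        /* At most one refill in flight per CPU. */
        if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
            !atomic_xchg(&krcp->work_in_progress, 1)) {
            if (atomic_read(&krcp->backoff_page_cache_fill)) {
                /* Shrinker asked us to back off: refill later. */
                queue_delayed_work(system_wq, &krcp->page_cache_work,
                    msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
            } else {
                /*
                 * Cannot queue from this context directly; bounce
                 * through a zero-delay hrtimer instead.
                 */
                hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                krcp->hrtimer.function = schedule_page_work_fn;
                hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
            }
        }
    }
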
3456 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, in add_ptr_to_bulk_krc_lock() argument
3462 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3463 if (unlikely(!(*krcp)->initialized)) in add_ptr_to_bulk_krc_lock()
3469 if (!(*krcp)->bkvhead[idx] || in add_ptr_to_bulk_krc_lock()
3470 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) { in add_ptr_to_bulk_krc_lock()
3471 bnode = get_cached_bnode(*krcp); in add_ptr_to_bulk_krc_lock()
3473 krc_this_cpu_unlock(*krcp, *flags); in add_ptr_to_bulk_krc_lock()
3488 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3496 bnode->next = (*krcp)->bkvhead[idx]; in add_ptr_to_bulk_krc_lock()
3499 (*krcp)->bkvhead[idx] = bnode; in add_ptr_to_bulk_krc_lock()
3503 (*krcp)->bkvhead[idx]->records in add_ptr_to_bulk_krc_lock()
3504 [(*krcp)->bkvhead[idx]->nr_records++] = ptr; in add_ptr_to_bulk_krc_lock()
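
add_ptr_to_bulk_krc_lock() is the bulk fast path and explains the double-pointer krcp: when both the cache misses and can_alloc permits, it drops the per-CPU lock to call the page allocator and re-locks, possibly on a different CPU, so the caller's krcp must be updated. A sketch; the is_vmalloc_addr() channel selection and the GFP flags are reconstructions:

    static inline bool
    add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
        unsigned long *flags, void *ptr, bool can_alloc)
    {
        struct kvfree_rcu_bulk_data *bnode;
        int idx;

        *krcp = krc_this_cpu_lock(flags);
        if (unlikely(!(*krcp)->initialized))
            return false;

        idx = !!is_vmalloc_addr(ptr);    /* 0: slab, 1: vmalloc. */

        /* A new block is needed if there is none, or it is full. */
        if (!(*krcp)->bkvhead[idx] ||
            (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
            bnode = get_cached_bnode(*krcp);
            if (!bnode && can_alloc) {
                /* The allocator may sleep: drop the lock around it. */
                krc_this_cpu_unlock(*krcp, *flags);
                bnode = (struct kvfree_rcu_bulk_data *)
                    __get_free_page(GFP_KERNEL | __GFP_NORETRY |
                                    __GFP_NOMEMALLOC | __GFP_NOWARN);
                *krcp = krc_this_cpu_lock(flags);    /* Maybe a new CPU. */
            }

            if (!bnode)
                return false;    /* Caller falls back to channel 3. */

            /* Attach the empty block at the head of the channel. */
            bnode->nr_records = 0;
            bnode->next = (*krcp)->bkvhead[idx];
            (*krcp)->bkvhead[idx] = bnode;
        }

        /* Record the pointer; it is freed in bulk after a grace period. */
        (*krcp)->bkvhead[idx]->records
            [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
        return true;
    }
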
3524 struct kfree_rcu_cpu *krcp; in kvfree_call_rcu() local
3553 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); in kvfree_call_rcu()
3555 run_page_cache_worker(krcp); in kvfree_call_rcu()
3562 head->next = krcp->head; in kvfree_call_rcu()
3563 krcp->head = head; in kvfree_call_rcu()
3567 WRITE_ONCE(krcp->count, krcp->count + 1); in kvfree_call_rcu()
3571 !krcp->monitor_todo) { in kvfree_call_rcu()
3572 krcp->monitor_todo = true; in kvfree_call_rcu()
3573 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); in kvfree_call_rcu()
3577 krc_this_cpu_unlock(krcp, flags); in kvfree_call_rcu()
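
kvfree_call_rcu() ties the pieces together: try the bulk path first; on failure kick the page-cache refill and, when the caller supplied an rcu_head, fall back to chaining it onto krcp->head (channel 3). A condensed sketch of the queueing tail; the head->func assignment and the rcu_scheduler_active gate are inferred, and the headless synchronize_rcu() fallback is omitted:

    /* Tail of kvfree_call_rcu(), sketched. */
    success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
    if (!success) {
        run_page_cache_worker(krcp);    /* Refill for next time. */

        if (head) {
            /* Channel 3: plain rcu_head list fallback. */
            head->func = func;
            head->next = krcp->head;
            krcp->head = head;
            success = true;
        }
    }

    WRITE_ONCE(krcp->count, krcp->count + 1);

    /* Arm the monitor to drain after KFREE_DRAIN_JIFFIES. */
    if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
        !krcp->monitor_todo) {
        krcp->monitor_todo = true;
        schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
    }

    krc_this_cpu_unlock(krcp, flags);
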
3600 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_count() local
3602 count += READ_ONCE(krcp->count); in kfree_rcu_shrink_count()
3603 count += READ_ONCE(krcp->nr_bkv_objs); in kfree_rcu_shrink_count()
3604 atomic_set(&krcp->backoff_page_cache_fill, 1); in kfree_rcu_shrink_count()
3617 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_scan() local
3619 count = krcp->count; in kfree_rcu_shrink_scan()
3620 count += drain_page_cache(krcp); in kfree_rcu_shrink_scan()
3621 kfree_rcu_monitor(&krcp->monitor_work.work); in kfree_rcu_shrink_scan()
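
The shrinker touches krcp on both sides: kfree_rcu_shrink_count() sums the lockless counters across CPUs (the READ_ONCE() here pairs with the WRITE_ONCE() updates above) and sets the backoff flag, while kfree_rcu_shrink_scan() drains each CPU's page cache and runs a monitor pass synchronously. A sketch; the nr_to_scan accounting is an inference:

    static unsigned long
    kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
    {
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
            struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

            count += READ_ONCE(krcp->count);
            count += READ_ONCE(krcp->nr_bkv_objs);

            /* Tell future refills to back off. */
            atomic_set(&krcp->backoff_page_cache_fill, 1);
        }

        return count;
    }

    static unsigned long
    kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
    {
        int cpu, freed = 0;

        for_each_possible_cpu(cpu) {
            struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
            int count = krcp->count;

            count += drain_page_cache(krcp);
            /* Flush pending objects into an RCU batch right away. */
            kfree_rcu_monitor(&krcp->monitor_work.work);

            sc->nr_to_scan -= count;
            freed += count;
            if (sc->nr_to_scan <= 0)
                break;
        }

        return freed == 0 ? SHRINK_STOP : freed;
    }
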
3646 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_scheduler_running() local
3648 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_scheduler_running()
3649 if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) || in kfree_rcu_scheduler_running()
3650 krcp->monitor_todo) { in kfree_rcu_scheduler_running()
3651 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_scheduler_running()
3654 krcp->monitor_todo = true; in kfree_rcu_scheduler_running()
3655 schedule_delayed_work_on(cpu, &krcp->monitor_work, in kfree_rcu_scheduler_running()
3657 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_scheduler_running()
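
kfree_rcu_scheduler_running() runs once, when the scheduler becomes usable at boot: objects queued earlier had no way to schedule the monitor, so every CPU with pending work and an idle monitor gets one armed now. A sketch:

    void __init kfree_rcu_scheduler_running(void)
    {
        unsigned long flags;
        int cpu;

        for_each_possible_cpu(cpu) {
            struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

            raw_spin_lock_irqsave(&krcp->lock, flags);
            /* Nothing queued, or the monitor is already armed. */
            if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
                krcp->monitor_todo) {
                raw_spin_unlock_irqrestore(&krcp->lock, flags);
                continue;
            }
            krcp->monitor_todo = true;
            schedule_delayed_work_on(cpu, &krcp->monitor_work,
                                     KFREE_DRAIN_JIFFIES);
            raw_spin_unlock_irqrestore(&krcp->lock, flags);
        }
    }
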
4687 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_batch_init() local
4690 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); in kfree_rcu_batch_init()
4691 krcp->krw_arr[i].krcp = krcp; in kfree_rcu_batch_init()
4694 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); in kfree_rcu_batch_init()
4695 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); in kfree_rcu_batch_init()
4696 krcp->initialized = true; in kfree_rcu_batch_init()
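
Finally, kfree_rcu_batch_init() wires up each per-CPU krcp at boot: every batch slot gets its rcu_work handler plus a back-pointer to its owner, the two delayed works are initialized, and initialized is set last so callers that raced in earlier took the fallback path (line 3463). A sketch, assuming KFREE_N_BATCHES bounds the slot loop:

    static void __init kfree_rcu_batch_init(void)
    {
        int cpu, i;

        for_each_possible_cpu(cpu) {
            struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

            for (i = 0; i < KFREE_N_BATCHES; i++) {
                INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
                /* Back-pointer used by kfree_rcu_work() (line 3249). */
                krcp->krw_arr[i].krcp = krcp;
            }

            INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
            INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
            /* Published last: earlier callers take the fallback path. */
            krcp->initialized = true;
        }
    }
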