Lines matching refs:pd — all uses of the struct parallel_data pointer (pd) in kernel/padata.c, from a pre-5.3 padata that still carries the timer-based reorder fallback. Each entry gives: source line, code, enclosing function, and (for declarations) whether pd is an argument or a local. Short hedged sketches are interleaved below; every toy_* and my_* identifier in them is invented for illustration and is not a padata symbol.
38 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) in padata_index_to_cpu() argument
42 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
44 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
49 static int padata_cpu_hash(struct parallel_data *pd) in padata_cpu_hash() argument
59 seq_nr = atomic_inc_return(&pd->seq_nr); in padata_cpu_hash()
60 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
62 return padata_index_to_cpu(pd, cpu_index); in padata_cpu_hash()
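
padata_cpu_hash() and padata_index_to_cpu() implement the round-robin deal-out: each submission takes the next sequence number, reduces it modulo the weight of pd->cpumask.pcpu, and walks to that bit of the mask. A minimal standalone sketch of the same idea (rr_next_cpu() is an invented name; padata keeps seq_nr inside parallel_data):

#include <linux/atomic.h>
#include <linux/cpumask.h>

static atomic_t rr_seq = ATOMIC_INIT(-1);       /* padata also starts at -1 */

/* Pick CPUs round-robin from @mask; @mask must be non-empty. */
static int rr_next_cpu(const struct cpumask *mask)
{
        unsigned int seq = atomic_inc_return(&rr_seq);
        int index = seq % cpumask_weight(mask);
        int cpu = cpumask_first(mask);

        while (index-- > 0)
                cpu = cpumask_next(cpu, mask);

        return cpu;
}
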
109 struct parallel_data *pd; in padata_do_parallel() local
113 pd = rcu_dereference_bh(pinst->pd); in padata_do_parallel()
119 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) in padata_do_parallel()
126 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) in padata_do_parallel()
130 atomic_inc(&pd->refcnt); in padata_do_parallel()
131 padata->pd = pd; in padata_do_parallel()
134 target_cpu = padata_cpu_hash(pd); in padata_do_parallel()
136 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
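
padata_do_parallel() is the submission path: under RCU it loads the live pd from the instance, rejects callback CPUs outside pd->cpumask.cbcpu, applies backpressure once pd->refcnt reaches MAX_OBJ_NUM, and queues the job on the round-robin target CPU. A hedged caller-side sketch for this kernel vintage, modeled loosely on pcrypt (my_* names and the GFP choice are illustrative):

#include <linux/kernel.h>
#include <linux/padata.h>
#include <linux/slab.h>

struct my_job {
        struct padata_priv padata;      /* embedded so container_of() works */
        /* ... payload ... */
};

static void my_parallel(struct padata_priv *padata)
{
        /* heavy work runs here, possibly out of order across CPUs */
        padata_do_serial(padata);       /* then hand off to the reorder path */
}

static void my_serial(struct padata_priv *padata)
{
        struct my_job *job = container_of(padata, struct my_job, padata);

        kfree(job);                     /* runs in original submission order */
}

static int my_submit(struct padata_instance *pinst, int cb_cpu)
{
        struct my_job *job = kzalloc(sizeof(*job), GFP_ATOMIC);

        if (!job)
                return -ENOMEM;
        job->padata.parallel = my_parallel;
        job->padata.serial   = my_serial;
        /* pre-5.3 signature: cb_cpu is passed by value */
        return padata_do_parallel(pinst, &job->padata, cb_cpu);
}
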
166 static struct padata_priv *padata_get_next(struct parallel_data *pd) in padata_get_next() argument
174 num_cpus = cpumask_weight(pd->cpumask.pcpu); in padata_get_next()
180 next_nr = pd->processed; in padata_get_next()
182 cpu = padata_index_to_cpu(pd, next_index); in padata_get_next()
183 next_queue = per_cpu_ptr(pd->pqueue, cpu); in padata_get_next()
193 atomic_dec(&pd->reorder_objects); in padata_get_next()
195 pd->processed++; in padata_get_next()
202 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { in padata_get_next()
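
padata_get_next() exploits the round-robin invariant: pd->processed counts objects already delivered in order, so the object with the next sequence number can only live on the per-CPU queue at index (processed % num_cpus). The real function returns ERR_PTR codes to distinguish "not finished yet" from "nothing left"; a simplified sketch with invented types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_item {
        struct list_head list;
};

struct toy_queue {
        spinlock_t lock;
        struct list_head list;          /* objects completed on this CPU */
};

/* @queues is indexed the same way the jobs were dealt out round-robin. */
static struct toy_item *toy_get_next(struct toy_queue *queues,
                                     unsigned int *processed,
                                     unsigned int num_cpus)
{
        struct toy_queue *q = &queues[*processed % num_cpus];
        struct toy_item *item = NULL;

        spin_lock(&q->lock);
        if (!list_empty(&q->list)) {
                item = list_first_entry(&q->list, struct toy_item, list);
                list_del_init(&item->list);
                (*processed)++;         /* advance the expected sequence */
        }
        spin_unlock(&q->lock);

        /* NULL: the next-in-order object has not completed yet */
        return item;
}
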
212 static void padata_reorder(struct parallel_data *pd) in padata_reorder() argument
217 struct padata_instance *pinst = pd->pinst; in padata_reorder()
229 if (!spin_trylock_bh(&pd->lock)) in padata_reorder()
233 padata = padata_get_next(pd); in padata_reorder()
249 del_timer(&pd->timer); in padata_reorder()
250 spin_unlock_bh(&pd->lock); in padata_reorder()
255 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
264 spin_unlock_bh(&pd->lock); in padata_reorder()
271 if (atomic_read(&pd->reorder_objects) in padata_reorder()
273 mod_timer(&pd->timer, jiffies + HZ); in padata_reorder()
275 del_timer(&pd->timer); in padata_reorder()
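
padata_reorder() wraps that lookup in a single-drainer loop: whoever wins the trylock on pd->lock drains every deliverable object; losers simply return, and the timer rearmed at lines 271-275 is the fallback when objects remain pending. A hedged sketch of the trylock pattern, building on the toy types above:

#include <linux/spinlock.h>

struct toy_pd {
        spinlock_t lock;
        struct toy_queue *queues;
        unsigned int processed;
        unsigned int num_cpus;
};

static void toy_dispatch(struct toy_item *item)
{
        /* padata queues the object on its callback CPU's serial queue
         * and schedules padata_serial_worker() there */
}

static void toy_reorder(struct toy_pd *pd)
{
        struct toy_item *item;

        if (!spin_trylock_bh(&pd->lock))
                return;                 /* another CPU is already draining */

        while ((item = toy_get_next(pd->queues, &pd->processed,
                                    pd->num_cpus)))
                toy_dispatch(item);

        spin_unlock_bh(&pd->lock);
}
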
283 struct parallel_data *pd; in invoke_padata_reorder() local
287 pd = pqueue->pd; in invoke_padata_reorder()
288 padata_reorder(pd); in invoke_padata_reorder()
294 struct parallel_data *pd = from_timer(pd, t, timer); in padata_reorder_timer() local
306 weight = cpumask_weight(pd->cpumask.pcpu); in padata_reorder_timer()
307 target_cpu = padata_index_to_cpu(pd, pd->processed % weight); in padata_reorder_timer()
317 pinst = pd->pinst; in padata_reorder_timer()
318 pqueue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_reorder_timer()
321 padata_reorder(pd); in padata_reorder_timer()
330 struct parallel_data *pd; in padata_serial_worker() local
335 pd = squeue->pd; in padata_serial_worker()
350 atomic_dec(&pd->refcnt); in padata_serial_worker()
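
padata_serial_worker() runs on the callback CPU and uses the classic splice-and-drain idiom: steal the whole serial list under the lock in O(1), then walk it lock-free, invoking each object's ->serial() callback and dropping pd->refcnt (line 350). A hedged sketch of the idiom, reusing struct toy_item from above:

#include <linux/list.h>
#include <linux/spinlock.h>

static void toy_serial_drain(spinlock_t *lock, struct list_head *queue,
                             void (*serial)(struct toy_item *))
{
        LIST_HEAD(local_list);
        struct toy_item *item, *tmp;

        /* detach the whole queue in one shot; the lock stays cheap */
        spin_lock(lock);
        list_replace_init(queue, &local_list);
        spin_unlock(lock);

        list_for_each_entry_safe(item, tmp, &local_list, list) {
                list_del_init(&item->list);
                serial(item);   /* padata also does atomic_dec(&pd->refcnt) */
        }
}
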
367 struct parallel_data *pd; in padata_do_serial() local
370 pd = padata->pd; in padata_do_serial()
383 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_do_serial()
386 atomic_inc(&pd->reorder_objects); in padata_do_serial()
396 queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work); in padata_do_serial()
398 padata_reorder(pd); in padata_do_serial()
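
padata_do_serial() is the completion half of the invariant: a finished object is appended to the reorder list of the per-CPU queue it was originally dealt to, pd->reorder_objects is bumped, and the reorder machinery is kicked; line 396 is the special case where the kick must happen on another CPU via that queue's reorder_work. A sketch with the toy types:

/* @cpu_index: the round-robin slot the object was dealt to at submit time */
static void toy_do_serial(struct toy_pd *pd, struct toy_item *item,
                          unsigned int cpu_index)
{
        struct toy_queue *q = &pd->queues[cpu_index];

        spin_lock(&q->lock);
        list_add_tail(&item->list, &q->list);
        spin_unlock(&q->lock);

        toy_reorder(pd);        /* this item may be the next one in order */
}
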
402 static int padata_setup_cpumasks(struct parallel_data *pd, in padata_setup_cpumasks() argument
406 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_setup_cpumasks()
409 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); in padata_setup_cpumasks()
410 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { in padata_setup_cpumasks()
411 free_cpumask_var(pd->cpumask.pcpu); in padata_setup_cpumasks()
415 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); in padata_setup_cpumasks()
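
padata_setup_cpumasks() shows the standard cpumask_var_t pattern: allocate each variable mask, intersect the user-supplied mask with cpu_online_mask so offline CPUs never enter the working set, and unwind earlier allocations on failure. A hedged standalone sketch (setup_two_masks() is an invented name):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int setup_two_masks(cpumask_var_t *a, cpumask_var_t *b,
                           const struct cpumask *user_a,
                           const struct cpumask *user_b)
{
        if (!alloc_cpumask_var(a, GFP_KERNEL))
                return -ENOMEM;
        if (!alloc_cpumask_var(b, GFP_KERNEL)) {
                free_cpumask_var(*a);   /* unwind the first allocation */
                return -ENOMEM;
        }

        cpumask_and(*a, user_a, cpu_online_mask);
        cpumask_and(*b, user_b, cpu_online_mask);
        return 0;
}
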
426 static void padata_init_squeues(struct parallel_data *pd) in padata_init_squeues() argument
431 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
432 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
433 squeue->pd = pd; in padata_init_squeues()
440 static void padata_init_pqueues(struct parallel_data *pd) in padata_init_pqueues() argument
447 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_init_pqueues()
449 if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { in padata_init_pqueues()
454 pqueue->pd = pd; in padata_init_pqueues()
471 struct parallel_data *pd; in padata_alloc_pd() local
473 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); in padata_alloc_pd()
474 if (!pd) in padata_alloc_pd()
477 pd->pqueue = alloc_percpu(struct padata_parallel_queue); in padata_alloc_pd()
478 if (!pd->pqueue) in padata_alloc_pd()
481 pd->squeue = alloc_percpu(struct padata_serial_queue); in padata_alloc_pd()
482 if (!pd->squeue) in padata_alloc_pd()
484 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) in padata_alloc_pd()
487 padata_init_pqueues(pd); in padata_alloc_pd()
488 padata_init_squeues(pd); in padata_alloc_pd()
489 timer_setup(&pd->timer, padata_reorder_timer, 0); in padata_alloc_pd()
490 atomic_set(&pd->seq_nr, -1); in padata_alloc_pd()
491 atomic_set(&pd->reorder_objects, 0); in padata_alloc_pd()
492 atomic_set(&pd->refcnt, 0); in padata_alloc_pd()
493 pd->pinst = pinst; in padata_alloc_pd()
494 spin_lock_init(&pd->lock); in padata_alloc_pd()
496 return pd; in padata_alloc_pd()
499 free_percpu(pd->squeue); in padata_alloc_pd()
501 free_percpu(pd->pqueue); in padata_alloc_pd()
503 kfree(pd); in padata_alloc_pd()
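
padata_alloc_pd() follows the kernel's ladder-of-gotos style: each allocation failure jumps to a label that frees exactly what was already set up, in reverse order (lines 499-503 are those labels). A condensed, hedged sketch of the shape with stand-in per-CPU members:

#include <linux/percpu.h>
#include <linux/slab.h>

struct toy_alloc {
        int __percpu *pqueue;   /* stand-ins for the real per-CPU queues */
        int __percpu *squeue;
};

static struct toy_alloc *toy_alloc_pd(void)
{
        struct toy_alloc *pd;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(int);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(int);
        if (!pd->squeue)
                goto err_free_pqueue;

        return pd;

err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}
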
508 static void padata_free_pd(struct parallel_data *pd) in padata_free_pd() argument
510 free_cpumask_var(pd->cpumask.pcpu); in padata_free_pd()
511 free_cpumask_var(pd->cpumask.cbcpu); in padata_free_pd()
512 free_percpu(pd->pqueue); in padata_free_pd()
513 free_percpu(pd->squeue); in padata_free_pd()
514 kfree(pd); in padata_free_pd()
518 static void padata_flush_queues(struct parallel_data *pd) in padata_flush_queues() argument
524 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_flush_queues()
525 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_flush_queues()
529 del_timer_sync(&pd->timer); in padata_flush_queues()
531 if (atomic_read(&pd->reorder_objects)) in padata_flush_queues()
532 padata_reorder(pd); in padata_flush_queues()
534 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_flush_queues()
535 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_flush_queues()
539 BUG_ON(atomic_read(&pd->refcnt) != 0); in padata_flush_queues()
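
padata_flush_queues() tears down in a strict order: quiesce the parallel side, kill the reorder timer, push any queued objects through the reorder path, quiesce the serial side, and only then assert that pd->refcnt is zero. A hedged outline with hypothetical per-CPU work items standing in for padata's queues:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct toy_flush_pd {
        struct work_struct __percpu *pwork;     /* parallel work, per CPU */
        struct work_struct __percpu *swork;     /* serial work, per CPU */
        const struct cpumask *pcpu, *cbcpu;
        struct timer_list timer;
        atomic_t refcnt;
};

static void toy_flush(struct toy_flush_pd *pd)
{
        int cpu;

        for_each_cpu(cpu, pd->pcpu)             /* 1) parallel side idle */
                flush_work(per_cpu_ptr(pd->pwork, cpu));

        del_timer_sync(&pd->timer);             /* 2) timer can't fire again */

        /* 3) padata now runs padata_reorder() if reorder_objects != 0 */

        for_each_cpu(cpu, pd->cbcpu)            /* 4) serial side idle */
                flush_work(per_cpu_ptr(pd->swork, cpu));

        BUG_ON(atomic_read(&pd->refcnt) != 0);  /* 5) nothing left in flight */
}
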
557 padata_flush_queues(pinst->pd); in __padata_stop()
565 struct parallel_data *pd_old = pinst->pd; in padata_replace()
570 rcu_assign_pointer(pinst->pd, pd_new); in padata_replace()
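
padata_replace() is a textbook RCU pointer swap: publish the new parallel_data with rcu_assign_pointer(), wait for existing readers (padata_do_parallel() dereferences pinst->pd under rcu_read_lock_bh()), then flush and free the old one. A hedged sketch of the pattern, reusing struct toy_pd from above:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct toy_inst {
        struct toy_pd __rcu *pd;
        struct mutex lock;                      /* serializes replacements */
};

static void toy_replace(struct toy_inst *inst, struct toy_pd *pd_new)
{
        struct toy_pd *pd_old;

        pd_old = rcu_dereference_protected(inst->pd,
                                           lockdep_is_held(&inst->lock));

        rcu_assign_pointer(inst->pd, pd_new);   /* new submitters see pd_new */
        synchronize_rcu();                      /* old readers drain out */

        /* now drain and free pd_old, as padata_flush_queues() and
         * padata_free_pd() do in the real padata_replace() */
}
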
640 struct parallel_data *pd; in __padata_set_cpumasks() local
653 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in __padata_set_cpumasks()
654 if (!pd) in __padata_set_cpumasks()
660 padata_replace(pinst, pd); in __padata_set_cpumasks()
748 struct parallel_data *pd; in __padata_add_cpu() local
751 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_add_cpu()
753 if (!pd) in __padata_add_cpu()
756 padata_replace(pinst, pd); in __padata_add_cpu()
768 struct parallel_data *pd = NULL; in __padata_remove_cpu() local
776 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_remove_cpu()
778 if (!pd) in __padata_remove_cpu()
781 padata_replace(pinst, pd); in __padata_remove_cpu()
783 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); in __padata_remove_cpu()
784 cpumask_clear_cpu(cpu, pd->cpumask.pcpu); in __padata_remove_cpu()
871 padata_free_pd(pinst->pd); in __padata_free()
1017 struct parallel_data *pd = NULL; in padata_alloc() local
1033 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in padata_alloc()
1034 if (!pd) in padata_alloc()
1037 rcu_assign_pointer(pinst->pd, pd); in padata_alloc()
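
padata_alloc() (and its padata_alloc_possible() wrapper) creates the instance and publishes the first pd with rcu_assign_pointer(). A hedged user-side lifecycle sketch for this kernel vintage, where instances were still built around a caller-supplied workqueue, loosely following pcrypt (my_* names and the workqueue flags are illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/padata.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct padata_instance *my_pinst;

static int __init my_init(void)
{
        int err;

        my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!my_wq)
                return -ENOMEM;

        my_pinst = padata_alloc_possible(my_wq);   /* all possible CPUs */
        if (!my_pinst) {
                destroy_workqueue(my_wq);
                return -ENOMEM;
        }

        err = padata_start(my_pinst);
        if (err) {
                padata_free(my_pinst);
                destroy_workqueue(my_wq);
        }
        return err;
}

static void __exit my_exit(void)
{
        padata_stop(my_pinst);
        padata_free(my_pinst);
        destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
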