Lines Matching refs:sbq (lib/sbitmap.c, Linux kernel)

338 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,  in sbq_calc_wake_batch()  argument
360 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
361 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
362 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
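
The arithmetic at lines 360-362 derives the queue's effective depth when callers may use at most min_shallow_depth bits of each (1 << shift)-bit word: every full word contributes shallow_depth bits, and the partial last word contributes its remaining bits, capped at shallow_depth. Below is a minimal userspace model of that calculation; the final clamp of the batch to [1, SBQ_WAKE_BATCH] is not part of this listing and is assumed from context, with SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH both taken as 8.

#include <stdio.h>

#define SBQ_WAIT_QUEUES	8	/* assumed kernel values */
#define SBQ_WAKE_BATCH	8

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int model_wake_batch(unsigned int depth, unsigned int shift,
				     unsigned int min_shallow_depth)
{
	/* Lines 360-362: cap each word's usable bits, then sum the full
	 * words plus the capped remainder of the last word. */
	unsigned int shallow = min_u(1U << shift, min_shallow_depth);
	unsigned int eff = (depth >> shift) * shallow +
			   min_u(depth & ((1U << shift) - 1), shallow);
	unsigned int batch = eff / SBQ_WAIT_QUEUES;

	/* Assumed from the elided remainder of the function. */
	if (batch < 1)
		batch = 1;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	return batch;
}

int main(void)
{
	/* 256 bits in 64-bit words, at most 16 usable per word:
	 * effective depth 4 * 16 = 64, batch 64 / 8 = 8. */
	printf("%u\n", model_wake_batch(256, 6, 16));
	return 0;
}
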
369 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
375 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node); in sbitmap_queue_init_node()
379 sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags); in sbitmap_queue_init_node()
380 if (!sbq->alloc_hint) { in sbitmap_queue_init_node()
381 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
387 *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; in sbitmap_queue_init_node()
390 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
391 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
392 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
393 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
395 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
396 if (!sbq->ws) { in sbitmap_queue_init_node()
397 free_percpu(sbq->alloc_hint); in sbitmap_queue_init_node()
398 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
403 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
404 atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); in sbitmap_queue_init_node()
407 sbq->round_robin = round_robin; in sbitmap_queue_init_node()
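
sbitmap_queue_init_node() allocates the bitmap, the per-CPU allocation hints (randomized at line 387 so CPUs start scanning at different offsets), and SBQ_WAIT_QUEUES wait queues, each precharged with wake_batch at line 404. A hedged usage sketch follows; every name in it is invented for illustration.

#include <linux/sbitmap.h>

static struct sbitmap_queue example_tags;	/* hypothetical */

static int example_setup(void)
{
	/* 128 bits; shift = -1 lets sbitmap pick the word granularity;
	 * round_robin = false enables the per-CPU hint caching used by
	 * __sbitmap_queue_get() below. */
	return sbitmap_queue_init_node(&example_tags, 128, -1, false,
				       GFP_KERNEL, NUMA_NO_NODE);
}

static void example_teardown(void)
{
	sbitmap_queue_free(&example_tags);
}
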
412 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_update_wake_batch() argument
415 unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
418 if (sbq->wake_batch != wake_batch) { in sbitmap_queue_update_wake_batch()
419 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_update_wake_batch()
427 atomic_set(&sbq->ws[i].wait_cnt, 1); in sbitmap_queue_update_wake_batch()
431 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
433 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
434 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
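
When the computed batch changes, sbitmap_queue_update_wake_batch() drops every wait queue's counter to 1 (line 427) so the very next cleared bit triggers a wake-up pass; otherwise waiters could sleep behind a counter charged for the old, larger batch. sbitmap_queue_resize() deliberately recomputes the batch (line 433) before touching the map (line 434) for the same reason. A one-call sketch against the hypothetical queue above:

static void example_grow(void)
{
	/* Recomputes wake_batch first, then resizes the bitmap. */
	sbitmap_queue_resize(&example_tags, 256);
}
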
438 int __sbitmap_queue_get(struct sbitmap_queue *sbq) in __sbitmap_queue_get() argument
443 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get()
444 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get()
447 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
449 nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); in __sbitmap_queue_get()
453 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get()
454 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get()
459 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
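
The visible branches at lines 453-459 implement the per-CPU hint policy: a failed allocation zeroes the hint so the next attempt rescans from the front, and a successful one advances the hint only when the bit landed exactly on the hint or round-robin mode is set; otherwise the cached hint is still good and is left alone. A userspace model of the policy, assuming the elided guard before line 453 tests nr == -1 and that the elided advance is hint = nr + 1, wrapping to 0 near the end of the map:

/* Model only; the guard and wrap rule are assumed from elided lines. */
static unsigned int next_hint(int nr, unsigned int hint,
			      unsigned int depth, int round_robin)
{
	if (nr == -1)
		return 0;		/* map exhausted: rescan from 0 */
	if ((unsigned int)nr == hint || round_robin) {
		hint = (unsigned int)nr + 1;
		if (hint >= depth - 1)
			hint = 0;	/* assumed wrap */
	}
	return hint;			/* unused hint stays cached */
}
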
466 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, in __sbitmap_queue_get_shallow() argument
472 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in __sbitmap_queue_get_shallow()
474 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get_shallow()
475 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get_shallow()
478 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
480 nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth); in __sbitmap_queue_get_shallow()
484 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get_shallow()
485 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get_shallow()
490 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
497 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, in sbitmap_queue_min_shallow_depth() argument
500 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
501 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
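
The WARN_ON_ONCE at line 472 enforces an ordering contract: callers must declare, via sbitmap_queue_min_shallow_depth(), the smallest shallow depth they will ever pass before the first __sbitmap_queue_get_shallow() call, because min_shallow_depth (line 500) feeds the effective-depth arithmetic at lines 360-362. A hedged sketch of honoring that contract with the hypothetical queue above:

static void example_shallow_setup(void)
{
	/* Declared once up front; recomputes wake_batch via line 501. */
	sbitmap_queue_min_shallow_depth(&example_tags, 16);
}

static int example_get_shallow(void)
{
	/* 16 >= min_shallow_depth, so this is legal; a smaller value
	 * would trip the WARN_ON_ONCE at line 472. */
	return __sbitmap_queue_get_shallow(&example_tags, 16);
}
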
505 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) in sbq_wake_ptr() argument
509 if (!atomic_read(&sbq->ws_active)) in sbq_wake_ptr()
512 wake_index = atomic_read(&sbq->wake_index); in sbq_wake_ptr()
514 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbq_wake_ptr()
517 if (wake_index != atomic_read(&sbq->wake_index)) in sbq_wake_ptr()
518 atomic_set(&sbq->wake_index, wake_index); in sbq_wake_ptr()
528 static bool __sbq_wake_up(struct sbitmap_queue *sbq) in __sbq_wake_up() argument
534 ws = sbq_wake_ptr(sbq); in __sbq_wake_up()
542 wake_batch = READ_ONCE(sbq->wake_batch); in __sbq_wake_up()
558 sbq_index_atomic_inc(&sbq->wake_index); in __sbq_wake_up()
569 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) in sbitmap_queue_wake_up() argument
571 while (__sbq_wake_up(sbq)) in sbitmap_queue_wake_up()
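
sbq_wake_ptr() scans the wait queues starting at wake_index for one with waiters, writing the index back (line 518) if the scan moved, and __sbq_wake_up() releases waiters only once a full batch of clears has been charged against that queue, then advances wake_index (line 558) so successive batches rotate across queues. A simplified, single-threaded userspace model of the accounting (the kernel's atomic recharge is elided from this listing and is reduced here to a plain decrement and reset):

#define SBQ_WAIT_QUEUES	8	/* assumed kernel value */

struct model_ws {
	int waiters;		/* stand-in for waitqueue_active() */
	int wait_cnt;		/* precharged to wake_batch (line 404) */
};

struct model_sbq {
	unsigned int wake_batch;
	unsigned int wake_index;
	struct model_ws ws[SBQ_WAIT_QUEUES];
};

/* Returns how many waiters one cleared bit releases: usually 0,
 * and wake_batch once every wake_batch clears. */
static unsigned int model_wake_up(struct model_sbq *sbq)
{
	unsigned int i;

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		unsigned int idx = (sbq->wake_index + i) % SBQ_WAIT_QUEUES;
		struct model_ws *ws = &sbq->ws[idx];

		if (!ws->waiters)
			continue;	/* sbq_wake_ptr(): skip idle queues */
		sbq->wake_index = idx;

		if (--ws->wait_cnt > 0)
			return 0;	/* batch not yet exhausted */

		ws->wait_cnt = sbq->wake_batch;		/* recharge */
		sbq->wake_index = (idx + 1) % SBQ_WAIT_QUEUES;
		return sbq->wake_batch;
	}
	return 0;			/* no active waiters */
}
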
576 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, in sbitmap_queue_clear() argument
590 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
599 sbitmap_queue_wake_up(sbq); in sbitmap_queue_clear()
601 if (likely(!sbq->round_robin && nr < sbq->sb.depth)) in sbitmap_queue_clear()
602 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; in sbitmap_queue_clear()
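
sbitmap_queue_clear() is the release side: it defers the actual bit clear (line 590), drives the batched wake-up (line 599), and, outside round-robin mode, seeds the given CPU's hint with the freed bit (lines 601-602) so the same tag is likely reused while still cache-hot. A hedged allocate/release sketch using the hypothetical queue above:

static int example_use_tag(void)
{
	unsigned int cpu = get_cpu();	/* so the hint returns home */
	int nr = __sbitmap_queue_get(&example_tags);

	put_cpu();
	if (nr < 0)
		return -EBUSY;		/* exhausted; caller may wait */

	/* ... use bit nr as a tag ... */

	sbitmap_queue_clear(&example_tags, nr, cpu);
	return 0;
}
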
606 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) in sbitmap_queue_wake_all() argument
615 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
617 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
627 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) in sbitmap_queue_show() argument
632 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
640 seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i)); in sbitmap_queue_show()
644 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
645 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
646 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
650 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
658 seq_printf(m, "round_robin=%d\n", sbq->round_robin); in sbitmap_queue_show()
659 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
663 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, in sbitmap_add_wait_queue() argument
667 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
668 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
669 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
678 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
679 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
680 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
685 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, in sbitmap_prepare_to_wait() argument
689 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
690 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
691 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
697 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, in sbitmap_finish_wait() argument
701 if (sbq_wait->sbq) { in sbitmap_finish_wait()
702 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
703 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
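
sbitmap_prepare_to_wait() and sbitmap_finish_wait() bracket the standard sleep-until-a-bit-frees loop, while the sbq_wait->sbq bookkeeping (lines 689-691 and 701-703) keeps ws_active exact across repeated prepare/finish cycles, so sbq_wake_ptr() can bail out early at line 509 when nobody is waiting. A hedged sketch of the blocking-allocation pattern these helpers support (modeled loosely on blk-mq's tag wait loop; picking ws[0] is a simplification):

#include <linux/sbitmap.h>
#include <linux/sched.h>

static int example_get_blocking(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws = &sbq->ws[0];	/* simplification */
	DEFINE_SBQ_WAIT(wait);
	int nr;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		/* Recheck after queueing so a concurrent clear's wake-up
		 * cannot slip between a failed get and the sleep. */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();
	}
	sbitmap_finish_wait(sbq, ws, &wait);

	return nr;
}
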