Lines matching refs: sbq (lib/sbitmap.c)

Each entry below reads: <source line number> <matching line> in <enclosing function>; the trailing "argument" marks lines where sbq enters as a function parameter.

391 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,  in sbq_calc_wake_batch()  argument
413 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
414 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
415 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
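Lines 413-415 clamp the usable depth when a minimum shallow depth is in force: every full word of the bitmap contributes at most shallow_depth bits, and the partial last word contributes whatever remains, again capped at shallow_depth. A minimal standalone sketch of that arithmetic, with hypothetical values (depth 256, shift 6 for 64-bit words, min_shallow 16):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int depth = 256, shift = 6, min_shallow = 16;
            unsigned int shallow_depth, clamped;

            /* At most min_shallow usable bits per bitmap word. */
            shallow_depth = min_u(1U << shift, min_shallow);
            /* Full words * shallow_depth, plus the capped remainder. */
            clamped = (depth >> shift) * shallow_depth +
                      min_u(depth & ((1U << shift) - 1), shallow_depth);

            printf("clamped depth = %u\n", clamped);  /* 4*16 + 0 = 64 */
            return 0;
    }

The wake batch is then derived from this clamped depth rather than the raw one, so allocators restricted to a shallow depth are not starved of wakeups.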
422 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
428 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node, in sbitmap_queue_init_node()
433 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
434 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
435 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
436 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
438 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
439 if (!sbq->ws) { in sbitmap_queue_init_node()
440 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
445 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
446 atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); in sbitmap_queue_init_node()
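A hedged usage sketch for the constructor above, assuming the six-argument signature (depth, shift, round_robin, gfp flags, NUMA node); passing a negative shift lets sbitmap pick one, and sbitmap_queue_free() releases both the bitmap and the ws array allocated at line 438. All my_* names are hypothetical:

    #include <linux/sbitmap.h>

    static struct sbitmap_queue my_sbq;

    static int my_init(void)
    {
            int ret;

            ret = sbitmap_queue_init_node(&my_sbq, 128, -1 /* auto shift */,
                                          false /* FIFO */, GFP_KERNEL,
                                          NUMA_NO_NODE);
            if (ret)
                    return ret;    /* -ENOMEM from sbitmap or the ws array */
            return 0;
    }

    static void my_exit(void)
    {
            sbitmap_queue_free(&my_sbq);
    }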
453 static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, in __sbitmap_queue_update_wake_batch() argument
458 if (sbq->wake_batch != wake_batch) { in __sbitmap_queue_update_wake_batch()
459 WRITE_ONCE(sbq->wake_batch, wake_batch); in __sbitmap_queue_update_wake_batch()
467 atomic_set(&sbq->ws[i].wait_cnt, 1); in __sbitmap_queue_update_wake_batch()
471 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_update_wake_batch() argument
476 wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
477 __sbitmap_queue_update_wake_batch(sbq, wake_batch); in sbitmap_queue_update_wake_batch()
480 void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_recalculate_wake_batch() argument
485 unsigned int depth = (sbq->sb.depth + users - 1) / users; in sbitmap_queue_recalculate_wake_batch()
487 min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1; in sbitmap_queue_recalculate_wake_batch()
491 __sbitmap_queue_update_wake_batch(sbq, wake_batch); in sbitmap_queue_recalculate_wake_batch()
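The resizing math above reads more easily with numbers plugged in. A worked example, assuming the header constants SBQ_WAIT_QUEUES = 8 and SBQ_WAKE_BATCH = 8, and noting that the clamp itself sits on a line elided from this listing (between 487 and 491):

    sb.depth = 256, users = 3
    depth      = (256 + 3 - 1) / 3        = 86   (per-user share, line 485)
    min_batch  = 256 >= 4 * 8 ? 4 : 1     = 4    (line 487)
    wake_batch = clamp(86 / 8, 4, 8)      = 8

So a 256-tag queue shared by three active users keeps waking waiters in batches of 8; only small or very heavily shared queues fall back toward a batch of 1.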
495 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
497 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
498 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
502 int __sbitmap_queue_get(struct sbitmap_queue *sbq) in __sbitmap_queue_get() argument
504 return sbitmap_get(&sbq->sb); in __sbitmap_queue_get()
508 unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, in __sbitmap_queue_get_batch() argument
511 struct sbitmap *sb = &sbq->sb; in __sbitmap_queue_get_batch()
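Only the entry of the batch allocator survives in this listing; the function claims up to nr_tags free bits within a single bitmap word and returns them as a mask, with *offset giving the tag number of the mask's bit 0. A hedged caller sketch modeled on how blk-mq consumes the result; use_tag() is a hypothetical consumer:

    static void get_some_tags(struct sbitmap_queue *sbq, int nr_tags)
    {
            unsigned int offset;
            unsigned long mask;

            mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);
            while (mask) {
                    unsigned int tag = offset + __ffs(mask);

                    use_tag(tag);         /* hypothetical consumer */
                    mask &= mask - 1;     /* clear the lowest claimed bit */
            }
    }

A zero return simply means no batch could be claimed, in which case callers typically fall back to the single-bit __sbitmap_queue_get() at line 502.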
562 int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, in sbitmap_queue_get_shallow() argument
565 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in sbitmap_queue_get_shallow()
567 return sbitmap_get_shallow(&sbq->sb, shallow_depth); in sbitmap_queue_get_shallow()
571 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, in sbitmap_queue_min_shallow_depth() argument
574 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
575 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
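The WARN_ON_ONCE at line 565 enforces an ordering contract: callers must advertise the smallest shallow depth they will ever pass, via sbitmap_queue_min_shallow_depth(), before the first shallow allocation, because the wake batch (recomputed at line 575) must account for it. A hedged sketch with a hypothetical limit of 16:

    /* Once, at setup: */
    sbitmap_queue_min_shallow_depth(sbq, 16);

    /* Per allocation; the depth passed here must never go below 16: */
    nr = sbitmap_queue_get_shallow(sbq, 16);
    if (nr < 0)
            return nr;   /* nothing free within the shallow limit */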
579 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) in sbq_wake_ptr() argument
583 if (!atomic_read(&sbq->ws_active)) in sbq_wake_ptr()
586 wake_index = atomic_read(&sbq->wake_index); in sbq_wake_ptr()
588 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbq_wake_ptr()
591 if (wake_index != atomic_read(&sbq->wake_index)) in sbq_wake_ptr()
592 atomic_set(&sbq->wake_index, wake_index); in sbq_wake_ptr()
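The listing drops the loop structure of sbq_wake_ptr(); reconstructed from the surviving lines, and hedged as a sketch, the scan visits each of the SBQ_WAIT_QUEUES wait queues once, starting at wake_index, and returns the first one with active waiters:

    for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
            struct sbq_wait_state *ws = &sbq->ws[wake_index];

            if (waitqueue_active(&ws->wait)) {
                    /* Remember where to resume next time (line 592). */
                    if (wake_index != atomic_read(&sbq->wake_index))
                            atomic_set(&sbq->wake_index, wake_index);
                    return ws;
            }
            wake_index = sbq_index_inc(wake_index);
    }
    return NULL;

The ws_active check at line 583 short-circuits all of this when no one is waiting, keeping the common free path cheap.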
602 static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr) in __sbq_wake_up() argument
612 ws = sbq_wake_ptr(sbq); in __sbq_wake_up()
644 wake_batch = READ_ONCE(sbq->wake_batch); in __sbq_wake_up()
669 sbq_index_atomic_inc(&sbq->wake_index); in __sbq_wake_up()
675 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) in sbitmap_queue_wake_up() argument
677 while (__sbq_wake_up(sbq, &nr)) in sbitmap_queue_wake_up()
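Wakeups are batched: __sbq_wake_up() decrements the selected queue's wait_cnt and only issues wakeups once a whole batch (line 644) has been freed, while sbitmap_queue_wake_up() loops until every freed bit passed in nr has been accounted for. An illustrative trace, assuming a hypothetical wake_batch of 4:

    /*
     * Frees 1..3: wait_cnt 4 -> 3 -> 2 -> 1, nobody is woken.
     * Free 4:     wait_cnt hits 0, up to a batch of waiters is woken
     *             on the current queue, wait_cnt is re-armed, and
     *             wake_index advances (line 669) so the next batch
     *             drains a different wait queue.
     */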
688 void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, in sbitmap_queue_clear_batch() argument
691 struct sbitmap *sb = &sbq->sb; in sbitmap_queue_clear_batch()
717 sbitmap_queue_wake_up(sbq, nr_tags); in sbitmap_queue_clear_batch()
718 sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), in sbitmap_queue_clear_batch()
722 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, in sbitmap_queue_clear() argument
736 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
745 sbitmap_queue_wake_up(sbq, 1); in sbitmap_queue_clear()
746 sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); in sbitmap_queue_clear()
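In both clear paths the bit is returned (lines 688-716 and 722-744, mostly elided here) before sbitmap_queue_wake_up() runs at line 717 or 745, so a woken waiter sees the bit already free. A hedged completion-path sketch; struct my_tags is hypothetical:

    static void my_complete(struct my_tags *tags, unsigned int tag)
    {
            /* Clears the bit, then wakes; ordering handled inside. */
            sbitmap_queue_clear(&tags->sbq, tag, raw_smp_processor_id());
    }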
750 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) in sbitmap_queue_wake_all() argument
759 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
761 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
771 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) in sbitmap_queue_show() argument
776 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
784 seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); in sbitmap_queue_show()
788 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
789 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
790 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
794 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
802 seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); in sbitmap_queue_show()
803 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
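sbitmap_queue_show() is a ready-made seq_file printer for the fields listed above (wake_batch, wake_index, ws_active, per-queue waiter state, round_robin, min_shallow_depth). A hedged sketch of wiring it into debugfs; every my_* name is hypothetical:

    static int my_sbq_show(struct seq_file *m, void *unused)
    {
            struct sbitmap_queue *sbq = m->private;

            sbitmap_queue_show(sbq, m);
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(my_sbq);

    /* At init time, with "parent" an existing debugfs directory: */
    debugfs_create_file("sbq", 0444, parent, sbq, &my_sbq_fops);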
807 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, in sbitmap_add_wait_queue() argument
811 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
812 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
813 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
822 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
823 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
824 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
829 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, in sbitmap_prepare_to_wait() argument
833 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
834 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
835 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
841 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, in sbitmap_finish_wait() argument
845 if (sbq_wait->sbq) { in sbitmap_finish_wait()
846 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
847 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
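All four helpers above exist to keep ws_active exact, so the fast path at line 583 can skip the wait-queue scan entirely when nobody sleeps. A hedged wait-loop sketch modeled on blk-mq's tag allocation; picking the wait queue is left to the caller, and io_schedule() stands in for whatever blocking discipline applies:

    DEFINE_SBQ_WAIT(wait);
    struct sbq_wait_state *ws = &sbq->ws[0];   /* hypothetical pick */
    int nr;

    for (;;) {
            sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
            nr = __sbitmap_queue_get(sbq);
            if (nr >= 0)
                    break;
            io_schedule();
    }
    sbitmap_finish_wait(sbq, ws, &wait);

sbitmap_add_wait_queue()/sbitmap_del_wait_queue() perform the same ws_active bookkeeping without touching task state, for callers that manage their own wait entry lifetime.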