Lines matching "rb" in pblk-rb.c

17  * pblk-rb.c - pblk's write buffer
26 static void pblk_rb_data_free(struct pblk_rb *rb) in pblk_rb_data_free()
31 list_for_each_entry_safe(p, t, &rb->pages, list) { in pblk_rb_data_free()
39 void pblk_rb_free(struct pblk_rb *rb) in pblk_rb_free()
41 pblk_rb_data_free(rb); in pblk_rb_free()
42 vfree(rb->entries); in pblk_rb_free()
72 int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold, in pblk_rb_init()
75 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_init()
92 rb->entries = entries; in pblk_rb_init()
93 rb->seg_size = (1 << power_seg_sz); in pblk_rb_init()
94 rb->nr_entries = (1 << power_size); in pblk_rb_init()
95 rb->mem = rb->subm = rb->sync = rb->l2p_update = 0; in pblk_rb_init()
96 rb->back_thres = threshold; in pblk_rb_init()
97 rb->flush_point = EMPTY_ENTRY; in pblk_rb_init()
99 spin_lock_init(&rb->w_lock); in pblk_rb_init()
100 spin_lock_init(&rb->s_lock); in pblk_rb_init()
102 INIT_LIST_HEAD(&rb->pages); in pblk_rb_init()
131 pblk_rb_data_free(rb); in pblk_rb_init()
138 entry = &rb->entries[init_entry]; in pblk_rb_init()
145 entry = &rb->entries[init_entry]; in pblk_rb_init()
147 entry->data = kaddr + (i * rb->seg_size); in pblk_rb_init()
152 list_add_tail(&page_set->list, &rb->pages); in pblk_rb_init()
158 atomic_set(&rb->inflight_flush_point, 0); in pblk_rb_init()
165 pblk_rl_init(&pblk->rl, rb->nr_entries, threshold); in pblk_rb_init()
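
The init lines above show the buffer's key invariant: both the entry count and the segment size are powers of two (`1 << power_size`, `1 << power_seg_sz`, lines 93-94), and all four ring pointers start at zero (line 95). A minimal userspace sketch of that setup, with an assumed struct layout, not the kernel structure:

#include <stdio.h>

/* Simplified stand-in for struct pblk_rb. Power-of-two sizing is what
 * lets every index operation below use a bitmask instead of a modulo. */
struct ring {
    unsigned int nr_entries;            /* 1 << power_size */
    unsigned int seg_size;              /* 1 << power_seg_sz */
    unsigned int mem, subm, sync, l2p_update;
};

static void ring_init(struct ring *r, unsigned int power_size,
                      unsigned int power_seg_sz)
{
    r->nr_entries = 1u << power_size;
    r->seg_size = 1u << power_seg_sz;
    r->mem = r->subm = r->sync = r->l2p_update = 0;
}

int main(void)
{
    struct ring r;

    ring_init(&r, 7, 12);       /* 128 entries of 4 KiB each */
    printf("%u entries x %u bytes\n", r.nr_entries, r.seg_size);
    return 0;
}
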
185 #define pblk_rb_ring_space(rb, head, tail, size) \
192 static unsigned int pblk_rb_space(struct pblk_rb *rb) in pblk_rb_space()
194 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_space()
195 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_space()
197 return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries); in pblk_rb_space()
200 unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p, in pblk_rb_ptr_wrap()
203 return (p + nr_entries) & (rb->nr_entries - 1); in pblk_rb_ptr_wrap()
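
`pblk_rb_ptr_wrap()` advances a ring pointer with `& (rb->nr_entries - 1)` (line 203), which is equivalent to a modulo only because `nr_entries` is a power of two. A quick demonstration of that equivalence:

#include <assert.h>

/* Mask-based wrap, valid only for power-of-two ring sizes. */
static unsigned int ptr_wrap(unsigned int n, unsigned int p, unsigned int step)
{
    return (p + step) & (n - 1);
}

int main(void)
{
    unsigned int nr_entries = 8;        /* power of two */

    assert(ptr_wrap(nr_entries, 6, 3) == (6 + 3) % nr_entries);
    assert(ptr_wrap(nr_entries, 7, 1) == 0);    /* wraps to start */
    return 0;
}
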
210 unsigned int pblk_rb_read_count(struct pblk_rb *rb) in pblk_rb_read_count()
212 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_read_count()
213 unsigned int subm = READ_ONCE(rb->subm); in pblk_rb_read_count()
215 return pblk_rb_ring_count(mem, subm, rb->nr_entries); in pblk_rb_read_count()
218 unsigned int pblk_rb_sync_count(struct pblk_rb *rb) in pblk_rb_sync_count()
220 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_sync_count()
221 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_sync_count()
223 return pblk_rb_ring_count(mem, sync, rb->nr_entries); in pblk_rb_sync_count()
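
The two counters above track different pipeline stages: `pblk_rb_read_count()` is `mem - subm` (written but not yet submitted to the device), while `pblk_rb_sync_count()` is `mem - sync` (written but not yet persisted). The `pblk_rb_ring_count()`/`pblk_rb_ring_space()` bodies are not part of this listing; the sketch below assumes they follow the kernel's `CIRC_CNT()`/`CIRC_SPACE()` convention from `<linux/circ_buf.h>`:

#include <assert.h>

/* Assumed to mirror CIRC_CNT()/CIRC_SPACE(); size must be a power of
 * two. One slot is always kept empty, hence the +1 inside ring_space. */
#define ring_count(head, tail, size) (((head) - (tail)) & ((size) - 1))
#define ring_space(head, tail, size) ring_count((tail), (head) + 1, (size))

int main(void)
{
    unsigned int size = 8, mem = 2, subm = 7, sync = 6;

    /* mem has wrapped past the end: entries 7, 0, 1 are unread */
    assert(ring_count(mem, subm, size) == 3);
    /* entries 6, 7, 0, 1 are not yet persisted */
    assert(ring_count(mem, sync, size) == 4);
    /* free slots as seen by the writer */
    assert(ring_space(mem, sync, size) == 3);
    return 0;
}
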
226 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries) in pblk_rb_read_commit()
230 subm = READ_ONCE(rb->subm); in pblk_rb_read_commit()
232 smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries)); in pblk_rb_read_commit()
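
`pblk_rb_read_commit()` pairs `READ_ONCE()` with `smp_store_release()`: the new `subm` becomes visible to other CPUs only after the committed entries have been consumed. A userspace analogue using C11 atomics in place of the kernel primitives, an illustration of the ordering rather than the kernel code:

#include <stdatomic.h>
#include <stdio.h>

#define NR_ENTRIES 8                    /* power of two */

static _Atomic unsigned int subm;       /* submission tail */

/* Consumer: read the tail, process the entries, then publish the new
 * tail with release semantics so the slots are only seen as free once
 * we are done with them. Returns the first committed position. */
static unsigned int read_commit(unsigned int nr_entries)
{
    unsigned int tail = atomic_load_explicit(&subm, memory_order_relaxed);

    /* ... the nr_entries slots starting at tail are consumed here ... */

    atomic_store_explicit(&subm, (tail + nr_entries) & (NR_ENTRIES - 1),
                          memory_order_release);
    return tail;
}

int main(void)
{
    unsigned int first = read_commit(3);

    printf("committed from %u, subm now %u\n", first, atomic_load(&subm));
    return 0;
}
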
237 static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update) in __pblk_rb_update_l2p()
239 struct pblk *pblk = container_of(rb, struct pblk, rwb); in __pblk_rb_update_l2p()
248 entry = &rb->entries[rb->l2p_update]; in __pblk_rb_update_l2p()
266 rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1); in __pblk_rb_update_l2p()
279 static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_update_l2p()
285 lockdep_assert_held(&rb->w_lock); in pblk_rb_update_l2p()
288 space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries); in pblk_rb_update_l2p()
293 /* l2p_update used exclusively under rb->w_lock */ in pblk_rb_update_l2p()
294 ret = __pblk_rb_update_l2p(rb, count); in pblk_rb_update_l2p()
305 void pblk_rb_sync_l2p(struct pblk_rb *rb) in pblk_rb_sync_l2p()
310 spin_lock(&rb->w_lock); in pblk_rb_sync_l2p()
313 sync = smp_load_acquire(&rb->sync); in pblk_rb_sync_l2p()
315 to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries); in pblk_rb_sync_l2p()
316 __pblk_rb_update_l2p(rb, to_update); in pblk_rb_sync_l2p()
318 spin_unlock(&rb->w_lock); in pblk_rb_sync_l2p()
327 static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data, in __pblk_rb_write_entry()
331 memcpy(entry->data, data, rb->seg_size); in __pblk_rb_write_entry()
337 void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data, in pblk_rb_write_entry_user()
340 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_write_entry_user()
344 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_user()
351 __pblk_rb_write_entry(rb, data, w_ctx, entry); in pblk_rb_write_entry_user()
360 void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data, in pblk_rb_write_entry_gc()
364 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_write_entry_gc()
368 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_gc()
375 __pblk_rb_write_entry(rb, data, w_ctx, entry); in pblk_rb_write_entry_gc()
386 static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio, in pblk_rb_flush_point_set()
392 pblk_rb_sync_init(rb, NULL); in pblk_rb_flush_point_set()
393 sync = READ_ONCE(rb->sync); in pblk_rb_flush_point_set()
396 pblk_rb_sync_end(rb, NULL); in pblk_rb_flush_point_set()
401 atomic_inc(&rb->inflight_flush_point); in pblk_rb_flush_point_set()
404 flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1); in pblk_rb_flush_point_set()
405 entry = &rb->entries[flush_point]; in pblk_rb_flush_point_set()
408 smp_store_release(&rb->flush_point, flush_point); in pblk_rb_flush_point_set()
413 pblk_rb_sync_end(rb, NULL); in pblk_rb_flush_point_set()
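
The flush point set at line 404 is the entry just before the current write position, wrapping to the last slot when `pos` is 0: it marks the last entry that must reach the device before the flush completes. The computation in isolation:

#include <assert.h>

/* Last entry covered by a flush issued at write position pos. */
static unsigned int flush_point_of(unsigned int pos, unsigned int nr_entries)
{
    return (pos == 0) ? (nr_entries - 1) : (pos - 1);
}

int main(void)
{
    assert(flush_point_of(5, 8) == 4);  /* entry before pos */
    assert(flush_point_of(0, 8) == 7);  /* wraps to the end */
    return 0;
}
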
418 static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries, in __pblk_rb_may_write()
425 sync = READ_ONCE(rb->sync); in __pblk_rb_may_write()
426 mem = READ_ONCE(rb->mem); in __pblk_rb_may_write()
428 threshold = nr_entries + rb->back_thres; in __pblk_rb_may_write()
430 if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold) in __pblk_rb_may_write()
433 if (pblk_rb_update_l2p(rb, nr_entries, mem, sync)) in __pblk_rb_may_write()
441 static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write()
444 if (!__pblk_rb_may_write(rb, nr_entries, pos)) in pblk_rb_may_write()
448 smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries)); in pblk_rb_may_write()
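
`__pblk_rb_may_write()` admits a write only if `back_thres` entries of headroom would remain free afterwards (lines 428-430); on success, `pblk_rb_may_write()` publishes the new head with `smp_store_release()`. A hedged sketch of the admission test, reusing the `ring_space()` convention assumed earlier:

#include <stdio.h>

#define ring_count(head, tail, size) (((head) - (tail)) & ((size) - 1))
#define ring_space(head, tail, size) ring_count((tail), (head) + 1, (size))

/* Admit nr_req entries only if back_thres slots of headroom remain
 * free afterwards; back_thres mirrors rb->back_thres from init. */
static int may_write(unsigned int mem, unsigned int sync,
                     unsigned int nr_entries, unsigned int back_thres,
                     unsigned int nr_req)
{
    return ring_space(mem, sync, nr_entries) >= nr_req + back_thres;
}

int main(void)
{
    unsigned int mem = 11, sync = 5, size = 16;   /* 9 slots free */

    printf("write 6 (+2 headroom): %s\n",
           may_write(mem, sync, size, 2, 6) ? "ok" : "rejected");
    printf("write 8 (+2 headroom): %s\n",
           may_write(mem, sync, size, 2, 8) ? "ok" : "rejected");
    return 0;
}
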
452 void pblk_rb_flush(struct pblk_rb *rb) in pblk_rb_flush()
454 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_flush()
455 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_flush()
457 if (pblk_rb_flush_point_set(rb, NULL, mem)) in pblk_rb_flush()
463 static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write_flush()
469 if (!__pblk_rb_may_write(rb, nr_entries, pos)) in pblk_rb_may_write_flush()
472 mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries); in pblk_rb_may_write_flush()
476 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_flush()
484 smp_store_release(&rb->mem, mem); in pblk_rb_may_write_flush()
494 int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio, in pblk_rb_may_write_user()
497 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_user()
500 spin_lock(&rb->w_lock); in pblk_rb_may_write_user()
503 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
507 if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) { in pblk_rb_may_write_user()
508 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
513 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
521 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write_gc()
524 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_gc()
526 spin_lock(&rb->w_lock); in pblk_rb_may_write_gc()
528 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
532 if (!pblk_rb_may_write(rb, nr_entries, pos)) { in pblk_rb_may_write_gc()
533 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
538 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
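
Both admission paths above share one shape: take `rb->w_lock`, consult the rate limiter, reserve a position, drop the lock. The data copy in `pblk_rb_write_entry_user()`/`pblk_rb_write_entry_gc()` then runs without the lock, which is safe because the reserved slots belong exclusively to the writer that reserved them. A pthread sketch of that reserve-then-fill pattern (`ring_reserve()` is a hypothetical helper, and the space and rate-limit checks are omitted):

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define NR_ENTRIES 16
#define SEG_SIZE 64

static pthread_mutex_t w_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int mem;                        /* write head */
static char entries[NR_ENTRIES][SEG_SIZE];      /* entry data segments */

/* Reserve nr slots under the lock and return the first reserved
 * position. The space and rate-limit checks that pblk performs at this
 * point are omitted for brevity. Hypothetical helper, not pblk's API. */
static unsigned int ring_reserve(unsigned int nr)
{
    unsigned int pos;

    pthread_mutex_lock(&w_lock);
    pos = mem;
    mem = (mem + nr) & (NR_ENTRIES - 1);
    pthread_mutex_unlock(&w_lock);
    return pos;
}

int main(void)
{
    unsigned int pos = ring_reserve(1);

    /* Copy outside the lock: once reserved, the slot is ours alone. */
    strncpy(entries[pos], "payload", SEG_SIZE - 1);
    printf("wrote at %u, head now %u\n", pos, mem);
    return 0;
}
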
544 * Read available entries on rb and add them to the given bio. To avoid a memory
550 unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, in pblk_rb_read_to_bio()
554 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_read_to_bio()
577 entry = &rb->entries[pos]; in pblk_rb_read_to_bio()
599 if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) != in pblk_rb_read_to_bio()
600 rb->seg_size) { in pblk_rb_read_to_bio()
615 pos = pblk_rb_ptr_wrap(rb, pos, 1); in pblk_rb_read_to_bio()
644 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, in pblk_rb_copy_to_bio()
647 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_copy_to_bio()
659 BUG_ON(pos >= rb->nr_entries); in pblk_rb_copy_to_bio()
661 entry = &rb->entries[pos]; in pblk_rb_copy_to_bio()
665 spin_lock(&rb->w_lock); in pblk_rb_copy_to_bio()
677 memcpy(data, entry->data, rb->seg_size); in pblk_rb_copy_to_bio()
680 spin_unlock(&rb->w_lock); in pblk_rb_copy_to_bio()
684 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos) in pblk_rb_w_ctx()
686 unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0); in pblk_rb_w_ctx()
688 return &rb->entries[entry].w_ctx; in pblk_rb_w_ctx()
691 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags) in pblk_rb_sync_init()
692 __acquires(&rb->s_lock) in pblk_rb_sync_init()
695 spin_lock_irqsave(&rb->s_lock, *flags); in pblk_rb_sync_init()
697 spin_lock_irq(&rb->s_lock); in pblk_rb_sync_init()
699 return rb->sync; in pblk_rb_sync_init()
702 void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags) in pblk_rb_sync_end()
703 __releases(&rb->s_lock) in pblk_rb_sync_end()
705 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_end()
708 spin_unlock_irqrestore(&rb->s_lock, *flags); in pblk_rb_sync_end()
710 spin_unlock_irq(&rb->s_lock); in pblk_rb_sync_end()
713 unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries) in pblk_rb_sync_advance()
716 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_advance()
718 sync = READ_ONCE(rb->sync); in pblk_rb_sync_advance()
719 flush_point = READ_ONCE(rb->flush_point); in pblk_rb_sync_advance()
725 rb->nr_entries); in pblk_rb_sync_advance()
728 smp_store_release(&rb->flush_point, EMPTY_ENTRY); in pblk_rb_sync_advance()
732 sync = pblk_rb_ptr_wrap(rb, sync, nr_entries); in pblk_rb_sync_advance()
735 smp_store_release(&rb->sync, sync); in pblk_rb_sync_advance()
741 unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb) in pblk_rb_flush_point_count()
747 flush_point = smp_load_acquire(&rb->flush_point); in pblk_rb_flush_point_count()
752 sync = smp_load_acquire(&rb->sync); in pblk_rb_flush_point_count()
754 subm = READ_ONCE(rb->subm); in pblk_rb_flush_point_count()
755 submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries); in pblk_rb_flush_point_count()
758 to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1; in pblk_rb_flush_point_count()
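
`pblk_rb_flush_point_count()` reports how many entries still stand between the last persisted position and the flush point. The `+ 1` at line 758 is needed because the flush point indexes the last entry to flush, so the distance is inclusive:

#include <assert.h>

#define ring_count(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
    unsigned int size = 8, sync = 2, flush_point = 5;

    /* entries 2, 3, 4 and 5 still need to persist: a distance of 3,
     * plus the flush point entry itself */
    assert(ring_count(flush_point, sync, size) + 1 == 4);
    return 0;
}
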
763 int pblk_rb_tear_down_check(struct pblk_rb *rb) in pblk_rb_tear_down_check()
769 spin_lock(&rb->w_lock); in pblk_rb_tear_down_check()
770 spin_lock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
772 if ((rb->mem == rb->subm) && (rb->subm == rb->sync) && in pblk_rb_tear_down_check()
773 (rb->sync == rb->l2p_update) && in pblk_rb_tear_down_check()
774 (rb->flush_point == EMPTY_ENTRY)) { in pblk_rb_tear_down_check()
778 if (!rb->entries) { in pblk_rb_tear_down_check()
783 for (i = 0; i < rb->nr_entries; i++) { in pblk_rb_tear_down_check()
784 entry = &rb->entries[i]; in pblk_rb_tear_down_check()
793 spin_unlock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
794 spin_unlock(&rb->w_lock); in pblk_rb_tear_down_check()
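
`pblk_rb_tear_down_check()` treats the buffer as quiescent when all four ring pointers coincide and no flush point is pending (lines 772-774): with every stage caught up, nothing is in flight. The pointer check in isolation:

#include <assert.h>

/* Quiescent iff every pipeline stage has caught up with the writer. */
static int ring_is_quiescent(unsigned int mem, unsigned int subm,
                             unsigned int sync, unsigned int l2p_update)
{
    return mem == subm && subm == sync && sync == l2p_update;
}

int main(void)
{
    assert(ring_is_quiescent(3, 3, 3, 3));
    assert(!ring_is_quiescent(4, 3, 3, 3));     /* unsubmitted writes */
    return 0;
}
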
799 unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos) in pblk_rb_wrap_pos()
801 return (pos & (rb->nr_entries - 1)); in pblk_rb_wrap_pos()
804 int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos) in pblk_rb_pos_oob()
806 return (pos >= rb->nr_entries); in pblk_rb_pos_oob()
809 ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf) in pblk_rb_sysfs()
811 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_sysfs()
816 spin_lock_irq(&rb->s_lock); in pblk_rb_sysfs()
819 spin_unlock_irq(&rb->s_lock); in pblk_rb_sysfs()
821 if (rb->flush_point != EMPTY_ENTRY) in pblk_rb_sysfs()
824 rb->nr_entries, in pblk_rb_sysfs()
825 rb->mem, in pblk_rb_sysfs()
826 rb->subm, in pblk_rb_sysfs()
827 rb->sync, in pblk_rb_sysfs()
828 rb->l2p_update, in pblk_rb_sysfs()
830 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()
834 rb->flush_point, in pblk_rb_sysfs()
835 pblk_rb_read_count(rb), in pblk_rb_sysfs()
836 pblk_rb_space(rb), in pblk_rb_sysfs()
837 pblk_rb_flush_point_count(rb), in pblk_rb_sysfs()
842 rb->nr_entries, in pblk_rb_sysfs()
843 rb->mem, in pblk_rb_sysfs()
844 rb->subm, in pblk_rb_sysfs()
845 rb->sync, in pblk_rb_sysfs()
846 rb->l2p_update, in pblk_rb_sysfs()
848 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()
852 pblk_rb_read_count(rb), in pblk_rb_sysfs()
853 pblk_rb_space(rb), in pblk_rb_sysfs()
854 pblk_rb_flush_point_count(rb), in pblk_rb_sysfs()