Searched refs:backoffs (Results 1 – 4 of 4) sorted by relevance
85   int backoffs;                                             in jffs2_rtime_decompress() local
91   backoffs = positions[value];                              in jffs2_rtime_decompress()
95   if (backoffs + repeat >= outpos) {                        in jffs2_rtime_decompress()
97   cpage_out[outpos++] = cpage_out[backoffs++];              in jffs2_rtime_decompress()
101  memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);   in jffs2_rtime_decompress()
292 struct rb_root backoffs; member
1535  spg->backoffs = RB_ROOT;                                        in alloc_spg_mapping()
1542  WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));                        in free_spg_mapping()
1788  while (!RB_EMPTY_ROOT(&spg->backoffs)) {                        in DEFINE_RB_FUNCS()
1790  rb_entry(rb_first(&spg->backoffs),                              in DEFINE_RB_FUNCS()
1793  erase_backoff(&spg->backoffs, backoff);                         in DEFINE_RB_FUNCS()
1838  backoff = lookup_containing_backoff(&spg->backoffs, &hoid);     in should_plug_request()
4136  insert_backoff(&spg->backoffs, backoff);                        in handle_backoff_block()
4192  erase_backoff(&spg->backoffs, backoff);                         in handle_backoff_unblock()
4196  if (RB_EMPTY_ROOT(&spg->backoffs)) {                            in handle_backoff_unblock()
47 However, the Wound-Wait algorithm is typically stated to generate fewer backoffs