Searched refs:backoffs (Results 1 – 4 of 4) sorted by relevance
85   int backoffs;                                              in jffs2_rtime_decompress() local
91   backoffs = positions[value];                               in jffs2_rtime_decompress()
95   if (backoffs + repeat >= outpos) {                         in jffs2_rtime_decompress()
97   cpage_out[outpos++] = cpage_out[backoffs++];               in jffs2_rtime_decompress()
101  memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);    in jffs2_rtime_decompress()
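For context, the hits above are the body of the JFFS2 "rtime" decompressor, where backoffs is the output position just past the previous occurrence of the current byte value, and a run is reproduced by copying repeat bytes forward from it. Below is a hedged, self-contained userspace sketch of that scheme (compressor and decompressor together); the buffer sizes, bounds checks and main() driver are illustrative and simplified relative to the kernel code, and only the loop structure mirrors the hits.

/*
 * Sketch of the JFFS2 "rtime" run-length scheme.  "backoffs" is the output
 * position just past the previous occurrence of the current byte value; a
 * run is reproduced by copying "repeat" bytes forward from that position.
 * Buffer sizing and error handling are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Compress srclen bytes of data_in; returns the compressed length. */
static int rtime_compress(const unsigned char *data_in, unsigned char *cpage_out,
                          uint32_t srclen)
{
        unsigned short positions[256] = { 0 };
        int outpos = 0, pos = 0;

        while (pos < (int)srclen) {
                unsigned char value = data_in[pos];
                int backpos, runlen = 0;

                cpage_out[outpos++] = data_in[pos++];   /* literal byte */
                backpos = positions[value];             /* previous "backoffs" */
                positions[value] = pos;

                /* count how far the previous occurrence's run matches this one */
                while (backpos < pos && pos < (int)srclen &&
                       data_in[pos] == data_in[backpos++] && runlen < 255) {
                        pos++;
                        runlen++;
                }
                cpage_out[outpos++] = runlen;           /* run-length byte */
        }
        return outpos;
}

/* Decompress until destlen output bytes have been produced. */
static void rtime_decompress(const unsigned char *data_in, unsigned char *cpage_out,
                             uint32_t destlen)
{
        unsigned short positions[256] = { 0 };
        int outpos = 0, pos = 0;

        while (outpos < (int)destlen) {
                unsigned char value = data_in[pos++];
                int backoffs, repeat;

                cpage_out[outpos++] = value;            /* literal byte */
                repeat = data_in[pos++];
                backoffs = positions[value];            /* just past the previous 'value' byte */
                positions[value] = outpos;

                if (!repeat)
                        continue;
                if (backoffs + repeat >= outpos) {
                        /* source and destination overlap: copy byte by byte */
                        while (repeat--)
                                cpage_out[outpos++] = cpage_out[backoffs++];
                } else {
                        memcpy(&cpage_out[outpos], &cpage_out[backoffs], repeat);
                        outpos += repeat;
                }
        }
}

int main(void)
{
        const unsigned char src[] = "aaaaaaaabbbbbbbbaaaaaaaa";
        unsigned char comp[2 * sizeof(src)], out[sizeof(src)];
        int clen = rtime_compress(src, comp, sizeof(src) - 1);

        rtime_decompress(comp, out, sizeof(src) - 1);
        printf("compressed %zu -> %d bytes, round-trip %s\n",
               sizeof(src) - 1, clen,
               memcmp(src, out, sizeof(src) - 1) ? "FAILED" : "ok");
        return 0;
}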
304 struct rb_root backoffs; member
1683  spg->backoffs = RB_ROOT;                                     in alloc_spg_mapping()
1690  WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));                     in free_spg_mapping()
1936  while (!RB_EMPTY_ROOT(&spg->backoffs)) {                     in DEFINE_RB_FUNCS()
1938  rb_entry(rb_first(&spg->backoffs),                           in DEFINE_RB_FUNCS()
1941  erase_backoff(&spg->backoffs, backoff);                      in DEFINE_RB_FUNCS()
1986  backoff = lookup_containing_backoff(&spg->backoffs, &hoid);  in should_plug_request()
4326  insert_backoff(&spg->backoffs, backoff);                     in handle_backoff_block()
4382  erase_backoff(&spg->backoffs, backoff);                      in handle_backoff_unblock()
4386  if (RB_EMPTY_ROOT(&spg->backoffs)) {                         in handle_backoff_unblock()
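The osd_client hits above all manipulate the same structure: each placement-group mapping owns a tree of backoff ranges (spg->backoffs), handle_backoff_block() inserts a range announced by an OSD, should_plug_request() holds back any request whose target object falls inside a stored range, and handle_backoff_unblock() erases the range so the mapping can be freed once the tree is empty. Below is a hedged userspace sketch of that pattern only: the RB-tree is replaced by a linked list, object names by plain integers, and the function names are kept merely to mirror the hits; none of this is the actual ceph API.

/*
 * Sketch of the backoff-range pattern: a per placement-group set of blocked
 * object ranges that "plugs" requests targeting anything inside a range.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct backoff {
        unsigned long long id;          /* unique id, used to erase on unblock */
        unsigned int begin, end;        /* blocked object range [begin, end) */
        struct backoff *next;
};

struct spg_mapping {                    /* stands in for the real spg mapping */
        struct backoff *backoffs;       /* stands in for "struct rb_root backoffs" */
};

/* what the block handler does: remember the announced range */
static void insert_backoff(struct spg_mapping *spg, unsigned long long id,
                           unsigned int begin, unsigned int end)
{
        struct backoff *b = malloc(sizeof(*b));         /* error handling omitted */

        b->id = id;
        b->begin = begin;
        b->end = end;
        b->next = spg->backoffs;
        spg->backoffs = b;
}

/* what should_plug_request() asks: is this object covered by any backoff? */
static bool lookup_containing_backoff(const struct spg_mapping *spg,
                                      unsigned int oid)
{
        for (const struct backoff *b = spg->backoffs; b; b = b->next)
                if (oid >= b->begin && oid < b->end)
                        return true;
        return false;
}

/* what the unblock handler does: drop the matching range */
static void erase_backoff(struct spg_mapping *spg, unsigned long long id)
{
        struct backoff **p = &spg->backoffs;

        while (*p) {
                if ((*p)->id == id) {
                        struct backoff *b = *p;
                        *p = b->next;
                        free(b);
                        return;
                }
                p = &(*p)->next;
        }
}

int main(void)
{
        struct spg_mapping spg = { .backoffs = NULL };

        insert_backoff(&spg, 1, 100, 200);      /* block objects 100..199 */
        printf("oid 150 plugged: %d\n", lookup_containing_backoff(&spg, 150));
        printf("oid 250 plugged: %d\n", lookup_containing_backoff(&spg, 250));
        erase_backoff(&spg, 1);                 /* unblock arrives */
        printf("oid 150 plugged after unblock: %d\n",
               lookup_containing_backoff(&spg, 150));
        return 0;
}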
48 However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
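To make the documentation hit above concrete, the sketch below contrasts the two conflict-resolution rules by transaction age (lower stamp = older): under Wait-Die a younger requester backs off immediately, while under Wound-Wait it simply waits and only a younger lock holder gets wounded, which is why Wound-Wait is typically said to generate fewer backoffs. The enum and helpers are purely illustrative and are not part of the kernel ww_mutex API.

/*
 * Illustration of the Wait-Die and Wound-Wait conflict rules.
 * A transaction's stamp is its age: lower stamp = older.
 */
#include <stdio.h>

enum action { WAIT, BACK_OFF, WOUND_HOLDER };

static enum action wait_die(unsigned long req_stamp, unsigned long holder_stamp)
{
        /* older requester waits; younger requester dies (backs off) */
        return req_stamp < holder_stamp ? WAIT : BACK_OFF;
}

static enum action wound_wait(unsigned long req_stamp, unsigned long holder_stamp)
{
        /* older requester wounds the younger holder; younger requester waits */
        return req_stamp < holder_stamp ? WOUND_HOLDER : WAIT;
}

int main(void)
{
        static const char *const name[] = { "WAIT", "BACK_OFF", "WOUND_HOLDER" };

        /* transaction 5 (younger) requests a lock held by transaction 2 (older) */
        printf("wait-die:   %s\n", name[wait_die(5, 2)]);   /* requester backs off */
        printf("wound-wait: %s\n", name[wound_wait(5, 2)]); /* requester just waits */
        return 0;
}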