Lines matching refs:percpu — struct raid5_percpu users in drivers/md/raid5.c
1398 struct raid5_percpu *percpu, int i) in to_addr_conv() argument
1402 addr = flex_array_get(percpu->scribble, i); in to_addr_conv()
1407 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) in to_addr_page() argument
1411 addr = flex_array_get(percpu->scribble, i); in to_addr_page()
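The two helpers above are the only accessors of the per-cpu scribble flex_array: each element is carved into a list of page pointers followed by the addr_conv_t slots the async_tx API needs. A minimal sketch of that split, assuming the conventional layout of disks + 2 page pointers ahead of the conversion area (the "+2" padding is an assumption, only the flex_array_get() calls come from the lines above):

/* Sketch, assuming each scribble element is laid out as
 * [disks + 2 struct page * slots][disks + 2 addr_conv_t slots]. */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu, int i)
{
	void *addr = flex_array_get(percpu->scribble, i);

	return addr + sizeof(struct page *) * (sh->disks + 2);
}

static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
{
	return flex_array_get(percpu->scribble, i);
}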
1416 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1419 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_compute5()
1441 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
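ops_run_compute5() rebuilds one missing block by XOR-ing the surviving pages. A hedged sketch of how the per-cpu scratch feeds async_tx here; the flag choice and source-list construction are simplified assumptions, only the to_addr_page()/to_addr_conv() plumbing and the ops_complete_compute callback are taken from the fragments above:

/* Sketch: xor_srcs lives in the per-cpu scribble, and the addr_conv_t
 * area of the same element backs the async_submit descriptor. */
int disks = sh->disks;
int target = sh->ops.target;
struct page **xor_srcs = to_addr_page(percpu, 0);
struct page *dest = sh->dev[target].page;	/* block being recomputed */
struct async_submit_ctl submit;
struct dma_async_tx_descriptor *tx;
int count = 0, i;

for (i = disks; i--; )
	if (i != target)
		xor_srcs[count++] = sh->dev[i].page;

init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST, NULL,
		  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);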
1498 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1501 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_1()
1535 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1548 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1556 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1567 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_2()
1610 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1634 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1641 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1648 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
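For RAID6, the compute paths use the syndrome generator instead of a plain XOR. A rough sketch of the single-failure case in ops_run_compute6_1() when the missing block is Q, assuming the driver's usual set_syndrome_sources() ordering; apart from the scribble accessors, the details are assumptions:

/* Sketch: blocks[] (page pointers in syndrome order) comes from the
 * per-cpu scribble; async_gen_syndrome() recomputes P and Q from it. */
struct page **blocks = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
struct dma_async_tx_descriptor *tx;
int count;

count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
		  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
tx = async_gen_syndrome(blocks, 0, count + 2, STRIPE_SIZE, &submit);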
1679 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1683 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_prexor5()
1704 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1711 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1714 struct page **blocks = to_addr_page(percpu, 0); in ops_run_prexor6()
1724 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
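The prexor step subtracts the old data from the existing parity before new data is XOR-ed back in (read-modify-write). A sketch of the RAID5 variant, assuming the usual ASYNC_TX_XOR_DROP_DST convention for in-place XOR; only the scratch-buffer usage and the ops_complete_prexor callback follow the fragments above (tx and percpu are the function arguments shown at line 1679):

/* Sketch: old parity is both destination and implicit source
 * (ASYNC_TX_XOR_DROP_DST); old data blocks are XOR-ed out of it. */
int disks = sh->disks;
struct page **xor_srcs = to_addr_page(percpu, 0);
struct page *xor_dest = sh->dev[sh->pd_idx].page;
struct async_submit_ctl submit;
int count = 0, i;

for (i = disks; i--; ) {
	struct r5dev *dev = &sh->dev[i];

	if (test_bit(R5_Wantdrain, &dev->flags))
		xor_srcs[count++] = dev->page;
}

init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_DROP_DST, tx,
		  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);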
1849 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1880 xor_srcs = to_addr_page(percpu, j); in ops_run_reconstruct5()
1916 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1920 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1936 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1964 blocks = to_addr_page(percpu, j); in ops_run_reconstruct6()
1982 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1985 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
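Unlike the single-index ops above, the reconstruct paths index the scribble with a running j because a whole batch of stripe_heads is processed in one call: each batch member gets its own scribble element so its XOR or syndrome descriptor has private scratch space. A simplified sketch of that pattern (the real batch walk, flags, and source setup are more involved; sh, percpu, and tx are the function arguments):

/* Sketch: one scribble element per stripe in the batch; the completion
 * callback is only attached to the descriptor of the last member. */
int j = 0, count;
bool last;
struct stripe_head *head_sh = sh;

do {
	struct page **xor_srcs = to_addr_page(percpu, j);
	addr_conv_t *conv = to_addr_conv(sh, percpu, j);
	struct async_submit_ctl submit;

	count = 0;	/* ... fill xor_srcs[] for this stripe ... */
	last = list_first_entry(&sh->batch_list, struct stripe_head,
				batch_list) == head_sh;
	init_async_submit(&submit, ASYNC_TX_FENCE, tx,
			  last ? ops_complete_reconstruct : NULL,
			  head_sh, conv);
	tx = async_xor(sh->dev[sh->pd_idx].page, xor_srcs, 0, count,
		       STRIPE_SIZE, &submit);

	sh = list_first_entry(&sh->batch_list, struct stripe_head,
			      batch_list);
	j++;
} while (sh != head_sh);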
2007 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2013 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_check_p()
2033 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2042 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2044 struct page **srcs = to_addr_page(percpu, 0); in ops_run_check_pq()
2058 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2060 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
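The check paths verify parity without writing anything back; percpu->spare_page gives the syndrome validator a throw-away destination so the stripe's real P/Q pages stay untouched. A sketch of the P/Q check around the fragment above, with the source-list setup assumed (checkp is the function argument, 0 meaning "P only"):

/* Sketch: async_syndrome_val() recomputes the syndrome into spare pages
 * and reports mismatches via zero_sum_result instead of writing them. */
struct page **srcs = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
int count;

count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
if (!checkp)
	srcs[count] = NULL;	/* skip Q, validate P only */

init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
		  sh, to_addr_conv(sh, percpu, 0));
async_syndrome_val(srcs, 0, count + 2, STRIPE_SIZE,
		   &sh->ops.zero_sum_result, percpu->spare_page, &submit);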
2069 struct raid5_percpu *percpu; in raid_run_ops() local
2073 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2081 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2084 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2086 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2095 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2097 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2101 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2110 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2112 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2117 ops_run_check_p(sh, percpu); in raid_run_ops()
2119 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2121 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
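raid_run_ops() is the dispatcher: it pins the current CPU, grabs that CPU's scratch area, and hands it to whichever ops the stripe requested, so preemption cannot migrate the stripe onto another CPU's scribble mid-chain. A condensed sketch of that pattern; the ops_request bit tests shown are illustrative, not the full set (biofill, drain, and partial parity are omitted):

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct dma_async_tx_descriptor *tx = NULL;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();			/* disables preemption */
	percpu = per_cpu_ptr(conf->percpu, cpu);

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request))
		tx = (level < 6) ? ops_run_compute5(sh, percpu)
				 : ops_run_compute6_1(sh, percpu);
				 /* or ops_run_compute6_2() for two targets */
	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = (level < 6) ? ops_run_prexor5(sh, percpu, tx)
				 : ops_run_prexor6(sh, percpu, tx);
	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}
	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check_p(sh, percpu);	/* or ops_run_check_pq() */

	put_cpu();				/* re-enables preemption */
}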
2274 struct raid5_percpu *percpu; in resize_chunks() local
2277 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2283 flex_array_free(percpu->scribble); in resize_chunks()
2284 percpu->scribble = scribble; in resize_chunks()
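resize_chunks() runs when a reshape grows the disk count or chunk size beyond what the existing scribble buffers can describe: a larger flex_array is allocated for each CPU and the old one is only freed on success. A sketch of that swap loop, assuming the array is suspended around it and that new_disks/new_sectors are the reshape targets (the exact guards are an assumption):

/* Sketch: grow every CPU's scribble; keep the old array if allocation
 * fails so in-flight stripes still have valid scratch space. */
int err = 0;
unsigned long cpu;

get_online_cpus();
for_each_present_cpu(cpu) {
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
	struct flex_array *scribble;

	scribble = scribble_alloc(new_disks,
				  new_sectors / STRIPE_SECTORS, GFP_NOIO);
	if (!scribble) {
		err = -ENOMEM;
		break;
	}
	flex_array_free(percpu->scribble);
	percpu->scribble = scribble;
}
put_online_cpus();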
6726 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
6728 safe_put_page(percpu->spare_page); in free_scratch_buffer()
6729 if (percpu->scribble) in free_scratch_buffer()
6730 flex_array_free(percpu->scribble); in free_scratch_buffer()
6731 percpu->spare_page = NULL; in free_scratch_buffer()
6732 percpu->scribble = NULL; in free_scratch_buffer()
6735 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
6737 if (conf->level == 6 && !percpu->spare_page) in alloc_scratch_buffer()
6738 percpu->spare_page = alloc_page(GFP_KERNEL); in alloc_scratch_buffer()
6739 if (!percpu->scribble) in alloc_scratch_buffer()
6740 percpu->scribble = scribble_alloc(max(conf->raid_disks, in alloc_scratch_buffer()
6747 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { in alloc_scratch_buffer()
6748 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
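alloc_scratch_buffer() above fills one CPU's slot: a spare page for RAID6 syndrome checks plus the scribble flex_array. The scribble_alloc() it calls does not itself match "percpu" and so is not in this listing; a sketch of what it plausibly looks like, sized so one element can hold the page list and addr_conv_t area used by to_addr_page()/to_addr_conv() (element layout and the "+2" are assumptions):

/* Sketch: one flex_array with cnt elements; each element holds num+2 page
 * pointers plus num+2 addr_conv_t slots, preallocated up front so the hot
 * path never allocates inside flex_array_get(). */
static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
{
	struct flex_array *ret;
	size_t len = sizeof(struct page *) * (num + 2) +
		     sizeof(addr_conv_t) * (num + 2);

	ret = flex_array_alloc(len, cnt, flags);
	if (!ret)
		return NULL;
	if (flex_array_prealloc(ret, 0, cnt, flags)) {
		flex_array_free(ret);
		return NULL;
	}
	return ret;
}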
6759 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
6765 if (!conf->percpu) in raid5_free_percpu()
6769 free_percpu(conf->percpu); in raid5_free_percpu()
6795 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare() local
6797 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
6809 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
6810 if (!conf->percpu) in raid5_alloc_percpu()
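raid5_alloc_percpu() allocates the per-cpu array of struct raid5_percpu itself; the actual scratch buffers are then filled per CPU through the hotplug callbacks listed above (raid456_cpu_up_prepare / raid456_cpu_dead). A sketch of that wiring, assuming the multi-instance cpuhp state md registers at module init; the scribble_disks/scribble_sectors bookkeeping is an assumption consistent with the resize_chunks() lines above:

/* Sketch: allocate the per-cpu struct array, then let the hotplug state
 * machine invoke raid456_cpu_up_prepare() for every online CPU, which
 * calls alloc_scratch_buffer() for that CPU's slot. */
static int raid5_alloc_percpu(struct r5conf *conf)
{
	int err;

	conf->percpu = alloc_percpu(struct raid5_percpu);
	if (!conf->percpu)
		return -ENOMEM;

	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
	if (!err) {
		conf->scribble_disks = max(conf->raid_disks,
					   conf->previous_raid_disks);
		conf->scribble_sectors = max(conf->chunk_sectors,
					     conf->prev_chunk_sectors);
	}
	return err;
}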