Lines matching references to disks
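
These matches appear to come from the Linux kernel's async_tx RAID6 P+Q code (conventionally crypto/async_tx/async_pq.c), where disks counts the data blocks plus the two parity blocks P and Q. Throughout the listing the parity pages are addressed through the P() and Q() helpers, which index the tail of the blocks array. A minimal sketch of that convention follows; the macro bodies are an assumption consistent with how the listing uses them, not text quoted from it.

	/* Assumed layout: blocks[0 .. disks-3] hold data, followed by P and Q.
	 * These macro bodies are an assumption matching the listing's usage
	 * (e.g. unmap->addr[disks - 2] / [disks - 1] as the two destinations). */
	#define P(b, d)	((b)[(d) - 2])	/* P parity page, index disks - 2 */
	#define Q(b, d)	((b)[(d) - 1])	/* Q parity page, index disks - 1 */
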
36 const unsigned char *scfs, int disks, in do_async_gen_syndrome() argument
46 int src_cnt = disks - 2; in do_async_gen_syndrome()
76 dma_dest[0] = unmap->addr[disks - 2]; in do_async_gen_syndrome()
77 dma_dest[1] = unmap->addr[disks - 1]; in do_async_gen_syndrome()
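
The do_async_gen_syndrome() references above show how the DMA path splits the mapped addresses: the first disks - 2 entries are the data sources, and the last two become the P and Q destinations. A hedged illustration of that split; the helper name and signature are hypothetical, and kernel context is assumed for dma_addr_t.

	/* Hypothetical helper, not from the listing: peel the tail of the
	 * mapped address array off as the P and Q destinations. */
	static void split_pq_dma_addrs(dma_addr_t *addrs, int disks,
				       int *src_cnt, dma_addr_t dma_dest[2])
	{
		*src_cnt    = disks - 2;	/* data blocks only */
		dma_dest[0] = addrs[disks - 2];	/* P destination */
		dma_dest[1] = addrs[disks - 1];	/* Q destination */
	}
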
107 do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in do_sync_gen_syndrome() argument
112 int start = -1, stop = disks - 3; in do_sync_gen_syndrome()
119 for (i = 0; i < disks; i++) { in do_sync_gen_syndrome()
121 BUG_ON(i > disks - 3); /* P or Q can't be zero */ in do_sync_gen_syndrome()
125 if (i < disks - 2) { in do_sync_gen_syndrome()
135 raid6_call.xor_syndrome(disks, start, stop, len, srcs); in do_sync_gen_syndrome()
137 raid6_call.gen_syndrome(disks, len, srcs); in do_sync_gen_syndrome()
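
do_sync_gen_syndrome() is the software fallback. It walks all disks entries, substitutes zeros for any absent data block (the BUG_ON above guarantees P and Q themselves are never absent), and records the first and last present data block in start/stop. Depending on the submit flags it then calls raid6_call.xor_syndrome() to fold that range into an existing P/Q, or raid6_call.gen_syndrome() to recompute both parities outright. Below is a minimal sketch of the start/stop bookkeeping only, with a hypothetical function name and the same index convention as above.

	/* Hypothetical sketch of the range tracking visible above: only data
	 * blocks (indices < disks - 2) may be NULL; the present ones bound the
	 * [start, stop] range handed to a partial xor_syndrome() update. */
	static void find_present_data_range(struct page **blocks, int disks,
					    int *start, int *stop)
	{
		*start = -1;
		*stop  = disks - 3;
		for (int i = 0; i < disks - 2; i++) {
			if (!blocks[i])
				continue;	/* absent block contributes zeros */
			*stop = i;		/* last present data block */
			if (*start == -1)
				*start = i;	/* first present data block */
		}
	}
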
163 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in async_gen_syndrome() argument
166 int src_cnt = disks - 2; in async_gen_syndrome()
168 &P(blocks, disks), 2, in async_gen_syndrome()
173 BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks))); in async_gen_syndrome()
176 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); in async_gen_syndrome()
190 __func__, disks, len); in async_gen_syndrome()
211 if (P(blocks, disks)) in async_gen_syndrome()
212 unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), in async_gen_syndrome()
220 if (Q(blocks, disks)) in async_gen_syndrome()
221 unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), in async_gen_syndrome()
236 pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); in async_gen_syndrome()
241 if (!P(blocks, disks)) { in async_gen_syndrome()
242 P(blocks, disks) = pq_scribble_page; in async_gen_syndrome()
245 if (!Q(blocks, disks)) { in async_gen_syndrome()
246 Q(blocks, disks) = pq_scribble_page; in async_gen_syndrome()
249 do_sync_gen_syndrome(blocks, offset, disks, len, submit); in async_gen_syndrome()
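
async_gen_syndrome() is the public entry point. It asks the async_tx core for a DMA_PQ-capable channel, and on the hardware path maps the data blocks as sources and P/Q bidirectionally (lines 211-221 above), while on the fallback path (lines 236-249) it substitutes pq_scribble_page for a missing parity destination and hands the work to do_sync_gen_syndrome(). A hedged example of how a caller might drive it; the wrapper name, flags, and callback plumbing are illustrative, and only init_async_submit() and async_gen_syndrome() are taken from the async_tx API.

	#include <linux/async_tx.h>

	/* Hypothetical caller: compute P and Q for one stripe.  blocks[] must
	 * hold the data pages followed by the P and Q pages, matching the
	 * layout the listing assumes. */
	static struct dma_async_tx_descriptor *
	compute_stripe_pq(struct page **blocks, int disks, size_t len,
			  dma_async_tx_callback done, void *done_arg,
			  addr_conv_t *scribble)
	{
		struct async_submit_ctl submit;

		init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, done_arg,
				  scribble);
		return async_gen_syndrome(blocks, 0, disks, len, &submit);
	}
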
256 pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) in pq_val_chan() argument
262 disks, len); in pq_val_chan()
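
pq_val_chan() is a thin helper used by the validation path below: it asks the async_tx core for a channel advertising DMA_PQ_VAL capability, and kernels configured with CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA make it return NULL so validation always falls back to software. A sketch of the helper's likely shape, hedged because only its signature and the disks/len arguments appear in the listing.

	/* Likely shape of the helper, reconstructed from the listing;
	 * async_tx_find_channel() is the standard async_tx lookup primitive. */
	static struct dma_chan *
	pq_val_chan(struct async_submit_ctl *submit, struct page **blocks,
		    int disks, size_t len)
	{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
		return NULL;			/* force the synchronous check */
	#endif
		return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,
					     blocks, disks, len);
	}
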
281 async_syndrome_val(struct page **blocks, unsigned int offset, int disks, in async_syndrome_val() argument
285 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); in async_syndrome_val()
292 BUG_ON(disks < 4 || disks > MAX_DISKS); in async_syndrome_val()
295 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); in async_syndrome_val()
297 if (unmap && disks <= dma_maxpq(device, 0) && in async_syndrome_val()
304 __func__, disks, len); in async_syndrome_val()
307 for (i = 0; i < disks-2; i++) in async_syndrome_val()
318 if (!P(blocks, disks)) { in async_syndrome_val()
322 pq[0] = dma_map_page(dev, P(blocks, disks), in async_syndrome_val()
328 if (!Q(blocks, disks)) { in async_syndrome_val()
332 pq[1] = dma_map_page(dev, Q(blocks, disks), in async_syndrome_val()
357 struct page *p_src = P(blocks, disks); in async_syndrome_val()
358 struct page *q_src = Q(blocks, disks); in async_syndrome_val()
366 __func__, disks, len); in async_syndrome_val()
384 tx = async_xor(spare, blocks, offset, disks-2, len, submit); in async_syndrome_val()
392 P(blocks, disks) = NULL; in async_syndrome_val()
393 Q(blocks, disks) = spare; in async_syndrome_val()
395 tx = async_gen_syndrome(blocks, offset, disks, len, submit); in async_syndrome_val()
403 P(blocks, disks) = p_src; in async_syndrome_val()
404 Q(blocks, disks) = q_src; in async_syndrome_val()
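
The async_syndrome_val() references close out the listing with its software fallback: the current P and Q pages are saved (lines 357-358), the data blocks are XOR-summed into the caller-supplied spare page for comparison against P (line 384), and Q is then checked by temporarily pointing Q(blocks, disks) at the spare, regenerating the syndrome with async_gen_syndrome(), and comparing the result against the saved q_src before both pointers are restored (lines 392-404). A hedged sketch of that Q re-check; the function name is hypothetical and the P()/Q() macros follow the convention sketched at the top of the listing.

	/* Hypothetical sketch of the Q re-check visible above.  The caller is
	 * expected to restore P(blocks, disks) and Q(blocks, disks) afterwards,
	 * as the last two listed lines do. */
	static void check_q_against_spare(struct page **blocks, unsigned int offset,
					  int disks, size_t len, struct page *spare,
					  struct page *q_src,
					  enum sum_check_flags *pqres,
					  struct async_submit_ctl *submit)
	{
		struct dma_async_tx_descriptor *tx;
		void *q, *s;

		P(blocks, disks) = NULL;	/* only Q matters here; a NULL P is discarded downstream */
		Q(blocks, disks) = spare;	/* regenerate Q into the spare page */
		tx = async_gen_syndrome(blocks, offset, disks, len, submit);
		async_tx_quiesce(&tx);		/* wait for the regenerated syndrome */

		q = page_address(q_src) + offset;
		s = page_address(spare) + offset;
		*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;	/* flag any mismatch */
	}
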