// SPDX-License-Identifier: GPL-2.0-only
/*
 * xor offload engine api
 *
 * Copyright © 2006, Intel Corporation.
 *
 * Dan Williams <dan.j.williams@intel.com>
 *
 * with architecture considerations by:
 * Neil Brown <neilb@suse.de>
 * Jeff Garzik <jeff@garzik.org>
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>

/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
	     struct async_submit_ctl *submit)
{
	struct dma_device *dma = chan->device;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	enum async_tx_flags flags_orig = submit->flags;
	enum dma_ctrl_flags dma_flags = 0;
	int src_cnt = unmap->to_cnt;
	int xor_src_cnt;
	dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
	dma_addr_t *src_list = unmap->addr;

	while (src_cnt) {
		dma_addr_t tmp;

		submit->flags = flags_orig;
		xor_src_cnt = min(src_cnt, (int)dma->max_xor);
		/* if we are submitting additional xors, leave the chain open
		 * and clear the callback parameters
		 */
		if (src_cnt > xor_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
		}
		if (submit->cb_fn)
			dma_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they cannot
		 * provide a descriptor
		 */
		tmp = src_list[0];
		if (src_list > unmap->addr)
			src_list[0] = dma_dest;
		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
					      xor_src_cnt, unmap->len,
					      dma_flags);

		if (unlikely(!tx))
			async_tx_quiesce(&submit->depend_tx);

		/* spin wait for the preceding transactions to complete */
		while (unlikely(!tx)) {
			dma_async_issue_pending(chan);
			tx = dma->device_prep_dma_xor(chan, dma_dest,
						      src_list,
						      xor_src_cnt, unmap->len,
						      dma_flags);
		}
		src_list[0] = tmp;

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

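		/* Illustrative walk-through (numbers are hypothetical): with
		 * dma->max_xor == 8 and 12 sources, the first pass xors
		 * sources 0-7 into dest.  Four sources remain and dest is
		 * re-inserted as a source in their place, so the second
		 * pass runs with src_cnt == 5 and completes the operation.
		 */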
		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			/* use the intermediate result as a source */
			src_cnt++;
			src_list += xor_src_cnt - 1;
		} else
			break;
	}

	return tx;
}

static void
do_sync_xor_offs(struct page *dest, unsigned int offset,
		 struct page **src_list, unsigned int *src_offs,
		 int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	int i;
	int xor_src_cnt = 0;
	int src_off = 0;
	void *dest_buf;
	void **srcs;

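	/* Without a caller-provided scribble region, the src_list array of
	 * page pointers is reused in place to hold the converted virtual
	 * addresses, which is why callers must expect it to be overwritten.
	 */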
	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) src_list;

	/* convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		if (src_list[i])
			srcs[xor_src_cnt++] = page_address(src_list[i]) +
				(src_offs ? src_offs[i] : offset);
	src_cnt = xor_src_cnt;
	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(submit);
}

static inline bool
dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset,
			unsigned int *src_offs, int src_cnt, int len)
{
	int i;

	if (!is_dma_xor_aligned(device, offset, 0, len))
		return false;

	if (!src_offs)
		return true;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_xor_aligned(device, src_offs[i], 0, len))
			return false;
	}
	return true;
}

/**
 * async_xor_offs - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @offset: dst offset to start transaction
 * @src_list: array of source pages
 * @src_offs: array of source page offsets, NULL means common src/dst offset
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * xor_blocks always uses the dest as a source, so the
 * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 * the calculation.  The assumption with dma engines is that they only
 * use the destination buffer as a source when it is explicitly specified
 * in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor_offs(struct page *dest, unsigned int offset,
		struct page **src_list, unsigned int *src_offs,
		int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(src_cnt <= 1);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);

	if (unmap && dma_xor_aligned_offsets(device, offset,
				src_offs, src_cnt, len)) {
		struct dma_async_tx_descriptor *tx;
		int i, j;

		/* run the xor asynchronously */
		pr_debug("%s (async): len: %zu\n", __func__, len);

		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (!src_list[i])
				continue;
			unmap->to_cnt++;
			unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
							src_offs ? src_offs[i] : offset,
							len, DMA_TO_DEVICE);
		}

		/* map it bidirectional as it may be re-used as a source */
		unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
					      DMA_BIDIRECTIONAL);
		unmap->bidi_cnt = 1;

		tx = do_async_xor(chan, unmap, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	} else {
		dmaengine_unmap_put(unmap);
		/* run the xor synchronously */
		pr_debug("%s (sync): len: %zu\n", __func__, len);
		WARN_ONCE(chan, "%s: no space for dma address conversion\n",
			  __func__);

		/* in the sync case the dest is an implied source
		 * (assumes the dest is the first source)
		 */
		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
			src_cnt--;
			src_list++;
		}

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		do_sync_xor_offs(dest, offset, src_list, src_offs,
				src_cnt, len, submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_xor_offs);
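
/* Illustrative usage sketch (not part of this file): xor two data pages
 * into a freshly zeroed parity page with per-source offsets.  The pages
 * 'data0'/'data1'/'parity', the callback 'parity_done', and its context
 * 'ctx' are hypothetical names chosen for the example.
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	struct page *srcs[2] = { data0, data1 };
 *	unsigned int offs[2] = { 0, 0 };
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  parity_done, ctx, NULL);
 *	tx = async_xor_offs(parity, 0, srcs, offs, 2, PAGE_SIZE, &submit);
 */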

/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * xor_blocks always uses the dest as a source, so the
 * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 * the calculation.  The assumption with dma engines is that they only
 * use the destination buffer as a source when it is explicitly specified
 * in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, struct async_submit_ctl *submit)
{
	return async_xor_offs(dest, offset, src_list, NULL,
			src_cnt, len, submit);
}
EXPORT_SYMBOL_GPL(async_xor);
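
/* Illustrative sketch: when dest already holds valid data that should be
 * included in the xor, pass it at src_list[0] and set
 * ASYNC_TX_XOR_DROP_DST so the sync fallback does not count it twice.
 * 'blocks' (with blocks[0] == dest), 'done', and 'ctx' are hypothetical.
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, done,
 *			  ctx, NULL);
 *	tx = async_xor(dest, blocks, 0, src_cnt, PAGE_SIZE, &submit);
 */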

static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	return !memchr_inv(page_address(p) + offset, 0, len);
}

static inline struct dma_chan *
xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
	     struct page **src_list, int src_cnt, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
				     src_cnt, len);
}

/**
 * async_xor_val_offs - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @offset: dest offset in pages to start transaction
 * @src_list: array of source pages
 * @src_offs: array of source page offsets, NULL means common src/dst offset
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
		struct page **src_list, unsigned int *src_offs,
		int src_cnt, size_t len, enum sum_check_flags *result,
		struct async_submit_ctl *submit)
{
	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(src_cnt <= 1);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);

	if (unmap && src_cnt <= device->max_xor &&
	    dma_xor_aligned_offsets(device, offset, src_offs, src_cnt, len)) {
		unsigned long dma_prep_flags = 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;

		for (i = 0; i < src_cnt; i++) {
			unmap->addr[i] = dma_map_page(device->dev, src_list[i],
						      src_offs ? src_offs[i] : offset,
						      len, DMA_TO_DEVICE);
			unmap->to_cnt++;
		}
		unmap->len = len;

		tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
						     len, result,
						     dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&submit->depend_tx);

			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_xor_val(chan,
					unmap->addr, src_cnt, len, result,
					dma_prep_flags);
			}
		}
		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		enum async_tx_flags flags_orig = submit->flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);
		WARN_ONCE(device && src_cnt <= device->max_xor,
			  "%s: no space for dma address conversion\n",
			  __func__);

		submit->flags |= ASYNC_TX_XOR_DROP_DST;
		submit->flags &= ~ASYNC_TX_ACK;

		tx = async_xor_offs(dest, offset, src_list, src_offs,
				src_cnt, len, submit);

		async_tx_quiesce(&tx);

		*result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;

		async_tx_sync_epilog(submit);
		submit->flags = flags_orig;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_val_offs);
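
/* Illustrative sketch: check that a parity page matches the xor of its
 * data pages.  Here 'blocks[0]' is the parity page (also passed as
 * dest); 'blocks', 'offs', 'check_done', and 'ctx' are hypothetical.
 *
 *	enum sum_check_flags result = 0;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, check_done, ctx,
 *			  NULL);
 *	tx = async_xor_val_offs(blocks[0], 0, blocks, offs, src_cnt,
 *				PAGE_SIZE, &result, &submit);
 *
 * After completion, a non-zero (result & SUM_CHECK_P_RESULT) indicates
 * the parity did not check out.
 */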

/**
 * async_xor_val - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit)
{
	return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
				  len, result, submit);
}
EXPORT_SYMBOL_GPL(async_xor_val);
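
/* Illustrative sketch: async_xor_val is the common-offset wrapper, so a
 * caller that keeps all pages aligned at the same offset can simply do
 * (names are hypothetical):
 *
 *	tx = async_xor_val(blocks[0], blocks, 0, src_cnt, PAGE_SIZE,
 *			   &result, &submit);
 */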

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");