1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel MIC Platform Software Stack (MPSS)
4  *
5  * Copyright(c) 2015 Intel Corporation.
6  *
7  * Intel SCIF driver.
8  */
9 #include "scif_main.h"
10 #include "scif_map.h"
11 
12 /*
13  * struct scif_dma_comp_cb - SCIF DMA completion callback
14  *
15  * @dma_completion_func: DMA completion callback
16  * @cb_cookie: DMA completion callback cookie
17  * @temp_buf: Temporary buffer
18  * @temp_buf_to_free: Temporary buffer to be freed
19  * @is_cache: Is a kmem_cache allocated buffer
20  * @dst_offset: Destination registration offset
21  * @dst_window: Destination registration window
22  * @len: Length of the temp buffer
23  * @temp_phys: DMA address of the temp buffer
24  * @sdev: The SCIF device
25  * @header_padding: padding for cache line alignment
26  */
27 struct scif_dma_comp_cb {
28 	void (*dma_completion_func)(void *cookie);
29 	void *cb_cookie;
30 	u8 *temp_buf;
31 	u8 *temp_buf_to_free;
32 	bool is_cache;
33 	s64 dst_offset;
34 	struct scif_window *dst_window;
35 	size_t len;
36 	dma_addr_t temp_phys;
37 	struct scif_dev *sdev;
38 	int header_padding;
39 };
40 
41 /**
42  * struct scif_copy_work - Work for DMA copy
43  *
44  * @src_offset: Starting source offset
45  * @dst_offset: Starting destination offset
46  * @src_window: Starting src registered window
47  * @dst_window: Starting dst registered window
48  * @loopback: true if this is a loopback DMA transfer
49  * @len: Length of the transfer
50  * @comp_cb: DMA copy completion callback
51  * @remote_dev: The remote SCIF peer device
52  * @fence_type: polling or interrupt based
53  * @ordered: is this a tail byte ordered DMA transfer
54  */
55 struct scif_copy_work {
56 	s64 src_offset;
57 	s64 dst_offset;
58 	struct scif_window *src_window;
59 	struct scif_window *dst_window;
60 	int loopback;
61 	size_t len;
62 	struct scif_dma_comp_cb   *comp_cb;
63 	struct scif_dev	*remote_dev;
64 	int fence_type;
65 	bool ordered;
66 };
67 
68 /**
69  * scif_reserve_dma_chan:
70  * @ep: Endpoint Descriptor.
71  *
72  * This routine reserves a DMA channel for a particular
73  * endpoint. All DMA transfers for an endpoint are always
74  * programmed on the same DMA channel.
75  */
76 int scif_reserve_dma_chan(struct scif_endpt *ep)
77 {
78 	int err = 0;
79 	struct scif_dev *scifdev;
80 	struct scif_hw_dev *sdev;
81 	struct dma_chan *chan;
82 
83 	/* Loopback DMAs are not supported on the management node */
84 	if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
85 		return 0;
86 	if (scif_info.nodeid)
87 		scifdev = &scif_dev[0];
88 	else
89 		scifdev = ep->remote_dev;
90 	sdev = scifdev->sdev;
91 	if (!sdev->num_dma_ch)
92 		return -ENODEV;
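	/*
	 * Assign the next channel in round-robin order so that endpoints
	 * are spread evenly across the available DMA channels.
	 */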
93 	chan = sdev->dma_ch[scifdev->dma_ch_idx];
94 	scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
95 	mutex_lock(&ep->rma_info.rma_lock);
96 	ep->rma_info.dma_chan = chan;
97 	mutex_unlock(&ep->rma_info.rma_lock);
98 	return err;
99 }
100 
101 #ifdef CONFIG_MMU_NOTIFIER
102 /**
103  * __scif_rma_destroy_tcw:
104  *
105  * This routine destroys temporary cached windows that overlap [start, start + len)
106  */
107 static
108 void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
109 			    u64 start, u64 len)
110 {
111 	struct list_head *item, *tmp;
112 	struct scif_window *window;
113 	u64 start_va, end_va;
114 	u64 end = start + len;
115 
116 	if (end <= start)
117 		return;
118 
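	/*
	 * Walk the temp cached window list and tear down every window that
	 * overlaps [start, end); stop once the remaining windows begin
	 * beyond the end of the range.
	 */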
119 	list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
120 		window = list_entry(item, struct scif_window, list);
121 		if (!len)
122 			break;
123 		start_va = window->va_for_temp;
124 		end_va = start_va + (window->nr_pages << PAGE_SHIFT);
125 		if (start < start_va && end <= start_va)
126 			break;
127 		if (start >= end_va)
128 			continue;
129 		__scif_rma_destroy_tcw_helper(window);
130 	}
131 }
132 
133 static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
134 {
135 	struct scif_endpt *ep = mmn->ep;
136 
137 	spin_lock(&ep->rma_info.tc_lock);
138 	__scif_rma_destroy_tcw(mmn, start, len);
139 	spin_unlock(&ep->rma_info.tc_lock);
140 }
141 
142 static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
143 {
144 	struct list_head *item, *tmp;
145 	struct scif_mmu_notif *mmn;
146 
147 	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
148 		mmn = list_entry(item, struct scif_mmu_notif, list);
149 		scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
150 	}
151 }
152 
153 static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
154 {
155 	struct list_head *item, *tmp;
156 	struct scif_mmu_notif *mmn;
157 
158 	spin_lock(&ep->rma_info.tc_lock);
159 	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
160 		mmn = list_entry(item, struct scif_mmu_notif, list);
161 		__scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
162 	}
163 	spin_unlock(&ep->rma_info.tc_lock);
164 }
165 
166 static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
167 {
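	/*
	 * Refuse to cache if this transfer alone exceeds the temp cache
	 * limit. If the cumulative cached pages would exceed the limit,
	 * flush the cached windows for this endpoint before allowing
	 * the new one to be cached.
	 */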
168 	if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
169 		return false;
170 	if ((atomic_read(&ep->rma_info.tcw_total_pages)
171 			+ (cur_bytes >> PAGE_SHIFT)) >
172 			scif_info.rma_tc_limit) {
173 		dev_info(scif_info.mdev.this_device,
174 			 "%s %d total=%d, current=%zu reached max\n",
175 			 __func__, __LINE__,
176 			 atomic_read(&ep->rma_info.tcw_total_pages),
177 			 (1 + (cur_bytes >> PAGE_SHIFT)));
178 		scif_rma_destroy_tcw_invalid();
179 		__scif_rma_destroy_tcw_ep(ep);
180 	}
181 	return true;
182 }
183 
184 static void scif_mmu_notifier_release(struct mmu_notifier *mn,
185 				      struct mm_struct *mm)
186 {
187 	struct scif_mmu_notif	*mmn;
188 
189 	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
190 	scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
191 	schedule_work(&scif_info.misc_work);
192 }
193 
194 static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
195 					const struct mmu_notifier_range *range)
196 {
197 	struct scif_mmu_notif	*mmn;
198 
199 	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
200 	scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);
201 
202 	return 0;
203 }
204 
205 static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
206 			const struct mmu_notifier_range *range)
207 {
208 	/*
209 	 * Nothing to do here, everything needed was done in
210 	 * invalidate_range_start.
211 	 */
212 }
213 
214 static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
215 	.release = scif_mmu_notifier_release,
216 	.clear_flush_young = NULL,
217 	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
218 	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
219 
220 static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
221 {
222 	struct scif_endpt_rma_info *rma = &ep->rma_info;
223 	struct scif_mmu_notif *mmn = NULL;
224 	struct list_head *item, *tmp;
225 
226 	mutex_lock(&ep->rma_info.mmn_lock);
227 	list_for_each_safe(item, tmp, &rma->mmn_list) {
228 		mmn = list_entry(item, struct scif_mmu_notif, list);
229 		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
230 		list_del(item);
231 		kfree(mmn);
232 	}
233 	mutex_unlock(&ep->rma_info.mmn_lock);
234 }
235 
236 static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
237 				   struct mm_struct *mm, struct scif_endpt *ep)
238 {
239 	mmn->ep = ep;
240 	mmn->mm = mm;
241 	mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
242 	INIT_LIST_HEAD(&mmn->list);
243 	INIT_LIST_HEAD(&mmn->tc_reg_list);
244 }
245 
246 static struct scif_mmu_notif *
247 scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
248 {
249 	struct scif_mmu_notif *mmn;
250 
251 	list_for_each_entry(mmn, &rma->mmn_list, list)
252 		if (mmn->mm == mm)
253 			return mmn;
254 	return NULL;
255 }
256 
257 static struct scif_mmu_notif *
258 scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
259 {
260 	struct scif_mmu_notif *mmn
261 		 = kzalloc(sizeof(*mmn), GFP_KERNEL);
262 
263 	if (!mmn)
264 		return ERR_PTR(-ENOMEM);
265 
266 	scif_init_mmu_notifier(mmn, current->mm, ep);
267 	if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
268 		kfree(mmn);
269 		return ERR_PTR(-EBUSY);
270 	}
271 	list_add(&mmn->list, &ep->rma_info.mmn_list);
272 	return mmn;
273 }
274 
275 /*
276  * Called from the misc thread to destroy temporary cached windows and
277  * unregister the MMU notifier for the SCIF endpoint.
278  */
279 void scif_mmu_notif_handler(struct work_struct *work)
280 {
281 	struct list_head *pos, *tmpq;
282 	struct scif_endpt *ep;
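	/*
	 * rmalock is dropped while each endpoint is cleaned up, so the
	 * list walk is restarted from the top after every endpoint.
	 */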
283 restart:
284 	scif_rma_destroy_tcw_invalid();
285 	spin_lock(&scif_info.rmalock);
286 	list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
287 		ep = list_entry(pos, struct scif_endpt, mmu_list);
288 		list_del(&ep->mmu_list);
289 		spin_unlock(&scif_info.rmalock);
290 		scif_rma_destroy_tcw_ep(ep);
291 		scif_ep_unregister_mmu_notifier(ep);
292 		goto restart;
293 	}
294 	spin_unlock(&scif_info.rmalock);
295 }
296 
297 static bool scif_is_set_reg_cache(int flags)
298 {
299 	return !!(flags & SCIF_RMA_USECACHE);
300 }
301 #else
302 static struct scif_mmu_notif *
303 scif_find_mmu_notifier(struct mm_struct *mm,
304 		       struct scif_endpt_rma_info *rma)
305 {
306 	return NULL;
307 }
308 
309 static struct scif_mmu_notif *
310 scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
311 {
312 	return NULL;
313 }
314 
315 void scif_mmu_notif_handler(struct work_struct *work)
316 {
317 }
318 
319 static bool scif_is_set_reg_cache(int flags)
320 {
321 	return false;
322 }
323 
324 static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
325 {
326 	return false;
327 }
328 #endif
329 
330 /**
331  * scif_register_temp:
332  * @epd: End Point Descriptor.
333  * @addr: virtual address to/from which to copy
334  * @len: length of range to copy
335  * @out_offset: computed offset returned by reference.
336  * @out_window: allocated registered window returned by reference.
337  *
338  * Create a temporary registered window. The peer will not know about this
339  * window. This API is used by the scif_vreadfrom()/scif_vwriteto() APIs.
340  */
341 static int
342 scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
343 		   off_t *out_offset, struct scif_window **out_window)
344 {
345 	struct scif_endpt *ep = (struct scif_endpt *)epd;
346 	int err;
347 	scif_pinned_pages_t pinned_pages;
348 	size_t aligned_len;
349 
350 	aligned_len = ALIGN(len, PAGE_SIZE);
351 
352 	err = __scif_pin_pages((void *)(addr & PAGE_MASK),
353 			       aligned_len, &prot, 0, &pinned_pages);
354 	if (err)
355 		return err;
356 
357 	pinned_pages->prot = prot;
358 
359 	/* Compute the offset for this registration */
360 	err = scif_get_window_offset(ep, 0, 0,
361 				     aligned_len >> PAGE_SHIFT,
362 				     (s64 *)out_offset);
363 	if (err)
364 		goto error_unpin;
365 
366 	/* Allocate and prepare self registration window */
367 	*out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
368 					*out_offset, true);
369 	if (!*out_window) {
370 		scif_free_window_offset(ep, NULL, *out_offset);
371 		err = -ENOMEM;
372 		goto error_unpin;
373 	}
374 
375 	(*out_window)->pinned_pages = pinned_pages;
376 	(*out_window)->nr_pages = pinned_pages->nr_pages;
377 	(*out_window)->prot = pinned_pages->prot;
378 
379 	(*out_window)->va_for_temp = addr & PAGE_MASK;
380 	err = scif_map_window(ep->remote_dev, *out_window);
381 	if (err) {
382 		/* Something went wrong! Rollback */
383 		scif_destroy_window(ep, *out_window);
384 		*out_window = NULL;
385 	} else {
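		/* Fold the sub-page offset of addr back into the returned offset */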
386 		*out_offset |= (addr - (*out_window)->va_for_temp);
387 	}
388 	return err;
389 error_unpin:
390 	if (err)
391 		dev_err(&ep->remote_dev->sdev->dev,
392 			"%s %d err %d\n", __func__, __LINE__, err);
393 	scif_unpin_pages(pinned_pages);
394 	return err;
395 }
396 
397 #define SCIF_DMA_TO (3 * HZ)
398 
399 /*
400  * scif_sync_dma - Program a DMA without an interrupt descriptor
401  *
402  * @sdev - The SCIF hardware device instance used
403  * for DMA registration.
404  * @chan - DMA channel to be used.
405  * @sync_wait - Wait for the DMA to complete if true.
406  *
407  * Return 0 on success and -errno on error.
408  */
409 static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
410 			 bool sync_wait)
411 {
412 	int err = 0;
413 	struct dma_async_tx_descriptor *tx = NULL;
414 	enum dma_ctrl_flags flags = DMA_PREP_FENCE;
415 	dma_cookie_t cookie;
416 	struct dma_device *ddev;
417 
418 	if (!chan) {
419 		err = -EIO;
420 		dev_err(&sdev->dev, "%s %d err %d\n",
421 			__func__, __LINE__, err);
422 		return err;
423 	}
424 	ddev = chan->device;
425 
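	/*
	 * A zero length memcpy descriptor is submitted purely as a marker:
	 * once it completes, all descriptors submitted earlier on this
	 * channel have completed as well, which is what drains the channel.
	 */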
426 	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
427 	if (!tx) {
428 		err = -ENOMEM;
429 		dev_err(&sdev->dev, "%s %d err %d\n",
430 			__func__, __LINE__, err);
431 		goto release;
432 	}
433 	cookie = tx->tx_submit(tx);
434 
435 	if (dma_submit_error(cookie)) {
436 		err = -ENOMEM;
437 		dev_err(&sdev->dev, "%s %d err %d\n",
438 			__func__, __LINE__, err);
439 		goto release;
440 	}
441 	if (!sync_wait) {
442 		dma_async_issue_pending(chan);
443 	} else {
444 		if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
445 			err = 0;
446 		} else {
447 			err = -EIO;
448 			dev_err(&sdev->dev, "%s %d err %d\n",
449 				__func__, __LINE__, err);
450 		}
451 	}
452 release:
453 	return err;
454 }
455 
456 static void scif_dma_callback(void *arg)
457 {
458 	struct completion *done = (struct completion *)arg;
459 
460 	complete(done);
461 }
462 
463 #define SCIF_DMA_SYNC_WAIT true
464 #define SCIF_DMA_POLL BIT(0)
465 #define SCIF_DMA_INTR BIT(1)
466 
467 /*
468  * scif_async_dma - Program a DMA with an interrupt descriptor
469  *
470  * @sdev - The SCIF hardware device instance used
471  * for DMA registration.
472  * @chan - DMA channel to be used.
473  * Return 0 on success and -errno on error.
474  */
475 static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
476 {
477 	int err = 0;
478 	struct dma_device *ddev;
479 	struct dma_async_tx_descriptor *tx = NULL;
480 	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
481 	DECLARE_COMPLETION_ONSTACK(done_wait);
482 	dma_cookie_t cookie;
483 	enum dma_status status;
484 
485 	if (!chan) {
486 		err = -EIO;
487 		dev_err(&sdev->dev, "%s %d err %d\n",
488 			__func__, __LINE__, err);
489 		return err;
490 	}
491 	ddev = chan->device;
492 
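	/*
	 * As in scif_sync_dma(), a zero length descriptor is used as a
	 * marker, but completion is detected via an interrupt callback
	 * instead of polling.
	 */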
493 	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
494 	if (!tx) {
495 		err = -ENOMEM;
496 		dev_err(&sdev->dev, "%s %d err %d\n",
497 			__func__, __LINE__, err);
498 		goto release;
499 	}
500 	reinit_completion(&done_wait);
501 	tx->callback = scif_dma_callback;
502 	tx->callback_param = &done_wait;
503 	cookie = tx->tx_submit(tx);
504 
505 	if (dma_submit_error(cookie)) {
506 		err = -ENOMEM;
507 		dev_err(&sdev->dev, "%s %d err %d\n",
508 			__func__, __LINE__, err);
509 		goto release;
510 	}
511 	dma_async_issue_pending(chan);
512 
513 	err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
514 	if (!err) {
515 		err = -EIO;
516 		dev_err(&sdev->dev, "%s %d err %d\n",
517 			__func__, __LINE__, err);
518 		goto release;
519 	}
520 	err = 0;
521 	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
522 	if (status != DMA_COMPLETE) {
523 		err = -EIO;
524 		dev_err(&sdev->dev, "%s %d err %d\n",
525 			__func__, __LINE__, err);
526 		goto release;
527 	}
528 release:
529 	return err;
530 }
531 
532 /*
533  * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
534  * DMA channel via polling.
535  *
536  * @sdev - The SCIF device
537  * @chan - DMA channel
538  * Return 0 on success and -errno on error.
539  */
540 static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
541 {
542 	if (!chan)
543 		return -EINVAL;
544 	return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
545 }
546 
547 /*
548  * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
549  * DMA channel via interrupt based blocking wait.
550  *
551  * @sdev - The SCIF device
552  * @chan - DMA channel
553  * Return 0 on success and -errno on error.
554  */
555 int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
556 {
557 	if (!chan)
558 		return -EINVAL;
559 	return scif_async_dma(sdev, chan);
560 }
561 
562 /**
563  * scif_rma_destroy_windows:
564  *
565  * This routine destroys all windows queued for cleanup
566  */
567 void scif_rma_destroy_windows(void)
568 {
569 	struct list_head *item, *tmp;
570 	struct scif_window *window;
571 	struct scif_endpt *ep;
572 	struct dma_chan *chan;
573 
574 	might_sleep();
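	/*
	 * rmalock is dropped while the DMA channel is drained for each
	 * window, so the list walk restarts after every window processed.
	 */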
575 restart:
576 	spin_lock(&scif_info.rmalock);
577 	list_for_each_safe(item, tmp, &scif_info.rma) {
578 		window = list_entry(item, struct scif_window,
579 				    list);
580 		ep = (struct scif_endpt *)window->ep;
581 		chan = ep->rma_info.dma_chan;
582 
583 		list_del_init(&window->list);
584 		spin_unlock(&scif_info.rmalock);
585 		if (!chan || !scifdev_alive(ep) ||
586 		    !scif_drain_dma_intr(ep->remote_dev->sdev,
587 					 ep->rma_info.dma_chan))
588 			/* Remove window from global list */
589 			window->unreg_state = OP_COMPLETED;
590 		else
591 			dev_warn(&ep->remote_dev->sdev->dev,
592 				 "DMA engine hung?\n");
593 		if (window->unreg_state == OP_COMPLETED) {
594 			if (window->type == SCIF_WINDOW_SELF)
595 				scif_destroy_window(ep, window);
596 			else
597 				scif_destroy_remote_window(window);
598 			atomic_dec(&ep->rma_info.tw_refcount);
599 		}
600 		goto restart;
601 	}
602 	spin_unlock(&scif_info.rmalock);
603 }
604 
605 /**
606  * scif_rma_destroy_tcw_invalid:
607  *
608  * This routine destroys temporary cached registered windows
609  * which have been queued for cleanup.
610  */
611 void scif_rma_destroy_tcw_invalid(void)
612 {
613 	struct list_head *item, *tmp;
614 	struct scif_window *window;
615 	struct scif_endpt *ep;
616 	struct dma_chan *chan;
617 
618 	might_sleep();
619 restart:
620 	spin_lock(&scif_info.rmalock);
621 	list_for_each_safe(item, tmp, &scif_info.rma_tc) {
622 		window = list_entry(item, struct scif_window, list);
623 		ep = (struct scif_endpt *)window->ep;
624 		chan = ep->rma_info.dma_chan;
625 		list_del_init(&window->list);
626 		spin_unlock(&scif_info.rmalock);
627 		mutex_lock(&ep->rma_info.rma_lock);
628 		if (!chan || !scifdev_alive(ep) ||
629 		    !scif_drain_dma_intr(ep->remote_dev->sdev,
630 					 ep->rma_info.dma_chan)) {
631 			atomic_sub(window->nr_pages,
632 				   &ep->rma_info.tcw_total_pages);
633 			scif_destroy_window(ep, window);
634 			atomic_dec(&ep->rma_info.tcw_refcount);
635 		} else {
636 			dev_warn(&ep->remote_dev->sdev->dev,
637 				 "DMA engine hung?\n");
638 		}
639 		mutex_unlock(&ep->rma_info.rma_lock);
640 		goto restart;
641 	}
642 	spin_unlock(&scif_info.rmalock);
643 }
644 
645 static inline
646 void *_get_local_va(off_t off, struct scif_window *window, size_t len)
647 {
648 	int page_nr = (off - window->offset) >> PAGE_SHIFT;
649 	off_t page_off = off & ~PAGE_MASK;
650 	void *va = NULL;
651 
652 	if (window->type == SCIF_WINDOW_SELF) {
653 		struct page **pages = window->pinned_pages->pages;
654 
655 		va = page_address(pages[page_nr]) + page_off;
656 	}
657 	return va;
658 }
659 
660 static inline
661 void *ioremap_remote(off_t off, struct scif_window *window,
662 		     size_t len, struct scif_dev *dev,
663 		     struct scif_window_iter *iter)
664 {
665 	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
666 
667 	/*
668 	 * If the DMA address is not card relative then we need the DMA
669 	 * addresses to be an offset into the bar. The aperture base was already
670 	 * added so subtract it here since scif_ioremap is going to add it again
671 	 */
672 	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
673 	    dev->sdev->aper && !dev->sdev->card_rel_da)
674 		phys = phys - dev->sdev->aper->pa;
675 	return scif_ioremap(phys, len, dev);
676 }
677 
678 static inline void
679 iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
680 {
681 	scif_iounmap(virt, size, work->remote_dev);
682 }
683 
684 /*
685  * Takes care of ordering issues caused by:
686  * 1. Hardware:  Only in the case of cpu copy from mgmt node to card
687  * because of WC memory.
688  * 2. Software: If memcpy reorders copy instructions for optimization.
689  * This could happen at both mgmt node and card.
690  */
691 static inline void
692 scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
693 {
694 	if (!count)
695 		return;
696 
697 	memcpy_toio((void __iomem __force *)dst, src, --count);
698 	/* Order the last byte with the previous stores */
699 	wmb();
700 	*(dst + count) = *(src + count);
701 }
702 
703 static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
704 					   size_t count, bool ordered)
705 {
706 	if (ordered)
707 		scif_ordered_memcpy_toio(dst, src, count);
708 	else
709 		memcpy_toio((void __iomem __force *)dst, src, count);
710 }
711 
712 static inline
713 void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
714 {
715 	if (!count)
716 		return;
717 
718 	memcpy_fromio(dst, (void __iomem __force *)src, --count);
719 	/* Order the last byte with the previous loads */
720 	rmb();
721 	*(dst + count) = *(src + count);
722 }
723 
724 static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
725 					     size_t count, bool ordered)
726 {
727 	if (ordered)
728 		scif_ordered_memcpy_fromio(dst, src, count);
729 	else
730 		memcpy_fromio(dst, (void __iomem __force *)src, count);
731 }
732 
733 #define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)
734 
735 /*
736  * scif_off_to_dma_addr:
737  * Obtain the dma_addr given the window and the offset.
738  * @window: Registered window.
739  * @off: Window offset.
740  * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
741  * @iter: Window iterator; on return it caches the index and start offset
742  * of the matching entry in the DMA address array so that subsequent
743  * lookups are faster.
744  * The nr_bytes value gives the caller an estimate of the maximum
745  * contiguous DMA transfer possible from @off.
746  */
747 dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
748 				size_t *nr_bytes, struct scif_window_iter *iter)
749 {
750 	int i, page_nr;
751 	s64 start, end;
752 	off_t page_off;
753 
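	/*
	 * Fast path: every page has its own entry in the DMA address
	 * array, so the entry can be indexed directly from the offset.
	 */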
754 	if (window->nr_pages == window->nr_contig_chunks) {
755 		page_nr = (off - window->offset) >> PAGE_SHIFT;
756 		page_off = off & ~PAGE_MASK;
757 
758 		if (nr_bytes)
759 			*nr_bytes = PAGE_SIZE - page_off;
760 		return window->dma_addr[page_nr] | page_off;
761 	}
762 	if (iter) {
763 		i = iter->index;
764 		start = iter->offset;
765 	} else {
766 		i =  0;
767 		start =  window->offset;
768 	}
769 	for (; i < window->nr_contig_chunks; i++) {
770 		end = start + (window->num_pages[i] << PAGE_SHIFT);
771 		if (off >= start && off < end) {
772 			if (iter) {
773 				iter->index = i;
774 				iter->offset = start;
775 			}
776 			if (nr_bytes)
777 				*nr_bytes = end - off;
778 			return (window->dma_addr[i] + (off - start));
779 		}
780 		start += (window->num_pages[i] << PAGE_SHIFT);
781 	}
782 	dev_err(scif_info.mdev.this_device,
783 		"%s %d BUG. Addr not found? window %p off 0x%llx\n",
784 		__func__, __LINE__, window, off);
785 	return SCIF_RMA_ERROR_CODE;
786 }
787 
788 /*
789  * Copy between rma window and temporary buffer
790  */
791 static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
792 				    u8 *temp, size_t rem_len, bool to_temp)
793 {
794 	void *window_virt;
795 	size_t loop_len;
796 	int offset_in_page;
797 	s64 end_offset;
798 
799 	offset_in_page = offset & ~PAGE_MASK;
800 	loop_len = PAGE_SIZE - offset_in_page;
801 
802 	if (rem_len < loop_len)
803 		loop_len = rem_len;
804 
805 	window_virt = _get_local_va(offset, window, loop_len);
806 	if (!window_virt)
807 		return;
808 	if (to_temp)
809 		memcpy(temp, window_virt, loop_len);
810 	else
811 		memcpy(window_virt, temp, loop_len);
812 
813 	offset += loop_len;
814 	temp += loop_len;
815 	rem_len -= loop_len;
816 
817 	end_offset = window->offset +
818 		(window->nr_pages << PAGE_SHIFT);
819 	while (rem_len) {
820 		if (offset == end_offset) {
821 			window = list_next_entry(window, list);
822 			end_offset = window->offset +
823 				(window->nr_pages << PAGE_SHIFT);
824 		}
825 		loop_len = min(PAGE_SIZE, rem_len);
826 		window_virt = _get_local_va(offset, window, loop_len);
827 		if (!window_virt)
828 			return;
829 		if (to_temp)
830 			memcpy(temp, window_virt, loop_len);
831 		else
832 			memcpy(window_virt, temp, loop_len);
833 		offset	+= loop_len;
834 		temp	+= loop_len;
835 		rem_len	-= loop_len;
836 	}
837 }
838 
839 /**
840  * scif_rma_completion_cb:
841  * @data: RMA cookie
842  *
843  * RMA interrupt completion callback.
844  */
845 static void scif_rma_completion_cb(void *data)
846 {
847 	struct scif_dma_comp_cb *comp_cb = data;
848 
849 	/* Free DMA Completion CB. */
850 	if (comp_cb->dst_window)
851 		scif_rma_local_cpu_copy(comp_cb->dst_offset,
852 					comp_cb->dst_window,
853 					comp_cb->temp_buf +
854 					comp_cb->header_padding,
855 					comp_cb->len, false);
856 	scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
857 			  SCIF_KMEM_UNALIGNED_BUF_SIZE);
858 	if (comp_cb->is_cache)
859 		kmem_cache_free(unaligned_cache,
860 				comp_cb->temp_buf_to_free);
861 	else
862 		kfree(comp_cb->temp_buf_to_free);
863 }
864 
865 /* Copies between temporary buffer and offsets provided in work */
866 static int
867 scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
868 				 u8 *temp, struct dma_chan *chan,
869 				 bool src_local)
870 {
871 	struct scif_dma_comp_cb *comp_cb = work->comp_cb;
872 	dma_addr_t window_dma_addr, temp_dma_addr;
873 	dma_addr_t temp_phys = comp_cb->temp_phys;
874 	size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
875 	int offset_in_ca, ret = 0;
876 	s64 end_offset, offset;
877 	struct scif_window *window;
878 	void *window_virt_addr;
879 	size_t tail_len;
880 	struct dma_async_tx_descriptor *tx;
881 	struct dma_device *dev = chan->device;
882 	dma_cookie_t cookie;
883 
884 	if (src_local) {
885 		offset = work->dst_offset;
886 		window = work->dst_window;
887 	} else {
888 		offset = work->src_offset;
889 		window = work->src_window;
890 	}
891 
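	/*
	 * Copy the cache line unaligned head via the CPU, the aligned body
	 * via DMA and the remaining tail bytes via the CPU again.
	 */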
892 	offset_in_ca = offset & (L1_CACHE_BYTES - 1);
893 	if (offset_in_ca) {
894 		loop_len = L1_CACHE_BYTES - offset_in_ca;
895 		loop_len = min(loop_len, remaining_len);
896 		window_virt_addr = ioremap_remote(offset, window,
897 						  loop_len,
898 						  work->remote_dev,
899 						  NULL);
900 		if (!window_virt_addr)
901 			return -ENOMEM;
902 		if (src_local)
903 			scif_unaligned_cpy_toio(window_virt_addr, temp,
904 						loop_len,
905 						work->ordered &&
906 						!(remaining_len - loop_len));
907 		else
908 			scif_unaligned_cpy_fromio(temp, window_virt_addr,
909 						  loop_len, work->ordered &&
910 						  !(remaining_len - loop_len));
911 		iounmap_remote(window_virt_addr, loop_len, work);
912 
913 		offset += loop_len;
914 		temp += loop_len;
915 		temp_phys += loop_len;
916 		remaining_len -= loop_len;
917 	}
918 
919 	offset_in_ca = offset & ~PAGE_MASK;
920 	end_offset = window->offset +
921 		(window->nr_pages << PAGE_SHIFT);
922 
923 	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
924 	remaining_len -= tail_len;
925 	while (remaining_len) {
926 		if (offset == end_offset) {
927 			window = list_next_entry(window, list);
928 			end_offset = window->offset +
929 				(window->nr_pages << PAGE_SHIFT);
930 		}
931 		if (scif_is_mgmt_node())
932 			temp_dma_addr = temp_phys;
933 		else
934 			/* Fix if we ever enable IOMMU on the card */
935 			temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
936 		window_dma_addr = scif_off_to_dma_addr(window, offset,
937 						       &nr_contig_bytes,
938 						       NULL);
939 		loop_len = min(nr_contig_bytes, remaining_len);
940 		if (src_local) {
941 			if (work->ordered && !tail_len &&
942 			    !(remaining_len - loop_len) &&
943 			    loop_len != L1_CACHE_BYTES) {
944 				/*
945 				 * Break up the last chunk of the transfer into
946 				 * two steps if there is no tail, to guarantee
947 				 * DMA ordering. SCIF_DMA_POLLING inserts
948 				 * a status update descriptor in step 1 which
949 				 * acts as a double sided synchronization fence
950 				 * for the DMA engine to ensure that the last
951 				 * cache line in step 2 is updated last.
952 				 */
953 				/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
954 				tx =
955 				dev->device_prep_dma_memcpy(chan,
956 							    window_dma_addr,
957 							    temp_dma_addr,
958 							    loop_len -
959 							    L1_CACHE_BYTES,
960 							    DMA_PREP_FENCE);
961 				if (!tx) {
962 					ret = -ENOMEM;
963 					goto err;
964 				}
965 				cookie = tx->tx_submit(tx);
966 				if (dma_submit_error(cookie)) {
967 					ret = -ENOMEM;
968 					goto err;
969 				}
970 				dma_async_issue_pending(chan);
971 				offset += (loop_len - L1_CACHE_BYTES);
972 				temp_dma_addr += (loop_len - L1_CACHE_BYTES);
973 				window_dma_addr += (loop_len - L1_CACHE_BYTES);
974 				remaining_len -= (loop_len - L1_CACHE_BYTES);
975 				loop_len = remaining_len;
976 
977 				/* Step 2) DMA: L1_CACHE_BYTES */
978 				tx =
979 				dev->device_prep_dma_memcpy(chan,
980 							    window_dma_addr,
981 							    temp_dma_addr,
982 							    loop_len, 0);
983 				if (!tx) {
984 					ret = -ENOMEM;
985 					goto err;
986 				}
987 				cookie = tx->tx_submit(tx);
988 				if (dma_submit_error(cookie)) {
989 					ret = -ENOMEM;
990 					goto err;
991 				}
992 				dma_async_issue_pending(chan);
993 			} else {
994 				tx =
995 				dev->device_prep_dma_memcpy(chan,
996 							    window_dma_addr,
997 							    temp_dma_addr,
998 							    loop_len, 0);
999 				if (!tx) {
1000 					ret = -ENOMEM;
1001 					goto err;
1002 				}
1003 				cookie = tx->tx_submit(tx);
1004 				if (dma_submit_error(cookie)) {
1005 					ret = -ENOMEM;
1006 					goto err;
1007 				}
1008 				dma_async_issue_pending(chan);
1009 			}
1010 		} else {
1011 			tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
1012 					window_dma_addr, loop_len, 0);
1013 			if (!tx) {
1014 				ret = -ENOMEM;
1015 				goto err;
1016 			}
1017 			cookie = tx->tx_submit(tx);
1018 			if (dma_submit_error(cookie)) {
1019 				ret = -ENOMEM;
1020 				goto err;
1021 			}
1022 			dma_async_issue_pending(chan);
1023 		}
1024 		offset += loop_len;
1025 		temp += loop_len;
1026 		temp_phys += loop_len;
1027 		remaining_len -= loop_len;
1028 		offset_in_ca = 0;
1029 	}
1030 	if (tail_len) {
1031 		if (offset == end_offset) {
1032 			window = list_next_entry(window, list);
1033 			end_offset = window->offset +
1034 				(window->nr_pages << PAGE_SHIFT);
1035 		}
1036 		window_virt_addr = ioremap_remote(offset, window, tail_len,
1037 						  work->remote_dev,
1038 						  NULL);
1039 		if (!window_virt_addr)
1040 			return -ENOMEM;
1041 		/*
1042 		 * The CPU copy for the tail bytes must be initiated only once
1043 		 * previous DMA transfers for this endpoint have completed
1044 		 * to guarantee ordering.
1045 		 */
1046 		if (work->ordered) {
1047 			struct scif_dev *rdev = work->remote_dev;
1048 
1049 			ret = scif_drain_dma_intr(rdev->sdev, chan);
1050 			if (ret)
1051 				return ret;
1052 		}
1053 		if (src_local)
1054 			scif_unaligned_cpy_toio(window_virt_addr, temp,
1055 						tail_len, work->ordered);
1056 		else
1057 			scif_unaligned_cpy_fromio(temp, window_virt_addr,
1058 						  tail_len, work->ordered);
1059 		iounmap_remote(window_virt_addr, tail_len, work);
1060 	}
1061 	tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
1062 	if (!tx) {
1063 		ret = -ENOMEM;
1064 		return ret;
1065 	}
1066 	tx->callback = &scif_rma_completion_cb;
1067 	tx->callback_param = comp_cb;
1068 	cookie = tx->tx_submit(tx);
1069 
1070 	if (dma_submit_error(cookie)) {
1071 		ret = -ENOMEM;
1072 		return ret;
1073 	}
1074 	dma_async_issue_pending(chan);
1075 	return 0;
1076 err:
1077 	dev_err(scif_info.mdev.this_device,
1078 		"%s %d Desc Prog Failed ret %d\n",
1079 		__func__, __LINE__, ret);
1080 	return ret;
1081 }
1082 
1083 /*
1084  * _scif_rma_list_dma_copy_aligned:
1085  *
1086  * Traverse all the windows and perform DMA copy.
1087  */
1088 static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
1089 					   struct dma_chan *chan)
1090 {
1091 	dma_addr_t src_dma_addr, dst_dma_addr;
1092 	size_t loop_len, remaining_len, src_contig_bytes = 0;
1093 	size_t dst_contig_bytes = 0;
1094 	struct scif_window_iter src_win_iter;
1095 	struct scif_window_iter dst_win_iter;
1096 	s64 end_src_offset, end_dst_offset;
1097 	struct scif_window *src_window = work->src_window;
1098 	struct scif_window *dst_window = work->dst_window;
1099 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1100 	int ret = 0;
1101 	struct dma_async_tx_descriptor *tx;
1102 	struct dma_device *dev = chan->device;
1103 	dma_cookie_t cookie;
1104 
1105 	remaining_len = work->len;
1106 
1107 	scif_init_window_iter(src_window, &src_win_iter);
1108 	scif_init_window_iter(dst_window, &dst_win_iter);
1109 	end_src_offset = src_window->offset +
1110 		(src_window->nr_pages << PAGE_SHIFT);
1111 	end_dst_offset = dst_window->offset +
1112 		(dst_window->nr_pages << PAGE_SHIFT);
1113 	while (remaining_len) {
1114 		if (src_offset == end_src_offset) {
1115 			src_window = list_next_entry(src_window, list);
1116 			end_src_offset = src_window->offset +
1117 				(src_window->nr_pages << PAGE_SHIFT);
1118 			scif_init_window_iter(src_window, &src_win_iter);
1119 		}
1120 		if (dst_offset == end_dst_offset) {
1121 			dst_window = list_next_entry(dst_window, list);
1122 			end_dst_offset = dst_window->offset +
1123 				(dst_window->nr_pages << PAGE_SHIFT);
1124 			scif_init_window_iter(dst_window, &dst_win_iter);
1125 		}
1126 
1127 		/* compute dma addresses for transfer */
1128 		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1129 						    &src_contig_bytes,
1130 						    &src_win_iter);
1131 		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1132 						    &dst_contig_bytes,
1133 						    &dst_win_iter);
1134 		loop_len = min(src_contig_bytes, dst_contig_bytes);
1135 		loop_len = min(loop_len, remaining_len);
1136 		if (work->ordered && !(remaining_len - loop_len)) {
1137 			/*
1138 			 * Break up the last chunk of the transfer into two
1139 			 * steps to ensure that the last byte in step 2 is
1140 			 * updated last.
1141 			 */
1142 			/* Step 1) DMA: Body Length - 1 */
1143 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1144 							 src_dma_addr,
1145 							 loop_len - 1,
1146 							 DMA_PREP_FENCE);
1147 			if (!tx) {
1148 				ret = -ENOMEM;
1149 				goto err;
1150 			}
1151 			cookie = tx->tx_submit(tx);
1152 			if (dma_submit_error(cookie)) {
1153 				ret = -ENOMEM;
1154 				goto err;
1155 			}
1156 			src_offset += (loop_len - 1);
1157 			dst_offset += (loop_len - 1);
1158 			src_dma_addr += (loop_len - 1);
1159 			dst_dma_addr += (loop_len - 1);
1160 			remaining_len -= (loop_len - 1);
1161 			loop_len = remaining_len;
1162 
1163 			/* Step 2) DMA: 1 BYTES */
1164 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1165 					src_dma_addr, loop_len, 0);
1166 			if (!tx) {
1167 				ret = -ENOMEM;
1168 				goto err;
1169 			}
1170 			cookie = tx->tx_submit(tx);
1171 			if (dma_submit_error(cookie)) {
1172 				ret = -ENOMEM;
1173 				goto err;
1174 			}
1175 			dma_async_issue_pending(chan);
1176 		} else {
1177 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1178 					src_dma_addr, loop_len, 0);
1179 			if (!tx) {
1180 				ret = -ENOMEM;
1181 				goto err;
1182 			}
1183 			cookie = tx->tx_submit(tx);
1184 			if (dma_submit_error(cookie)) {
1185 				ret = -ENOMEM;
1186 				goto err;
1187 			}
1188 		}
1189 		src_offset += loop_len;
1190 		dst_offset += loop_len;
1191 		remaining_len -= loop_len;
1192 	}
1193 	return ret;
1194 err:
1195 	dev_err(scif_info.mdev.this_device,
1196 		"%s %d Desc Prog Failed ret %d\n",
1197 		__func__, __LINE__, ret);
1198 	return ret;
1199 }
1200 
1201 /*
1202  * scif_rma_list_dma_copy_aligned:
1203  *
1204  * Traverse all the windows and perform DMA copy.
1205  */
1206 static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
1207 					  struct dma_chan *chan)
1208 {
1209 	dma_addr_t src_dma_addr, dst_dma_addr;
1210 	size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
1211 	size_t dst_contig_bytes = 0;
1212 	int src_cache_off;
1213 	s64 end_src_offset, end_dst_offset;
1214 	struct scif_window_iter src_win_iter;
1215 	struct scif_window_iter dst_win_iter;
1216 	void *src_virt, *dst_virt;
1217 	struct scif_window *src_window = work->src_window;
1218 	struct scif_window *dst_window = work->dst_window;
1219 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1220 	int ret = 0;
1221 	struct dma_async_tx_descriptor *tx;
1222 	struct dma_device *dev = chan->device;
1223 	dma_cookie_t cookie;
1224 
1225 	remaining_len = work->len;
1226 	scif_init_window_iter(src_window, &src_win_iter);
1227 	scif_init_window_iter(dst_window, &dst_win_iter);
1228 
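	/*
	 * Copy the cache line unaligned head via the CPU, the aligned body
	 * via DMA and the tail bytes via the CPU (after draining the channel
	 * when ordering is required).
	 */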
1229 	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1230 	if (src_cache_off != 0) {
1231 		/* Head */
1232 		loop_len = L1_CACHE_BYTES - src_cache_off;
1233 		loop_len = min(loop_len, remaining_len);
1234 		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1235 		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1236 		if (src_window->type == SCIF_WINDOW_SELF)
1237 			src_virt = _get_local_va(src_offset, src_window,
1238 						 loop_len);
1239 		else
1240 			src_virt = ioremap_remote(src_offset, src_window,
1241 						  loop_len,
1242 						  work->remote_dev, NULL);
1243 		if (!src_virt)
1244 			return -ENOMEM;
1245 		if (dst_window->type == SCIF_WINDOW_SELF)
1246 			dst_virt = _get_local_va(dst_offset, dst_window,
1247 						 loop_len);
1248 		else
1249 			dst_virt = ioremap_remote(dst_offset, dst_window,
1250 						  loop_len,
1251 						  work->remote_dev, NULL);
1252 		if (!dst_virt) {
1253 			if (src_window->type != SCIF_WINDOW_SELF)
1254 				iounmap_remote(src_virt, loop_len, work);
1255 			return -ENOMEM;
1256 		}
1257 		if (src_window->type == SCIF_WINDOW_SELF)
1258 			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1259 						remaining_len == loop_len ?
1260 						work->ordered : false);
1261 		else
1262 			scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
1263 						  remaining_len == loop_len ?
1264 						  work->ordered : false);
1265 		if (src_window->type != SCIF_WINDOW_SELF)
1266 			iounmap_remote(src_virt, loop_len, work);
1267 		if (dst_window->type != SCIF_WINDOW_SELF)
1268 			iounmap_remote(dst_virt, loop_len, work);
1269 		src_offset += loop_len;
1270 		dst_offset += loop_len;
1271 		remaining_len -= loop_len;
1272 	}
1273 
1274 	end_src_offset = src_window->offset +
1275 		(src_window->nr_pages << PAGE_SHIFT);
1276 	end_dst_offset = dst_window->offset +
1277 		(dst_window->nr_pages << PAGE_SHIFT);
1278 	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
1279 	remaining_len -= tail_len;
1280 	while (remaining_len) {
1281 		if (src_offset == end_src_offset) {
1282 			src_window = list_next_entry(src_window, list);
1283 			end_src_offset = src_window->offset +
1284 				(src_window->nr_pages << PAGE_SHIFT);
1285 			scif_init_window_iter(src_window, &src_win_iter);
1286 		}
1287 		if (dst_offset == end_dst_offset) {
1288 			dst_window = list_next_entry(dst_window, list);
1289 			end_dst_offset = dst_window->offset +
1290 				(dst_window->nr_pages << PAGE_SHIFT);
1291 			scif_init_window_iter(dst_window, &dst_win_iter);
1292 		}
1293 
1294 		/* compute dma addresses for transfer */
1295 		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1296 						    &src_contig_bytes,
1297 						    &src_win_iter);
1298 		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1299 						    &dst_contig_bytes,
1300 						    &dst_win_iter);
1301 		loop_len = min(src_contig_bytes, dst_contig_bytes);
1302 		loop_len = min(loop_len, remaining_len);
1303 		if (work->ordered && !tail_len &&
1304 		    !(remaining_len - loop_len)) {
1305 			/*
1306 			 * Break up the last chunk of the transfer into two
1307 			 * steps if there is no tail, to guarantee DMA ordering.
1308 			 * Passing SCIF_DMA_POLLING inserts a status update
1309 			 * descriptor in step 1 which acts as a double sided
1310 			 * synchronization fence for the DMA engine to ensure
1311 			 * that the last cache line in step 2 is updated last.
1312 			 */
1313 			/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
1314 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1315 							 src_dma_addr,
1316 							 loop_len -
1317 							 L1_CACHE_BYTES,
1318 							 DMA_PREP_FENCE);
1319 			if (!tx) {
1320 				ret = -ENOMEM;
1321 				goto err;
1322 			}
1323 			cookie = tx->tx_submit(tx);
1324 			if (dma_submit_error(cookie)) {
1325 				ret = -ENOMEM;
1326 				goto err;
1327 			}
1328 			dma_async_issue_pending(chan);
1329 			src_offset += (loop_len - L1_CACHE_BYTES);
1330 			dst_offset += (loop_len - L1_CACHE_BYTES);
1331 			src_dma_addr += (loop_len - L1_CACHE_BYTES);
1332 			dst_dma_addr += (loop_len - L1_CACHE_BYTES);
1333 			remaining_len -= (loop_len - L1_CACHE_BYTES);
1334 			loop_len = remaining_len;
1335 
1336 			/* Step 2) DMA: L1_CACHE_BYTES */
1337 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1338 							 src_dma_addr,
1339 							 loop_len, 0);
1340 			if (!tx) {
1341 				ret = -ENOMEM;
1342 				goto err;
1343 			}
1344 			cookie = tx->tx_submit(tx);
1345 			if (dma_submit_error(cookie)) {
1346 				ret = -ENOMEM;
1347 				goto err;
1348 			}
1349 			dma_async_issue_pending(chan);
1350 		} else {
1351 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1352 							 src_dma_addr,
1353 							 loop_len, 0);
1354 			if (!tx) {
1355 				ret = -ENOMEM;
1356 				goto err;
1357 			}
1358 			cookie = tx->tx_submit(tx);
1359 			if (dma_submit_error(cookie)) {
1360 				ret = -ENOMEM;
1361 				goto err;
1362 			}
1363 			dma_async_issue_pending(chan);
1364 		}
1365 		src_offset += loop_len;
1366 		dst_offset += loop_len;
1367 		remaining_len -= loop_len;
1368 	}
1369 	remaining_len = tail_len;
1370 	if (remaining_len) {
1371 		loop_len = remaining_len;
1372 		if (src_offset == end_src_offset)
1373 			src_window = list_next_entry(src_window, list);
1374 		if (dst_offset == end_dst_offset)
1375 			dst_window = list_next_entry(dst_window, list);
1376 
1377 		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1378 		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1379 		/*
1380 		 * The CPU copy for the tail bytes must be initiated only once
1381 		 * previous DMA transfers for this endpoint have completed to
1382 		 * guarantee ordering.
1383 		 */
1384 		if (work->ordered) {
1385 			struct scif_dev *rdev = work->remote_dev;
1386 
1387 			ret = scif_drain_dma_poll(rdev->sdev, chan);
1388 			if (ret)
1389 				return ret;
1390 		}
1391 		if (src_window->type == SCIF_WINDOW_SELF)
1392 			src_virt = _get_local_va(src_offset, src_window,
1393 						 loop_len);
1394 		else
1395 			src_virt = ioremap_remote(src_offset, src_window,
1396 						  loop_len,
1397 						  work->remote_dev, NULL);
1398 		if (!src_virt)
1399 			return -ENOMEM;
1400 
1401 		if (dst_window->type == SCIF_WINDOW_SELF)
1402 			dst_virt = _get_local_va(dst_offset, dst_window,
1403 						 loop_len);
1404 		else
1405 			dst_virt = ioremap_remote(dst_offset, dst_window,
1406 						  loop_len,
1407 						  work->remote_dev, NULL);
1408 		if (!dst_virt) {
1409 			if (src_window->type != SCIF_WINDOW_SELF)
1410 				iounmap_remote(src_virt, loop_len, work);
1411 			return -ENOMEM;
1412 		}
1413 
1414 		if (src_window->type == SCIF_WINDOW_SELF)
1415 			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1416 						work->ordered);
1417 		else
1418 			scif_unaligned_cpy_fromio(dst_virt, src_virt,
1419 						  loop_len, work->ordered);
1420 		if (src_window->type != SCIF_WINDOW_SELF)
1421 			iounmap_remote(src_virt, loop_len, work);
1422 
1423 		if (dst_window->type != SCIF_WINDOW_SELF)
1424 			iounmap_remote(dst_virt, loop_len, work);
1425 		remaining_len -= loop_len;
1426 	}
1427 	return ret;
1428 err:
1429 	dev_err(scif_info.mdev.this_device,
1430 		"%s %d Desc Prog Failed ret %d\n",
1431 		__func__, __LINE__, ret);
1432 	return ret;
1433 }
1434 
1435 /*
1436  * scif_rma_list_cpu_copy:
1437  *
1438  * Traverse all the windows and perform CPU copy.
1439  */
1440 static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
1441 {
1442 	void *src_virt, *dst_virt;
1443 	size_t loop_len, remaining_len;
1444 	int src_page_off, dst_page_off;
1445 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1446 	struct scif_window *src_window = work->src_window;
1447 	struct scif_window *dst_window = work->dst_window;
1448 	s64 end_src_offset, end_dst_offset;
1449 	int ret = 0;
1450 	struct scif_window_iter src_win_iter;
1451 	struct scif_window_iter dst_win_iter;
1452 
1453 	remaining_len = work->len;
1454 
1455 	scif_init_window_iter(src_window, &src_win_iter);
1456 	scif_init_window_iter(dst_window, &dst_win_iter);
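	/*
	 * Copy at most a page at a time, clipped so that neither the source
	 * nor the destination access crosses a page boundary.
	 */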
1457 	while (remaining_len) {
1458 		src_page_off = src_offset & ~PAGE_MASK;
1459 		dst_page_off = dst_offset & ~PAGE_MASK;
1460 		loop_len = min(PAGE_SIZE -
1461 			       max(src_page_off, dst_page_off),
1462 			       remaining_len);
1463 
1464 		if (src_window->type == SCIF_WINDOW_SELF)
1465 			src_virt = _get_local_va(src_offset, src_window,
1466 						 loop_len);
1467 		else
1468 			src_virt = ioremap_remote(src_offset, src_window,
1469 						  loop_len,
1470 						  work->remote_dev,
1471 						  &src_win_iter);
1472 		if (!src_virt) {
1473 			ret = -ENOMEM;
1474 			goto error;
1475 		}
1476 
1477 		if (dst_window->type == SCIF_WINDOW_SELF)
1478 			dst_virt = _get_local_va(dst_offset, dst_window,
1479 						 loop_len);
1480 		else
1481 			dst_virt = ioremap_remote(dst_offset, dst_window,
1482 						  loop_len,
1483 						  work->remote_dev,
1484 						  &dst_win_iter);
1485 		if (!dst_virt) {
1486 			if (src_window->type == SCIF_WINDOW_PEER)
1487 				iounmap_remote(src_virt, loop_len, work);
1488 			ret = -ENOMEM;
1489 			goto error;
1490 		}
1491 
1492 		if (work->loopback) {
1493 			memcpy(dst_virt, src_virt, loop_len);
1494 		} else {
1495 			if (src_window->type == SCIF_WINDOW_SELF)
1496 				memcpy_toio((void __iomem __force *)dst_virt,
1497 					    src_virt, loop_len);
1498 			else
1499 				memcpy_fromio(dst_virt,
1500 					      (void __iomem __force *)src_virt,
1501 					      loop_len);
1502 		}
1503 		if (src_window->type == SCIF_WINDOW_PEER)
1504 			iounmap_remote(src_virt, loop_len, work);
1505 
1506 		if (dst_window->type == SCIF_WINDOW_PEER)
1507 			iounmap_remote(dst_virt, loop_len, work);
1508 
1509 		src_offset += loop_len;
1510 		dst_offset += loop_len;
1511 		remaining_len -= loop_len;
1512 		if (remaining_len) {
1513 			end_src_offset = src_window->offset +
1514 				(src_window->nr_pages << PAGE_SHIFT);
1515 			end_dst_offset = dst_window->offset +
1516 				(dst_window->nr_pages << PAGE_SHIFT);
1517 			if (src_offset == end_src_offset) {
1518 				src_window = list_next_entry(src_window, list);
1519 				scif_init_window_iter(src_window,
1520 						      &src_win_iter);
1521 			}
1522 			if (dst_offset == end_dst_offset) {
1523 				dst_window = list_next_entry(dst_window, list);
1524 				scif_init_window_iter(dst_window,
1525 						      &dst_win_iter);
1526 			}
1527 		}
1528 	}
1529 error:
1530 	return ret;
1531 }
1532 
1533 static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
1534 					  struct scif_copy_work *work,
1535 					  struct dma_chan *chan, off_t loffset)
1536 {
1537 	int src_cache_off, dst_cache_off;
1538 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1539 	u8 *temp = NULL;
1540 	bool src_local = true;
1541 	struct scif_dma_comp_cb *comp_cb;
1542 	int err;
1543 
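	/*
	 * If the DMA engine can copy with byte granularity, use the fully
	 * aligned path. If source and destination share the same cache line
	 * misalignment, the aligned path still works with CPU copies for the
	 * head and tail. Loopback falls back to a pure CPU copy; everything
	 * else is bounced through a cache line aligned temporary buffer.
	 */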
1544 	if (is_dma_copy_aligned(chan->device, 1, 1, 1))
1545 		return _scif_rma_list_dma_copy_aligned(work, chan);
1546 
1547 	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1548 	dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);
1549 
1550 	if (dst_cache_off == src_cache_off)
1551 		return scif_rma_list_dma_copy_aligned(work, chan);
1552 
1553 	if (work->loopback)
1554 		return scif_rma_list_cpu_copy(work);
1555 	src_local = work->src_window->type == SCIF_WINDOW_SELF;
1556 
1557 	/* Allocate dma_completion cb */
1558 	comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
1559 	if (!comp_cb)
1560 		goto error;
1561 
1562 	work->comp_cb = comp_cb;
1563 	comp_cb->cb_cookie = comp_cb;
1564 	comp_cb->dma_completion_func = &scif_rma_completion_cb;
1565 
1566 	if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
1567 		comp_cb->is_cache = false;
1568 		/* Allocate padding bytes to align to a cache line */
1569 		temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
1570 			       GFP_KERNEL);
1571 		if (!temp)
1572 			goto free_comp_cb;
1573 		comp_cb->temp_buf_to_free = temp;
1574 		/* kmalloc(..) does not guarantee cache line alignment */
1575 		if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
1576 			temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
1577 	} else {
1578 		comp_cb->is_cache = true;
1579 		temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
1580 		if (!temp)
1581 			goto free_comp_cb;
1582 		comp_cb->temp_buf_to_free = temp;
1583 	}
1584 
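	/*
	 * When the source is local, the CPU stages the data into the bounce
	 * buffer and the DMA engine pushes it to the remote window; otherwise
	 * the DMA engine pulls into the bounce buffer and the completion
	 * callback copies the data to the local destination window.
	 */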
1585 	if (src_local) {
1586 		temp += dst_cache_off;
1587 		scif_rma_local_cpu_copy(work->src_offset, work->src_window,
1588 					temp, work->len, true);
1589 	} else {
1590 		comp_cb->dst_window = work->dst_window;
1591 		comp_cb->dst_offset = work->dst_offset;
1592 		work->src_offset = work->src_offset - src_cache_off;
1593 		comp_cb->len = work->len;
1594 		work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
1595 		comp_cb->header_padding = src_cache_off;
1596 	}
1597 	comp_cb->temp_buf = temp;
1598 
1599 	err = scif_map_single(&comp_cb->temp_phys, temp,
1600 			      work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
1601 	if (err)
1602 		goto free_temp_buf;
1603 	comp_cb->sdev = work->remote_dev;
1604 	if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
1605 		goto free_temp_buf;
1606 	if (!src_local)
1607 		work->fence_type = SCIF_DMA_INTR;
1608 	return 0;
1609 free_temp_buf:
1610 	if (comp_cb->is_cache)
1611 		kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
1612 	else
1613 		kfree(comp_cb->temp_buf_to_free);
1614 free_comp_cb:
1615 	kfree(comp_cb);
1616 error:
1617 	return -ENOMEM;
1618 }
1619 
1620 /**
1621  * scif_rma_copy:
1622  * @epd: end point descriptor.
1623  * @loffset: offset in local registered address space to/from which to copy
1624  * @addr: user virtual address to/from which to copy
1625  * @len: length of range to copy
1626  * @roffset: offset in remote registered address space to/from which to copy
1627  * @flags: flags
1628  * @dir: LOCAL->REMOTE or vice versa.
1629  * @last_chunk: true if this is the last chunk of a larger transfer
1630  *
1631  * Validate parameters, check if src/dst registered ranges requested for copy
1632  * are valid and initiate either CPU or DMA copy.
1633  */
1634 static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
1635 			 size_t len, off_t roffset, int flags,
1636 			 enum scif_rma_dir dir, bool last_chunk)
1637 {
1638 	struct scif_endpt *ep = (struct scif_endpt *)epd;
1639 	struct scif_rma_req remote_req;
1640 	struct scif_rma_req req;
1641 	struct scif_window *local_window = NULL;
1642 	struct scif_window *remote_window = NULL;
1643 	struct scif_copy_work copy_work;
1644 	bool loopback;
1645 	int err = 0;
1646 	struct dma_chan *chan;
1647 	struct scif_mmu_notif *mmn = NULL;
1648 	bool cache = false;
1649 	struct device *spdev;
1650 
1651 	err = scif_verify_epd(ep);
1652 	if (err)
1653 		return err;
1654 
1655 	if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
1656 				SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
1657 		return -EINVAL;
1658 
1659 	loopback = scifdev_self(ep->remote_dev) ? true : false;
1660 	copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
1661 				SCIF_DMA_POLL : 0;
1662 	copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
1663 
1664 	/* Use CPU for Mgmt node <-> Mgmt node copies */
1665 	if (loopback && scif_is_mgmt_node()) {
1666 		flags |= SCIF_RMA_USECPU;
1667 		copy_work.fence_type = 0x0;
1668 	}
1669 
1670 	cache = scif_is_set_reg_cache(flags);
1671 
1672 	remote_req.out_window = &remote_window;
1673 	remote_req.offset = roffset;
1674 	remote_req.nr_bytes = len;
1675 	/*
1676 	 * If transfer is from local to remote then the remote window
1677 	 * must be writeable and vice versa.
1678 	 */
1679 	remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
1680 	remote_req.type = SCIF_WINDOW_PARTIAL;
1681 	remote_req.head = &ep->rma_info.remote_reg_list;
1682 
1683 	spdev = scif_get_peer_dev(ep->remote_dev);
1684 	if (IS_ERR(spdev)) {
1685 		err = PTR_ERR(spdev);
1686 		return err;
1687 	}
1688 
1689 	if (addr && cache) {
1690 		mutex_lock(&ep->rma_info.mmn_lock);
1691 		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
1692 		if (!mmn)
1693 			mmn = scif_add_mmu_notifier(current->mm, ep);
1694 		mutex_unlock(&ep->rma_info.mmn_lock);
1695 		if (IS_ERR(mmn)) {
1696 			scif_put_peer_dev(spdev);
1697 			return PTR_ERR(mmn);
1698 		}
1699 		cache = cache && !scif_rma_tc_can_cache(ep, len);
1700 	}
1701 	mutex_lock(&ep->rma_info.rma_lock);
1702 	if (addr) {
1703 		req.out_window = &local_window;
1704 		req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
1705 				     PAGE_SIZE);
1706 		req.va_for_temp = addr & PAGE_MASK;
1707 		req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
1708 			    VM_READ : VM_WRITE | VM_READ);
1709 		/* Does a valid local window exist? */
1710 		if (mmn) {
1711 			spin_lock(&ep->rma_info.tc_lock);
1712 			req.head = &mmn->tc_reg_list;
1713 			err = scif_query_tcw(ep, &req);
1714 			spin_unlock(&ep->rma_info.tc_lock);
1715 		}
1716 		if (!mmn || err) {
1717 			err = scif_register_temp(epd, req.va_for_temp,
1718 						 req.nr_bytes, req.prot,
1719 						 &loffset, &local_window);
1720 			if (err) {
1721 				mutex_unlock(&ep->rma_info.rma_lock);
1722 				goto error;
1723 			}
1724 			if (!cache)
1725 				goto skip_cache;
1726 			atomic_inc(&ep->rma_info.tcw_refcount);
1727 			atomic_add_return(local_window->nr_pages,
1728 					  &ep->rma_info.tcw_total_pages);
1729 			if (mmn) {
1730 				spin_lock(&ep->rma_info.tc_lock);
1731 				scif_insert_tcw(local_window,
1732 						&mmn->tc_reg_list);
1733 				spin_unlock(&ep->rma_info.tc_lock);
1734 			}
1735 		}
1736 skip_cache:
1737 		loffset = local_window->offset +
1738 				(addr - local_window->va_for_temp);
1739 	} else {
1740 		req.out_window = &local_window;
1741 		req.offset = loffset;
1742 		/*
1743 		 * If transfer is from local to remote then the self window
1744 		 * must be readable and vice versa.
1745 		 */
1746 		req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
1747 		req.nr_bytes = len;
1748 		req.type = SCIF_WINDOW_PARTIAL;
1749 		req.head = &ep->rma_info.reg_list;
1750 		/* Does a valid local window exist? */
1751 		err = scif_query_window(&req);
1752 		if (err) {
1753 			mutex_unlock(&ep->rma_info.rma_lock);
1754 			goto error;
1755 		}
1756 	}
1757 
1758 	/* Does a valid remote window exist? */
1759 	err = scif_query_window(&remote_req);
1760 	if (err) {
1761 		mutex_unlock(&ep->rma_info.rma_lock);
1762 		goto error;
1763 	}
1764 
1765 	/*
1766 	 * Prepare copy_work for submitting work to the DMA kernel thread
1767 	 * or CPU copy routine.
1768 	 */
1769 	copy_work.len = len;
1770 	copy_work.loopback = loopback;
1771 	copy_work.remote_dev = ep->remote_dev;
1772 	if (dir == SCIF_LOCAL_TO_REMOTE) {
1773 		copy_work.src_offset = loffset;
1774 		copy_work.src_window = local_window;
1775 		copy_work.dst_offset = roffset;
1776 		copy_work.dst_window = remote_window;
1777 	} else {
1778 		copy_work.src_offset = roffset;
1779 		copy_work.src_window = remote_window;
1780 		copy_work.dst_offset = loffset;
1781 		copy_work.dst_window = local_window;
1782 	}
1783 
1784 	if (flags & SCIF_RMA_USECPU) {
1785 		scif_rma_list_cpu_copy(&copy_work);
1786 	} else {
1787 		chan = ep->rma_info.dma_chan;
1788 		err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
1789 						     chan, loffset);
1790 	}
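	/*
	 * A temporary (non-cached) window was registered above just for
	 * this transfer; take a reference so it can be handed off to the
	 * deferred cleanup below once the copy has been submitted.
	 */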
1791 	if (addr && !cache)
1792 		atomic_inc(&ep->rma_info.tw_refcount);
1793 
1794 	mutex_unlock(&ep->rma_info.rma_lock);
1795 
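	/*
	 * For the final chunk of a fenced transfer, wait for the DMA
	 * channel to drain so the data is globally visible before
	 * returning to the caller.
	 */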
1796 	if (last_chunk) {
1797 		struct scif_dev *rdev = ep->remote_dev;
1798 
1799 		if (copy_work.fence_type == SCIF_DMA_POLL)
1800 			err = scif_drain_dma_poll(rdev->sdev,
1801 						  ep->rma_info.dma_chan);
1802 		else if (copy_work.fence_type == SCIF_DMA_INTR)
1803 			err = scif_drain_dma_intr(rdev->sdev,
1804 						  ep->rma_info.dma_chan);
1805 	}
1806 
1807 	if (addr && !cache)
1808 		scif_queue_for_cleanup(local_window, &scif_info.rma);
1809 	scif_put_peer_dev(spdev);
1810 	return err;
1811 error:
1812 	if (err) {
1813 		if (addr && local_window && !cache)
1814 			scif_destroy_window(ep, local_window);
1815 		dev_err(scif_info.mdev.this_device,
1816 			"%s %d err %d len 0x%lx\n",
1817 			__func__, __LINE__, err, len);
1818 	}
1819 	scif_put_peer_dev(spdev);
1820 	return err;
1821 }
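
/*
 * Illustrative sketch, not part of the driver: how a SCIF client might
 * combine the RMA flags validated in scif_rma_copy().  The endpoint "epd"
 * and the offsets "loffset"/"roffset" are assumed to have been set up
 * elsewhere (scif_connect()/scif_register()).
 *
 *	// Blocking, tail-byte ordered write of one page:
 *	err = scif_writeto(epd, loffset, PAGE_SIZE, roffset,
 *			   SCIF_RMA_SYNC | SCIF_RMA_ORDERED);
 *
 *	// Force the CPU copy path instead of the DMA engine:
 *	err = scif_writeto(epd, loffset, PAGE_SIZE, roffset,
 *			   SCIF_RMA_USECPU);
 */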
1822 
1823 int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
1824 		  off_t roffset, int flags)
1825 {
1826 	int err;
1827 
1828 	dev_dbg(scif_info.mdev.this_device,
1829 		"SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
1830 		epd, loffset, len, roffset, flags);
1831 	if (scif_unaligned(loffset, roffset)) {
1832 		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1833 			err = scif_rma_copy(epd, loffset, 0x0,
1834 					    SCIF_MAX_UNALIGNED_BUF_SIZE,
1835 					    roffset, flags,
1836 					    SCIF_REMOTE_TO_LOCAL, false);
1837 			if (err)
1838 				goto readfrom_err;
1839 			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1840 			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1841 			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1842 		}
1843 	}
1844 	err = scif_rma_copy(epd, loffset, 0x0, len,
1845 			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
1846 readfrom_err:
1847 	return err;
1848 }
1849 EXPORT_SYMBOL_GPL(scif_readfrom);
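
/*
 * Usage sketch (assumed caller context, not part of the driver): pulling
 * data from a peer's registered window into a locally registered buffer.
 * When loffset and roffset are misaligned with respect to each other, the
 * loop above transparently splits large requests into
 * SCIF_MAX_UNALIGNED_BUF_SIZE sized pieces.
 *
 *	off_t loff = scif_register(epd, buf, len, 0,
 *				   SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
 *	if (loff < 0)
 *		return loff;
 *	err = scif_readfrom(epd, loff, len, roff, 0);
 */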
1850 
1851 int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
1852 		 off_t roffset, int flags)
1853 {
1854 	int err;
1855 
1856 	dev_dbg(scif_info.mdev.this_device,
1857 		"SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
1858 		epd, loffset, len, roffset, flags);
1859 	if (scif_unaligned(loffset, roffset)) {
1860 		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1861 			err = scif_rma_copy(epd, loffset, 0x0,
1862 					    SCIF_MAX_UNALIGNED_BUF_SIZE,
1863 					    roffset, flags,
1864 					    SCIF_LOCAL_TO_REMOTE, false);
1865 			if (err)
1866 				goto writeto_err;
1867 			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1868 			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1869 			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1870 		}
1871 	}
1872 	err = scif_rma_copy(epd, loffset, 0x0, len,
1873 			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
1874 writeto_err:
1875 	return err;
1876 }
1877 EXPORT_SYMBOL_GPL(scif_writeto);
1878 
1879 int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
1880 		   off_t roffset, int flags)
1881 {
1882 	int err;
1883 
1884 	dev_dbg(scif_info.mdev.this_device,
1885 		"SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
1886 		epd, addr, len, roffset, flags);
1887 	if (scif_unaligned((off_t __force)addr, roffset)) {
1888 		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
1889 			flags &= ~SCIF_RMA_USECACHE;
1890 
1891 		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1892 			err = scif_rma_copy(epd, 0, (u64)addr,
1893 					    SCIF_MAX_UNALIGNED_BUF_SIZE,
1894 					    roffset, flags,
1895 					    SCIF_REMOTE_TO_LOCAL, false);
1896 			if (err)
1897 				goto vreadfrom_err;
1898 			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
1899 			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1900 			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1901 		}
1902 	}
1903 	err = scif_rma_copy(epd, 0, (u64)addr, len,
1904 			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
1905 vreadfrom_err:
1906 	return err;
1907 }
1908 EXPORT_SYMBOL_GPL(scif_vreadfrom);
1909 
1910 int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
1911 		  off_t roffset, int flags)
1912 {
1913 	int err;
1914 
1915 	dev_dbg(scif_info.mdev.this_device,
1916 		"SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
1917 		epd, addr, len, roffset, flags);
1918 	if (scif_unaligned((off_t __force)addr, roffset)) {
1919 		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
1920 			flags &= ~SCIF_RMA_USECACHE;
1921 
1922 		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1923 			err = scif_rma_copy(epd, 0, (u64)addr,
1924 					    SCIF_MAX_UNALIGNED_BUF_SIZE,
1925 					    roffset, flags,
1926 					    SCIF_LOCAL_TO_REMOTE, false);
1927 			if (err)
1928 				goto vwriteto_err;
1929 			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
1930 			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1931 			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1932 		}
1933 	}
1934 	err = scif_rma_copy(epd, 0, (u64)addr, len,
1935 			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
1936 vwriteto_err:
1937 	return err;
1938 }
1939 EXPORT_SYMBOL_GPL(scif_vwriteto);
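
/*
 * Usage sketch (assumptions: a connected endpoint "epd", a remote
 * registered offset "roff" and a virtual address "addr" valid in the
 * caller's context; not part of the driver).  The scif_vreadfrom() and
 * scif_vwriteto() variants take a virtual address instead of a local
 * registered offset and register a temporary local window internally,
 * optionally caching it when SCIF_RMA_USECACHE is passed.  Caching is
 * dropped for misaligned transfers larger than SCIF_MAX_UNALIGNED_BUF_SIZE,
 * as seen in the loops above.
 *
 *	err = scif_vwriteto(epd, addr, len, roff,
 *			    SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
 */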
1940