/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */

#include "scif_main.h"

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int mark = 0;
	int err;

	err = _scif_fence_mark(ep, &mark);
	if (err)
		msg->uop = SCIF_MARK_NACK;
	else
		msg->uop = SCIF_MARK_ACK;
	msg->payload[0] = ep->remote_ep;
	msg->payload[2] = mark;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_MARK_ACK) {
		fence_req->state = OP_COMPLETED;
		fence_req->dma_mark = (int)msg->payload[2];
	} else {
		fence_req->state = OP_FAILED;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has requested waiting on a fence.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_remote_fence_info *fence;

	/*
	 * Allocate structure for remote fence information and
	 * send a NACK if the allocation failed. The peer will
	 * return ENOMEM upon receiving a NACK.
	 */
	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		msg->payload[0] = ep->remote_ep;
		msg->uop = SCIF_WAIT_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		return;
	}

	/* Prepare the fence request */
	memcpy(&fence->msg, msg, sizeof(struct scifmsg));
	INIT_LIST_HEAD(&fence->list);

	/* Insert to the global remote fence request list */
	mutex_lock(&scif_info.fencelock);
	atomic_inc(&ep->rma_info.fence_refcount);
	list_add_tail(&fence->list, &scif_info.fence);
	mutex_unlock(&scif_info.fencelock);

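	/*
	 * Defer the fence wait to the misc work thread: waiting on the
	 * DMA mark may block for a long time and must not stall node
	 * queue pair message processing.
	 */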
	schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_WAIT_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_SELF);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_PEER);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @scifdev:	SCIF device
 * @msg:	Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[3];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_SIG_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
	struct page **pages = window->pinned_pages->pages;
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;

	return page_address(pages[page_nr]) + page_off;
}

static void scif_prog_signal_cb(void *arg)
{
	struct scif_status *status = arg;

	dma_pool_free(status->ep->remote_dev->signal_pool, status,
		      status->src_dma_addr);
}

static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
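	/*
	 * A channel that cannot perform byte-aligned copies is assumed
	 * to be the X100 DMA engine, which signals via an immediate
	 * data (status) descriptor instead of a pool-backed memcpy.
	 */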
	bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
	struct dma_async_tx_descriptor *tx;
	struct scif_status *status = NULL;
	dma_addr_t src;
	dma_cookie_t cookie;
	int err;

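	/*
	 * Submit a zero-byte memcpy flagged DMA_PREP_FENCE so that the
	 * signal write queued below is ordered behind all DMA transfers
	 * previously submitted on this channel.
	 */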
	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	dma_async_issue_pending(chan);
	if (x100) {
		/*
		 * For X100 use the status descriptor to write the value to
		 * the destination.
		 */
		tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
	} else {
		status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
					&src);
		if (!status) {
			err = -ENOMEM;
			dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
			goto alloc_fail;
		}
		status->val = val;
		status->src_dma_addr = src;
		status->ep = ep;
		src += offsetof(struct scif_status, val);
		tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
						  DMA_PREP_INTERRUPT);
	}
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	if (!x100) {
		tx->callback = scif_prog_signal_cb;
		tx->callback_param = status;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = -EIO;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	dma_async_issue_pending(chan);
	return 0;
dma_fail:
	if (!x100)
		dma_pool_free(ep->remote_dev->signal_pool, status,
			      status->src_dma_addr);
alloc_fail:
	return err;
}

/*
 * scif_prog_signal:
 * @epd: endpoint descriptor
 * @offset: registered offset to write @val to
 * @val: value to be written at @offset
 * @type: type of the window
 *
 * Arrange to write a value to the registered offset after ensuring that the
 * offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	dma_addr_t dst_dma_addr;
	int err;

	mutex_lock(&ep->rma_info.rma_lock);
	req.out_window = &window;
	req.offset = offset;
	req.nr_bytes = sizeof(u64);
	req.prot = SCIF_PROT_WRITE;
	req.type = SCIF_WINDOW_SINGLE;
	if (type == SCIF_WINDOW_SELF)
		req.head = &ep->rma_info.reg_list;
	else
		req.head = &ep->rma_info.remote_reg_list;
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto unlock_ret;
	}

	if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
		u64 *dst_virt;

		if (type == SCIF_WINDOW_SELF)
			dst_virt = scif_get_local_va(offset, window);
		else
			dst_virt =
			scif_get_local_va(offset, (struct scif_window *)
					  window->peer_window);
		*dst_virt = val;
	} else {
		dst_dma_addr = __scif_off_to_dma_addr(window, offset);
		err = _scif_prog_signal(epd, dst_dma_addr, val);
	}
unlock_ret:
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}

static int _scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
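	/*
	 * Strip the SCIF_REMOTE_FENCE flag, if present, to recover the
	 * raw DMA cookie returned by tx_submit() in _scif_fence_mark().
	 */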
	dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
	int err;

	/* Wait for DMA callback in scif_fence_mark_cb(..) */
	err = wait_event_interruptible_timeout(ep->rma_info.markwq,
					       dma_async_is_tx_complete(
					       ep->rma_info.dma_chan,
					       cookie, NULL, NULL) ==
					       DMA_COMPLETE,
					       SCIF_NODE_ALIVE_TIMEOUT);
	if (!err)
		err = -ETIMEDOUT;
	else if (err > 0)
		err = 0;
	return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests.
 */
void scif_rma_handle_remote_fences(void)
{
	struct list_head *item, *tmp;
	struct scif_remote_fence_info *fence;
	struct scif_endpt *ep;
	int mark, err;

	might_sleep();
	mutex_lock(&scif_info.fencelock);
	list_for_each_safe(item, tmp, &scif_info.fence) {
		fence = list_entry(item, struct scif_remote_fence_info,
				   list);
		/* Remove fence from global list */
		list_del(&fence->list);

		/* Initiate the fence operation */
		ep = (struct scif_endpt *)fence->msg.payload[0];
		mark = fence->msg.payload[2];
		err = _scif_fence_wait(ep, mark);
		if (err)
			fence->msg.uop = SCIF_WAIT_NACK;
		else
			fence->msg.uop = SCIF_WAIT_ACK;
		fence->msg.payload[0] = ep->remote_ep;
		scif_nodeqp_send(ep->remote_dev, &fence->msg);
		kfree(fence);
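		/*
		 * Dropping the last fence reference may allow endpoint
		 * cleanup to make progress, so kick the misc work
		 * thread.
		 */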
		if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
			schedule_work(&scif_info.misc_work);
	}
	mutex_unlock(&scif_info.fencelock);
}

static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
	int err;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);

	msg.src = ep->port;
	msg.uop = uop;
	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = (u64)fence_req;
	if (uop == SCIF_WAIT)
		msg.payload[2] = mark;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		err = scif_nodeqp_send(ep->remote_dev, &msg);
	else
		err = -ENOTCONN;
	spin_unlock(&ep->lock);
	if (err)
		goto error_free;
retry:
	/* Wait for a SCIF_WAIT_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	mutex_lock(&ep->rma_info.rma_lock);
	if (err < 0) {
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENOMEM;
	if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
		*out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
	mutex_unlock(&ep->rma_info.rma_lock);
error_free:
	kfree(fence_req);
error:
	return err;
}

/**
 * scif_send_fence_mark:
 * @epd: end point descriptor.
 * @out_mark: Output DMA mark reported by peer.
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
	return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: end point descriptor.
 * @mark: DMA mark to wait for.
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
	return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
					struct scif_fence_info *fence_req)
{
	int err;

retry:
	/* Wait for a SCIF_SIG_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	if (err < 0) {
		mutex_lock(&ep->rma_info.rma_lock);
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
		mutex_unlock(&ep->rma_info.rma_lock);
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENXIO;
	return err;
}

/**
 * scif_send_fence_signal:
 * @epd: endpoint descriptor
 * @roff: remote offset
 * @rval: remote value to write to @roff
 * @loff: local offset
 * @lval: local value to write to @loff
 * @flags: flags
 *
 * Sends a remote fence signal request
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
				  off_t loff, u64 lval, int flags)
{
	int err = 0;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);
	msg.src = ep->port;
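	/*
	 * "Local" and "remote" below are relative to the initiating
	 * endpoint: the peer services SCIF_SIG_LOCAL by writing @roff
	 * in its own registered address space, and SCIF_SIG_REMOTE by
	 * writing @loff in the initiator's registered address space.
	 */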
	if (flags & SCIF_SIGNAL_LOCAL) {
		msg.uop = SCIF_SIG_LOCAL;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = roff;
		msg.payload[2] = rval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
		if (err)
			goto error_free;
	}
	fence_req->state = OP_IN_PROGRESS;

	if (flags & SCIF_SIGNAL_REMOTE) {
		msg.uop = SCIF_SIG_REMOTE;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = loff;
		msg.payload[2] = lval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
	}
error_free:
	kfree(fence_req);
error:
	return err;
}

static void scif_fence_mark_cb(void *arg)
{
	struct scif_endpt *ep = (struct scif_endpt *)arg;

	wake_up_interruptible(&ep->rma_info.markwq);
	atomic_dec(&ep->rma_info.fence_refcount);
}

/*
 * _scif_fence_mark:
 * @epd: endpoint descriptor
 * @mark: DMA mark returned to the caller
 *
 * Set up a mark for this endpoint and return the value of the mark.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int err;

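	/*
	 * Queue a zero-byte fenced memcpy followed by an interrupt
	 * descriptor. The cookie of the interrupt descriptor becomes
	 * the mark; its callback fires only after every DMA transfer
	 * submitted on this channel ahead of the mark has completed.
	 */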
	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	dma_async_issue_pending(chan);
	tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	tx->callback = scif_fence_mark_cb;
	tx->callback_param = ep;
	*mark = cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	atomic_inc(&ep->rma_info.fence_refcount);
	dma_async_issue_pending(chan);
	return 0;
}

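/*
 * Mark handed out for management node loopback, where no DMA is
 * involved; scif_fence_wait() accepts only this value in that case.
 */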
#define SCIF_LOOPB_MAGIC_MARK 0xdead

int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
		ep, flags, *mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/*
	 * Management node loopback does not need to use DMA.
	 * Return a valid mark to be symmetric.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		*mark = SCIF_LOOPB_MAGIC_MARK;
		return 0;
	}

	if (flags & SCIF_FENCE_INIT_SELF)
		err = _scif_fence_mark(epd, mark);
	else
		err = scif_send_fence_mark(ep, mark);

	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
		ep, flags, *mark, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);

int scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_wait: ep %p mark 0x%x\n",
		ep, mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * Management node loopback does not need to use DMA.
	 * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK
	 * so simply return success if the mark is valid.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		if (mark == SCIF_LOOPB_MAGIC_MARK)
			return 0;
		else
			return -EINVAL;
	}
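	/*
	 * Marks tagged with SCIF_REMOTE_FENCE were generated on the
	 * peer node, so the wait must be serviced by the peer as well.
	 */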
	if (mark & SCIF_REMOTE_FENCE)
		err = scif_send_fence_wait(epd, mark);
	else
		err = _scif_fence_wait(epd, mark);
	if (err < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);

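/*
 * Example (illustrative sketch, not part of this driver): a client
 * typically pairs scif_fence_mark() and scif_fence_wait() to order its
 * RMAs. `epd` below is assumed to be a connected endpoint with
 * outstanding scif_writeto() transfers:
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (err)
 *		return err;
 *	err = scif_fence_wait(epd, mark);
 *
 * scif_fence_wait() returns only after every RMA initiated on epd
 * before the mark has completed.
 */
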
int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
		      off_t roff, u64 rval, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
		ep, loff, lval, roff, rval, flags);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
			SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
	if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
		return -EINVAL;

	/* Only Dword aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
		return -EINVAL;

	/* Only Dword aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
		return -EINVAL;

	if (flags & SCIF_FENCE_INIT_PEER) {
		err = scif_send_fence_signal(epd, roff, rval, loff,
					     lval, flags);
	} else {
		/* Local Signal in Local RAS */
		if (flags & SCIF_SIGNAL_LOCAL) {
			err = scif_prog_signal(epd, loff, lval,
					       SCIF_WINDOW_SELF);
			if (err)
				goto error_ret;
		}

		/* Signal in Remote RAS */
		if (flags & SCIF_SIGNAL_REMOTE)
			err = scif_prog_signal(epd, roff,
					       rval, SCIF_WINDOW_PEER);
	}
error_ret:
	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);
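
/*
 * Example (illustrative sketch, not part of this driver): after queueing
 * RMA transfers on a connected endpoint `epd`, a client can ask for the
 * value 1 to be written to its own registered offset `loff` once those
 * transfers complete:
 *
 *	err = scif_fence_signal(epd, loff, 1, 0, 0,
 *				SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);
 *
 * A reader polling the u64 at `loff` then observes 1 only after the
 * preceding RMAs have landed.
 */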