// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"

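/*
 * scif_fdopen() - open() handler for the SCIF character device: allocate a
 * new endpoint via scif_open() and stash it in the file's private_data.
 */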
static int scif_fdopen(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = scif_open();

	if (!priv)
		return -ENOMEM;
	f->private_data = priv;
	return 0;
}

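/*
 * scif_fdclose() - release() handler: close the endpoint stored in
 * private_data once the last reference to the file is dropped.
 */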
static int scif_fdclose(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = f->private_data;

	return scif_close(priv);
}

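/*
 * scif_fdmmap() - mmap() handler: delegate to scif_mmap(), which maps the
 * registered window identified by the VMA offset into the caller's address
 * space.
 */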
static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
{
	struct scif_endpt *priv = f->private_data;

	return scif_mmap(vma, priv);
}

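/*
 * scif_fdpoll() - poll()/select() handler: report endpoint readiness via
 * __scif_pollfd().
 */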
static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
	struct scif_endpt *priv = f->private_data;

	return __scif_pollfd(f, wait, priv);
}

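/*
 * scif_fdflush() - flush() handler, invoked on every close() of a file
 * descriptor referencing this endpoint.
 */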
static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes the open file information before
	 * waiting for incoming connections. If the application closes the
	 * endpoint while a separate thread is blocked waiting for incoming
	 * connections, the release callback is never invoked, because the
	 * file descriptor reference count is bumped up in the accept IOCTL.
	 * Call the flush routine when the id matches the stashed open file
	 * information so that the listening endpoint can be woken up and the
	 * fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}

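/*
 * scif_err_debug() - emit a dev_dbg() message for failed operations while
 * suppressing the uninteresting -ENOTCONN case.
 */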
static __always_inline void scif_err_debug(int err, const char *str)
{
	/*
	 * ENOTCONN is a common, uninteresting error that would otherwise
	 * flood the console with debug messages.
	 */
	if (err < 0 && err != -ENOTCONN)
		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}

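/*
 * scif_fdioctl() - dispatch the SCIF_* ioctls that implement the user-space
 * SCIF API (bind/listen/connect/accept, messaging, registration, RMA and
 * fence operations) on the endpoint stored in f->private_data.
 *
 * Illustrative user-space usage (a sketch only; it assumes the SCIF_* ioctl
 * definitions from the uapi <linux/scif_ioctl.h> header and that the misc
 * device is exposed as /dev/scif):
 *
 *	int fd = open("/dev/scif", O_RDWR);
 *	int pn = 2000;
 *
 *	ioctl(fd, SCIF_BIND, &pn);	// bind to a local port
 *	ioctl(fd, SCIF_LISTEN, 16);	// listen with a backlog of 16
 */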
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves.  The request ioctl does the basic
	 * functionality of accepting the request and returning the information
	 * about it including the internal ID of the end point.  The register
	 * is done with the internal ID on a new file descriptor opened by the
	 * requesting process.
	 */
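	/*
	 * Illustrative user-space sequence for the two halves (a sketch only;
	 * it assumes the uapi struct scifioctl_accept layout, the
	 * SCIF_ACCEPT_SYNC flag from the SCIF API headers and a /dev/scif
	 * device node):
	 *
	 *	struct scifioctl_accept req = { .flags = SCIF_ACCEPT_SYNC };
	 *
	 *	ioctl(listen_fd, SCIF_ACCEPTREQ, &req);		// first half
	 *	int new_fd = open("/dev/scif", O_RDWR);
	 *	ioctl(new_fd, SCIF_ACCEPTREG, &req.endpt);	// second half
	 */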
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		mutex_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		mutex_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Read back the endpoint pointer returned by SCIF_ACCEPTREQ */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/* Remove from the user accept queue */
		mutex_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			mutex_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		mutex_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_anon_inode_fput(priv);
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		f->private_data = newep;
		return 0;
	}
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}

		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;

		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
	case SCIF_REG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_reg reg;
		off_t ret;

		if (copy_from_user(&reg, argp, sizeof(reg))) {
			err = -EFAULT;
			goto reg_err;
		}
		if (reg.flags & SCIF_MAP_KERNEL) {
			err = -EINVAL;
			goto reg_err;
		}
		ret = scif_register(priv, (void *)reg.addr, reg.len,
				    reg.offset, reg.prot, reg.flags);
		if (ret < 0) {
			err = (int)ret;
			goto reg_err;
		}

		if (copy_to_user(&((struct scifioctl_reg __user *)argp)
				 ->out_offset, &ret, sizeof(reg.out_offset))) {
			err = -EFAULT;
			goto reg_err;
		}
		err = 0;
reg_err:
		scif_err_debug(err, "scif_register");
		return err;
	}
	case SCIF_UNREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_unreg unreg;

		if (copy_from_user(&unreg, argp, sizeof(unreg))) {
			err = -EFAULT;
			goto unreg_err;
		}
		err = scif_unregister(priv, unreg.offset, unreg.len);
unreg_err:
		scif_err_debug(err, "scif_unregister");
		return err;
	}
	case SCIF_READFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto readfrom_err;
		}
		err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
				    copy.flags);
readfrom_err:
		scif_err_debug(err, "scif_readfrom");
		return err;
	}
	case SCIF_WRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto writeto_err;
		}
		err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
				   copy.flags);
writeto_err:
		scif_err_debug(err, "scif_writeto");
		return err;
	}
	case SCIF_VREADFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vreadfrom_err;
		}
		err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
				     copy.roffset, copy.flags);
vreadfrom_err:
		scif_err_debug(err, "scif_vreadfrom");
		return err;
	}
	case SCIF_VWRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vwriteto_err;
		}
		err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
				    copy.roffset, copy.flags);
vwriteto_err:
		scif_err_debug(err, "scif_vwriteto");
		return err;
	}
	case SCIF_FENCE_MARK:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_mark mark;
		int tmp_mark = 0;

		if (copy_from_user(&mark, argp, sizeof(mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
		err = scif_fence_mark(priv, mark.flags, &tmp_mark);
		if (err)
			goto fence_mark_err;
		if (copy_to_user((void __user *)mark.mark, &tmp_mark,
				 sizeof(tmp_mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
fence_mark_err:
		scif_err_debug(err, "scif_fence_mark");
		return err;
	}
	case SCIF_FENCE_WAIT:
	{
		struct scif_endpt *priv = f->private_data;

		err = scif_fence_wait(priv, arg);
		scif_err_debug(err, "scif_fence_wait");
		return err;
	}
	case SCIF_FENCE_SIGNAL:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_signal signal;

		if (copy_from_user(&signal, argp, sizeof(signal))) {
			err = -EFAULT;
			goto fence_signal_err;
		}

		err = scif_fence_signal(priv, signal.loff, signal.lval,
					signal.roff, signal.rval, signal.flags);
fence_signal_err:
		scif_err_debug(err, "scif_fence_signal");
		return err;
	}
	}
	return -EINVAL;
}

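/*
 * File operations backing the SCIF character device. scif_fops is registered
 * elsewhere in the driver (with the misc device referenced via
 * scif_info.mdev), so the handlers above implement the user-visible device
 * node interface.
 */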
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.mmap = scif_fdmmap,
	.poll = scif_fdpoll,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};