// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Google, Inc. */
#include "gasket.h"
#include "gasket_ioctl.h"
#include "gasket_constants.h"
#include "gasket_core.h"
#include "gasket_interrupt.h"
#include "gasket_page_table.h"
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_ioctl.h>
#else
#define trace_gasket_ioctl_entry(x, ...)
#define trace_gasket_ioctl_exit(x)
#define trace_gasket_ioctl_integer_data(x)
#define trace_gasket_ioctl_eventfd_data(x, ...)
#define trace_gasket_ioctl_page_table_data(x, ...)
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
#endif

/* Associate an eventfd with an interrupt. */
static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
			       struct gasket_interrupt_eventfd __user *argp)
{
	struct gasket_interrupt_eventfd die;

	if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
		return -EFAULT;

	trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);

	return gasket_interrupt_set_eventfd(
		gasket_dev->interrupt_data, die.interrupt, die.event_fd);
}
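
/*
 * Userspace usage sketch (illustration only, not part of this driver):
 * associate an eventfd with interrupt 0 via GASKET_IOCTL_SET_EVENTFD.
 * The device node path is a placeholder; the real name comes from the
 * driver that registers with the Gasket framework.
 *
 *	int dev_fd = open("/dev/gasket0", O_RDWR);
 *	int event_fd = eventfd(0, 0);
 *	struct gasket_interrupt_eventfd req = {
 *		.interrupt = 0,
 *		.event_fd = event_fd,
 *	};
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_SET_EVENTFD, &req) < 0)
 *		perror("GASKET_IOCTL_SET_EVENTFD");
 */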

/* Read the size of the page table. */
static int gasket_read_page_table_size(
	struct gasket_dev *gasket_dev,
	struct gasket_page_table_ioctl __user *argp)
{
	int ret = 0;
	struct gasket_page_table_ioctl ibuf;

	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;

	ibuf.size = gasket_page_table_num_entries(
		gasket_dev->page_table[ibuf.page_table_index]);

	trace_gasket_ioctl_page_table_data(
		ibuf.page_table_index, ibuf.size, ibuf.host_address,
		ibuf.device_address);

	if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
		return -EFAULT;

	return ret;
}

/* Read the size of the simple page table. */
static int gasket_read_simple_page_table_size(
	struct gasket_dev *gasket_dev,
	struct gasket_page_table_ioctl __user *argp)
{
	int ret = 0;
	struct gasket_page_table_ioctl ibuf;

	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;

	ibuf.size =
		gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);

	trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
					   ibuf.host_address,
					   ibuf.device_address);

	if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
		return -EFAULT;

	return ret;
}
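
/*
 * Userspace usage sketch (illustration only): query how many entries page
 * table 0 holds. Only page_table_index is consumed on input; size is
 * filled in by the handler and copied back.
 *
 *	struct gasket_page_table_ioctl ibuf = {
 *		.page_table_index = 0,
 *	};
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_PAGE_TABLE_SIZE, &ibuf) == 0)
 *		printf("page table 0 has %llu entries\n",
 *		       (unsigned long long)ibuf.size);
 */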

/* Set the boundary between the simple and extended page tables. */
static int gasket_partition_page_table(
	struct gasket_dev *gasket_dev,
	struct gasket_page_table_ioctl __user *argp)
{
	int ret;
	struct gasket_page_table_ioctl ibuf;
	uint max_page_table_size;

	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

	trace_gasket_ioctl_page_table_data(
		ibuf.page_table_index, ibuf.size, ibuf.host_address,
		ibuf.device_address);

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;
	max_page_table_size = gasket_page_table_max_size(
		gasket_dev->page_table[ibuf.page_table_index]);

	if (ibuf.size > max_page_table_size) {
		dev_dbg(gasket_dev->dev,
			"Partition request 0x%llx too large, max is 0x%x\n",
			ibuf.size, max_page_table_size);
		return -EINVAL;
	}

	mutex_lock(&gasket_dev->mutex);

	ret = gasket_page_table_partition(
		gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
	mutex_unlock(&gasket_dev->mutex);

	return ret;
}
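
/*
 * Userspace usage sketch (illustration only; treating size as the number
 * of simple entries is an assumption drawn from the comment above): place
 * the simple/extended boundary of page table 0 at 32 entries.
 *
 *	struct gasket_page_table_ioctl ibuf = {
 *		.page_table_index = 0,
 *		.size = 32,
 *	};
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_PARTITION_PAGE_TABLE, &ibuf) < 0)
 *		perror("GASKET_IOCTL_PARTITION_PAGE_TABLE");
 */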

/* Map a userspace buffer to a device virtual address. */
static int gasket_map_buffers(struct gasket_dev *gasket_dev,
			      struct gasket_page_table_ioctl __user *argp)
{
	struct gasket_page_table_ioctl ibuf;

	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

	trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
					   ibuf.host_address,
					   ibuf.device_address);

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;

	if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
					    ibuf.host_address,
					    ibuf.device_address, ibuf.size))
		return -EINVAL;

	return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
				     ibuf.host_address, ibuf.device_address,
				     ibuf.size / PAGE_SIZE);
}

/* Unmap a userspace buffer from a device virtual address. */
static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
				struct gasket_page_table_ioctl __user *argp)
{
	struct gasket_page_table_ioctl ibuf;

	if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
		return -EFAULT;

	trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
					   ibuf.host_address,
					   ibuf.device_address);

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;

	if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
					      ibuf.device_address, ibuf.size))
		return -EINVAL;

	gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
				ibuf.device_address, ibuf.size / PAGE_SIZE);

	return 0;
}
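
/*
 * Userspace usage sketch (illustration only) for the map/unmap pair above:
 * map one page-aligned host buffer into page table 0 at a device address
 * the driver accepts, then unmap it with the same descriptor. The handlers
 * convert size to a page count (size / PAGE_SIZE).
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	void *buf = aligned_alloc(page, page);
 *	struct gasket_page_table_ioctl ibuf = {
 *		.page_table_index = 0,
 *		.host_address = (uint64_t)(uintptr_t)buf,
 *		.device_address = 0,
 *		.size = page,
 *	};
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_MAP_BUFFER, &ibuf) < 0)
 *		perror("GASKET_IOCTL_MAP_BUFFER");
 *
 *	(use the mapping, then tear it down)
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_UNMAP_BUFFER, &ibuf) < 0)
 *		perror("GASKET_IOCTL_UNMAP_BUFFER");
 */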

/*
 * Reserve structures for coherent allocation, and allocate or free the
 * corresponding memory.
 */
static int gasket_config_coherent_allocator(
	struct gasket_dev *gasket_dev,
	struct gasket_coherent_alloc_config_ioctl __user *argp)
{
	int ret;
	struct gasket_coherent_alloc_config_ioctl ibuf;

	if (copy_from_user(&ibuf, argp,
			   sizeof(struct gasket_coherent_alloc_config_ioctl)))
		return -EFAULT;

	trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
						     ibuf.dma_address);

	if (ibuf.page_table_index >= gasket_dev->num_page_tables)
		return -EFAULT;

	if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
		return -ENOMEM;

	if (ibuf.enable == 0) {
		ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
						  ibuf.dma_address,
						  ibuf.page_table_index);
	} else {
		ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
						   &ibuf.dma_address,
						   ibuf.page_table_index);
	}
	if (ret)
		return ret;
	if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
		return -EFAULT;

	return 0;
}
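
/*
 * Userspace usage sketch (illustration only): allocate a one-page coherent
 * region for page table 0 (enable == 1). On success the handler writes the
 * chosen dma_address back; passing the same descriptor with enable == 0
 * frees the region again.
 *
 *	struct gasket_coherent_alloc_config_ioctl ibuf = {
 *		.page_table_index = 0,
 *		.enable = 1,
 *		.size = 4096,
 *	};
 *
 *	if (ioctl(dev_fd, GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR, &ibuf) == 0)
 *		printf("coherent region at dma_address 0x%llx\n",
 *		       (unsigned long long)ibuf.dma_address);
 */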

/* Check permissions for Gasket ioctls. */
static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
{
	bool alive;
	bool read, write;
	struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;

	alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
	if (!alive)
		dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
			__func__, alive, gasket_dev->status);

	read = !!(filp->f_mode & FMODE_READ);
	write = !!(filp->f_mode & FMODE_WRITE);

	switch (cmd) {
	case GASKET_IOCTL_RESET:
	case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
		return write;

	case GASKET_IOCTL_PAGE_TABLE_SIZE:
	case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
	case GASKET_IOCTL_NUMBER_PAGE_TABLES:
		return read;

	case GASKET_IOCTL_PARTITION_PAGE_TABLE:
	case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
		return alive && write;

	case GASKET_IOCTL_MAP_BUFFER:
	case GASKET_IOCTL_UNMAP_BUFFER:
		return alive && write;

	case GASKET_IOCTL_CLEAR_EVENTFD:
	case GASKET_IOCTL_SET_EVENTFD:
		return alive && write;
	}

	return false; /* unknown permissions */
}

/*
 * Standard ioctl dispatcher; forwards operations to individual handlers.
 * @filp: File structure pointer describing this node usage session.
 * @cmd: ioctl number to handle.
 * @argp: ioctl-specific data pointer.
 */
long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
{
	struct gasket_dev *gasket_dev;
	unsigned long arg = (unsigned long)argp;
	gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
	int retval;

	gasket_dev = (struct gasket_dev *)filp->private_data;
	trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);

	ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
	if (ioctl_permissions_cb) {
		retval = ioctl_permissions_cb(filp, cmd, argp);
		if (retval < 0) {
			trace_gasket_ioctl_exit(retval);
			return retval;
		} else if (retval == 0) {
			trace_gasket_ioctl_exit(-EPERM);
			return -EPERM;
		}
	} else if (!gasket_ioctl_check_permissions(filp, cmd)) {
		trace_gasket_ioctl_exit(-EPERM);
		dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
		return -EPERM;
	}

	/* Tracing happens in this switch statement for all ioctls with
	 * an integer argument; for ioctls with a struct argument that
	 * needs copying and decoding, tracing is done within the
	 * handler call.
	 */
	switch (cmd) {
	case GASKET_IOCTL_RESET:
		retval = gasket_reset(gasket_dev);
		break;
	case GASKET_IOCTL_SET_EVENTFD:
		retval = gasket_set_event_fd(gasket_dev, argp);
		break;
	case GASKET_IOCTL_CLEAR_EVENTFD:
		trace_gasket_ioctl_integer_data(arg);
		retval =
			gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
						       (int)arg);
		break;
	case GASKET_IOCTL_PARTITION_PAGE_TABLE:
		trace_gasket_ioctl_integer_data(arg);
		retval = gasket_partition_page_table(gasket_dev, argp);
		break;
	case GASKET_IOCTL_NUMBER_PAGE_TABLES:
		trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
		if (copy_to_user(argp, &gasket_dev->num_page_tables,
				 sizeof(uint64_t)))
			retval = -EFAULT;
		else
			retval = 0;
		break;
	case GASKET_IOCTL_PAGE_TABLE_SIZE:
		retval = gasket_read_page_table_size(gasket_dev, argp);
		break;
	case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
		retval = gasket_read_simple_page_table_size(gasket_dev, argp);
		break;
	case GASKET_IOCTL_MAP_BUFFER:
		retval = gasket_map_buffers(gasket_dev, argp);
		break;
	case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
		retval = gasket_config_coherent_allocator(gasket_dev, argp);
		break;
	case GASKET_IOCTL_UNMAP_BUFFER:
		retval = gasket_unmap_buffers(gasket_dev, argp);
		break;
	case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
		/* Clear interrupt counts doesn't take an arg, so use 0. */
		trace_gasket_ioctl_integer_data(0);
		retval = gasket_interrupt_reset_counts(gasket_dev);
		break;
	default:
		/* If we don't understand the ioctl, the best we can do is trace
		 * the arg.
		 */
		trace_gasket_ioctl_integer_data(arg);
		dev_dbg(gasket_dev->dev,
			"Unknown ioctl cmd=0x%x not caught by gasket_is_supported_ioctl\n",
			cmd);
		retval = -EINVAL;
		break;
	}

	trace_gasket_ioctl_exit(retval);
	return retval;
}

/*
 * Determines if an ioctl is part of the standard Gasket framework.
 * @cmd: The ioctl number to handle.
 *
 * Returns 1 if the ioctl is supported and 0 otherwise.
 */
long gasket_is_supported_ioctl(uint cmd)
{
	switch (cmd) {
	case GASKET_IOCTL_RESET:
	case GASKET_IOCTL_SET_EVENTFD:
	case GASKET_IOCTL_CLEAR_EVENTFD:
	case GASKET_IOCTL_PARTITION_PAGE_TABLE:
	case GASKET_IOCTL_NUMBER_PAGE_TABLES:
	case GASKET_IOCTL_PAGE_TABLE_SIZE:
	case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
	case GASKET_IOCTL_MAP_BUFFER:
	case GASKET_IOCTL_UNMAP_BUFFER:
	case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
	case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
		return 1;
	default:
		return 0;
	}
}
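
/*
 * A minimal dispatch sketch (an assumption about the caller, typically the
 * chardev's unlocked_ioctl handler): route framework ioctls to
 * gasket_handle_ioctl() and let anything else fall through to a
 * device-specific handler or -ENOTTY.
 *
 *	static long example_ioctl(struct file *filp, uint cmd, ulong arg)
 *	{
 *		void __user *argp = (void __user *)arg;
 *
 *		if (gasket_is_supported_ioctl(cmd))
 *			return gasket_handle_ioctl(filp, cmd, argp);
 *
 *		return -ENOTTY;
 *	}
 */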