// SPDX-License-Identifier: GPL-2.0
/*
 * Gasket generic driver framework. This file contains the implementation
 * for the Gasket generic driver framework - the functionality that is common
 * across Gasket devices.
 *
 * Copyright (C) 2018 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "gasket_core.h"

#include "gasket_interrupt.h"
#include "gasket_ioctl.h"
#include "gasket_page_table.h"
#include "gasket_sysfs.h"

#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>

#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_mmap.h>
#else
#define trace_gasket_mmap_exit(x)
#define trace_gasket_mmap_entry(x, ...)
#endif

/*
 * "Private" members of gasket_driver_desc.
 *
 * Contains internal per-device type tracking data, i.e., data not appropriate
 * as part of the public interface for the generic framework.
 */
struct gasket_internal_desc {
	/* Device-specific-driver-provided configuration information. */
	const struct gasket_driver_desc *driver_desc;

	/* Protects access to per-driver data (i.e. this structure). */
	struct mutex mutex;

	/* Kernel-internal device class. */
	struct class *class;

	/* Instantiated / present devices of this type. */
	struct gasket_dev *devs[GASKET_DEV_MAX];
};

/* do_map_region() needs to be able to return more than just true/false. */
enum do_map_region_status {
	/* The region was successfully mapped. */
	DO_MAP_REGION_SUCCESS,

	/* Attempted to map region and failed. */
	DO_MAP_REGION_FAILURE,

	/* The requested region to map was not part of a mappable region. */
	DO_MAP_REGION_INVALID,
};

/* Global data definitions. */
/* Mutex - only for framework-wide data. Other data should be protected by
 * finer-grained locks.
 */
static DEFINE_MUTEX(g_mutex);

/* List of all registered device descriptions & their supporting data. */
static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];

/* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
static const struct gasket_num_name gasket_status_name_table[] = {
	{ GASKET_STATUS_DEAD, "DEAD" },
	{ GASKET_STATUS_ALIVE, "ALIVE" },
	{ GASKET_STATUS_LAMED, "LAMED" },
	{ GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
	{ 0, NULL },
};

/* Enumeration of the automatic Gasket framework sysfs nodes. */
enum gasket_sysfs_attribute_type {
	ATTR_BAR_OFFSETS,
	ATTR_BAR_SIZES,
	ATTR_DRIVER_VERSION,
	ATTR_FRAMEWORK_VERSION,
	ATTR_DEVICE_TYPE,
	ATTR_HARDWARE_REVISION,
	ATTR_PCI_ADDRESS,
	ATTR_STATUS,
	ATTR_IS_DEVICE_OWNED,
	ATTR_DEVICE_OWNER,
	ATTR_WRITE_OPEN_COUNT,
	ATTR_RESET_COUNT,
	ATTR_USER_MEM_RANGES
};

/* Perform a standard Gasket callback. */
static inline int
check_and_invoke_callback(struct gasket_dev *gasket_dev,
			  int (*cb_function)(struct gasket_dev *))
{
	int ret = 0;

	if (cb_function) {
		mutex_lock(&gasket_dev->mutex);
		ret = cb_function(gasket_dev);
		mutex_unlock(&gasket_dev->mutex);
	}
	return ret;
}

/* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
static inline int
gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
					int (*cb_function)(struct gasket_dev *))
{
	int ret = 0;

	if (cb_function)
		ret = cb_function(gasket_dev);
	return ret;
}

/*
 * Return nonzero if the gasket_cdev_info is owned by the current thread group
 * ID.
 */
static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
{
	return (info->ownership.is_owned &&
		(info->ownership.owner == current->tgid));
}

/*
 * Find the next free gasket_internal_dev slot.
 *
 * Returns the located slot number on success or a negative number on failure.
 */
static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
				const char *kobj_name)
{
	int i;

	mutex_lock(&internal_desc->mutex);

	/* Search for a previous instance of this device. */
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (internal_desc->devs[i] &&
		    strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
			pr_err("Duplicate device %s\n", kobj_name);
			mutex_unlock(&internal_desc->mutex);
			return -EBUSY;
		}
	}

	/* Find a free device slot. */
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (!internal_desc->devs[i])
			break;
	}

	if (i == GASKET_DEV_MAX) {
		pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
		mutex_unlock(&internal_desc->mutex);
		return -EBUSY;
	}

	mutex_unlock(&internal_desc->mutex);
	return i;
}

/*
 * Allocate and initialize a Gasket device structure and add the device to
 * the device list.
 *
 * Returns 0 if successful, a negative error code otherwise.
 */
static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
			    struct device *parent, struct gasket_dev **pdev)
{
	int dev_idx;
	const struct gasket_driver_desc *driver_desc =
		internal_desc->driver_desc;
	struct gasket_dev *gasket_dev;
	struct gasket_cdev_info *dev_info;
	const char *parent_name = dev_name(parent);

	pr_debug("Allocating a Gasket device, parent %s.\n", parent_name);

	*pdev = NULL;

	dev_idx = gasket_find_dev_slot(internal_desc, parent_name);
	if (dev_idx < 0)
		return dev_idx;

	gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
	if (!gasket_dev) {
		pr_err("no memory for device, parent %s\n", parent_name);
		return -ENOMEM;
	}
	internal_desc->devs[dev_idx] = gasket_dev;

	mutex_init(&gasket_dev->mutex);

	gasket_dev->internal_desc = internal_desc;
	gasket_dev->dev_idx = dev_idx;
	snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", parent_name);
	gasket_dev->dev = get_device(parent);
	/* gasket_bar_data is uninitialized. */
	gasket_dev->num_page_tables = driver_desc->num_page_tables;
	/* max_page_table_size and *page table are uninit'ed */
	/* interrupt_data is not initialized. */
	/* status is 0, or GASKET_STATUS_DEAD */

	dev_info = &gasket_dev->dev_info;
	snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
		 gasket_dev->dev_idx);
	dev_info->devt =
		MKDEV(driver_desc->major, driver_desc->minor +
		      gasket_dev->dev_idx);
	dev_info->device =
		device_create(internal_desc->class, parent, dev_info->devt,
			      gasket_dev, dev_info->name);

	/* cdev has not yet been added; cdev_added is 0 */
	dev_info->gasket_dev_ptr = gasket_dev;
	/* ownership is all 0, indicating no owner or opens. */

	return 0;
}

/* Free a Gasket device. */
static void gasket_free_dev(struct gasket_dev *gasket_dev)
{
	struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;

	mutex_lock(&internal_desc->mutex);
	internal_desc->devs[gasket_dev->dev_idx] = NULL;
	mutex_unlock(&internal_desc->mutex);
	put_device(gasket_dev->dev);
	kfree(gasket_dev);
}

/*
 * Maps the specified bar into kernel space.
 *
 * Returns 0 on success, a negative error code otherwise.
 * A zero-sized BAR will not be mapped, but is not an error.
 */
static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
{
	struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
	const struct gasket_driver_desc *driver_desc =
		internal_desc->driver_desc;
	ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
	int ret;

	if (desc_bytes == 0)
		return 0;

	if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
		/* not PCI: skip this entry */
		return 0;
	}
	/*
	 * pci_resource_start and pci_resource_len return a "resource_size_t",
	 * which is safely castable to ulong (which itself is the arg to
	 * request_mem_region).
	 */
	gasket_dev->bar_data[bar_num].phys_base =
		(ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
	if (!gasket_dev->bar_data[bar_num].phys_base) {
		dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
			bar_num);
		return -EINVAL;
	}

	gasket_dev->bar_data[bar_num].length_bytes =
		(ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
	if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
		dev_err(gasket_dev->dev,
			"PCI BAR %u space is too small: %lu; expected >= %lu\n",
			bar_num, gasket_dev->bar_data[bar_num].length_bytes,
			desc_bytes);
		return -ENOMEM;
	}

	if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
				gasket_dev->bar_data[bar_num].length_bytes,
				gasket_dev->dev_info.name)) {
		dev_err(gasket_dev->dev,
			"Cannot get BAR %d memory region %p\n",
			bar_num, &gasket_dev->pci_dev->resource[bar_num]);
		return -EINVAL;
	}

	gasket_dev->bar_data[bar_num].virt_base =
		ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
				gasket_dev->bar_data[bar_num].length_bytes);
	if (!gasket_dev->bar_data[bar_num].virt_base) {
		dev_err(gasket_dev->dev,
			"Cannot remap BAR %d memory region %p\n",
			bar_num, &gasket_dev->pci_dev->resource[bar_num]);
		ret = -ENOMEM;
		goto fail;
	}

	dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));

	return 0;

fail:
	iounmap(gasket_dev->bar_data[bar_num].virt_base);
	release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
			   gasket_dev->bar_data[bar_num].length_bytes);
	return ret;
}

/*
 * Releases PCI BAR mapping.
 *
 * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
 */
static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
{
	ulong base, bytes;
	struct gasket_internal_desc *internal_desc = dev->internal_desc;
	const struct gasket_driver_desc *driver_desc =
		internal_desc->driver_desc;

	if (driver_desc->bar_descriptions[bar_num].size == 0 ||
	    !dev->bar_data[bar_num].virt_base)
		return;

	if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
		return;

	iounmap(dev->bar_data[bar_num].virt_base);
	dev->bar_data[bar_num].virt_base = NULL;

	base = pci_resource_start(dev->pci_dev, bar_num);
	if (!base) {
		dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
			bar_num);
		return;
	}

	bytes = pci_resource_len(dev->pci_dev, bar_num);
	release_mem_region(base, bytes);
}

/*
 * Set up PCI memory mapping for the specified device.
 *
 * Reads the BAR registers and sets up pointers to the device's memory mapped
 * IO space.
 *
 * Returns 0 on success and a negative value otherwise.
 */
static int gasket_setup_pci(struct pci_dev *pci_dev,
			    struct gasket_dev *gasket_dev)
{
	int i, mapped_bars, ret;

	for (i = 0; i < GASKET_NUM_BARS; i++) {
		ret = gasket_map_pci_bar(gasket_dev, i);
		if (ret) {
			mapped_bars = i;
			goto fail;
		}
	}

	return 0;

fail:
	for (i = 0; i < mapped_bars; i++)
		gasket_unmap_pci_bar(gasket_dev, i);

	return -ENOMEM;
}

/* Unmaps memory for the specified device. */
static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
{
	int i;

	for (i = 0; i < GASKET_NUM_BARS; i++)
		gasket_unmap_pci_bar(gasket_dev, i);
}

/* Determine the health of the Gasket device. */
static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
{
	int status;
	int i;
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	status = gasket_check_and_invoke_callback_nolock(gasket_dev,
							 driver_desc->device_status_cb);
	if (status != GASKET_STATUS_ALIVE) {
		dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
			status);
		return status;
	}

	status = gasket_interrupt_system_status(gasket_dev);
	if (status != GASKET_STATUS_ALIVE) {
		dev_dbg(gasket_dev->dev,
			"Interrupt system reported status %d.\n", status);
		return status;
	}

	for (i = 0; i < driver_desc->num_page_tables; ++i) {
		status = gasket_page_table_system_status(gasket_dev->page_table[i]);
		if (status != GASKET_STATUS_ALIVE) {
			dev_dbg(gasket_dev->dev,
				"Page table %d reported status %d.\n",
				i, status);
			return status;
		}
	}

	return GASKET_STATUS_ALIVE;
}

static ssize_t
gasket_write_mappable_regions(char *buf,
			      const struct gasket_driver_desc *driver_desc,
			      int bar_index)
{
	int i;
	ssize_t written;
	ssize_t total_written = 0;
	ulong min_addr, max_addr;
	struct gasket_bar_desc bar_desc =
		driver_desc->bar_descriptions[bar_index];

	if (bar_desc.permissions == GASKET_NOMAP)
		return 0;
	for (i = 0;
	     i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
	     i++) {
		min_addr = bar_desc.mappable_regions[i].start -
			   driver_desc->legacy_mmap_address_offset;
		max_addr = bar_desc.mappable_regions[i].start -
			   driver_desc->legacy_mmap_address_offset +
			   bar_desc.mappable_regions[i].length_bytes;
		written = scnprintf(buf, PAGE_SIZE - total_written,
				    "0x%08lx-0x%08lx\n", min_addr, max_addr);
		total_written += written;
		buf += written;
	}
	return total_written;
}

static ssize_t gasket_sysfs_data_show(struct device *device,
				      struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	ssize_t current_written = 0;
	const struct gasket_driver_desc *driver_desc;
	struct gasket_dev *gasket_dev;
	struct gasket_sysfs_attribute *gasket_attr;
	const struct gasket_bar_desc *bar_desc;
	enum gasket_sysfs_attribute_type sysfs_type;

	gasket_dev = gasket_sysfs_get_device_data(device);
	if (!gasket_dev) {
		dev_err(device, "No sysfs mapping found for device\n");
		return 0;
	}

	gasket_attr = gasket_sysfs_get_attr(device, attr);
	if (!gasket_attr) {
		dev_err(device, "No sysfs attr found for device\n");
		gasket_sysfs_put_device_data(device, gasket_dev);
		return 0;
	}

	driver_desc = gasket_dev->internal_desc->driver_desc;

	sysfs_type =
		(enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
	switch (sysfs_type) {
	case ATTR_BAR_OFFSETS:
		for (i = 0; i < GASKET_NUM_BARS; i++) {
			bar_desc = &driver_desc->bar_descriptions[i];
			if (bar_desc->size == 0)
				continue;
			current_written =
				snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
					 (ulong)bar_desc->base);
			buf += current_written;
			ret += current_written;
		}
		break;
	case ATTR_BAR_SIZES:
		for (i = 0; i < GASKET_NUM_BARS; i++) {
			bar_desc = &driver_desc->bar_descriptions[i];
			if (bar_desc->size == 0)
				continue;
			current_written =
				snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
					 (ulong)bar_desc->size);
			buf += current_written;
			ret += current_written;
		}
		break;
	case ATTR_DRIVER_VERSION:
		ret = snprintf(buf, PAGE_SIZE, "%s\n",
			       gasket_dev->internal_desc->driver_desc->driver_version);
		break;
	case ATTR_FRAMEWORK_VERSION:
		ret = snprintf(buf, PAGE_SIZE, "%s\n",
			       GASKET_FRAMEWORK_VERSION);
		break;
	case ATTR_DEVICE_TYPE:
		ret = snprintf(buf, PAGE_SIZE, "%s\n",
			       gasket_dev->internal_desc->driver_desc->name);
		break;
	case ATTR_HARDWARE_REVISION:
		ret = snprintf(buf, PAGE_SIZE, "%d\n",
			       gasket_dev->hardware_revision);
		break;
	case ATTR_PCI_ADDRESS:
		ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
		break;
	case ATTR_STATUS:
		ret = snprintf(buf, PAGE_SIZE, "%s\n",
			       gasket_num_name_lookup(gasket_dev->status,
						      gasket_status_name_table));
		break;
	case ATTR_IS_DEVICE_OWNED:
		ret = snprintf(buf, PAGE_SIZE, "%d\n",
			       gasket_dev->dev_info.ownership.is_owned);
		break;
	case ATTR_DEVICE_OWNER:
		ret = snprintf(buf, PAGE_SIZE, "%d\n",
			       gasket_dev->dev_info.ownership.owner);
		break;
	case ATTR_WRITE_OPEN_COUNT:
		ret = snprintf(buf, PAGE_SIZE, "%d\n",
			       gasket_dev->dev_info.ownership.write_open_count);
		break;
	case ATTR_RESET_COUNT:
		ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
		break;
	case ATTR_USER_MEM_RANGES:
		for (i = 0; i < GASKET_NUM_BARS; ++i) {
			current_written =
				gasket_write_mappable_regions(buf, driver_desc,
							      i);
			buf += current_written;
			ret += current_written;
		}
		break;
	default:
		dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
			attr->attr.name);
		ret = 0;
		break;
	}

	gasket_sysfs_put_attr(device, gasket_attr);
	gasket_sysfs_put_device_data(device, gasket_dev);
	return ret;
}

/* These attributes apply to all Gasket driver instances. */
static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
	GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
	GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
	GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
			ATTR_DRIVER_VERSION),
	GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
			ATTR_FRAMEWORK_VERSION),
	GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
	GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
			ATTR_HARDWARE_REVISION),
	GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
	GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
	GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
			ATTR_IS_DEVICE_OWNED),
	GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
			ATTR_DEVICE_OWNER),
	GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
			ATTR_WRITE_OPEN_COUNT),
	GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
	GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
			ATTR_USER_MEM_RANGES),
	GASKET_END_OF_ATTR_ARRAY
};

/* Add a char device and related info. */
static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
			   const struct file_operations *file_ops,
			   struct module *owner)
{
	int ret;

	cdev_init(&dev_info->cdev, file_ops);
	dev_info->cdev.owner = owner;
	ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
	if (ret) {
		dev_err(dev_info->gasket_dev_ptr->dev,
			"cannot add char device [ret=%d]\n", ret);
		return ret;
	}
	dev_info->cdev_added = 1;

	return 0;
}

/* Disable device operations. */
void gasket_disable_device(struct gasket_dev *gasket_dev)
{
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;
	int i;

	/* Only delete the device if it has been successfully added. */
	if (gasket_dev->dev_info.cdev_added)
		cdev_del(&gasket_dev->dev_info.cdev);

	gasket_dev->status = GASKET_STATUS_DEAD;

	gasket_interrupt_cleanup(gasket_dev);

	for (i = 0; i < driver_desc->num_page_tables; ++i) {
		if (gasket_dev->page_table[i]) {
			gasket_page_table_reset(gasket_dev->page_table[i]);
			gasket_page_table_cleanup(gasket_dev->page_table[i]);
		}
	}
}
EXPORT_SYMBOL(gasket_disable_device);

/*
 * Registered driver descriptor lookup for PCI devices.
 *
 * Precondition: Called with g_mutex held (to avoid a race on return).
 * Returns NULL if no matching device was found.
 */
static struct gasket_internal_desc *
lookup_pci_internal_desc(struct pci_dev *pci_dev)
{
	int i;

	__must_hold(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc &&
		    g_descs[i].driver_desc->pci_id_table &&
		    pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
			return &g_descs[i];
	}

	return NULL;
}

/*
 * Verifies that the user has permissions to perform the requested mapping and
 * that the provided descriptor/range is of adequate size to hold the range to
 * be mapped.
 */
static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
					struct vm_area_struct *vma,
					int bar_permissions)
{
	int requested_permissions;
	/* Always allow sysadmin to access. */
	if (capable(CAP_SYS_ADMIN))
		return true;

	/* Never allow non-sysadmins to access a dead device. */
	if (gasket_dev->status != GASKET_STATUS_ALIVE) {
		dev_dbg(gasket_dev->dev, "Device is dead.\n");
		return false;
	}

	/* Make sure that no wrong flags are set. */
	requested_permissions =
		(vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
	if (requested_permissions & ~(bar_permissions)) {
		dev_dbg(gasket_dev->dev,
			"Attempting to map a region with requested permissions "
			"0x%x, but region has permissions 0x%x.\n",
			requested_permissions, bar_permissions);
		return false;
	}

	/* Do not allow a non-owner to write. */
	if ((vma->vm_flags & VM_WRITE) &&
	    !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
		dev_dbg(gasket_dev->dev,
			"Attempting to mmap a region for write without owning device.\n");
		return false;
	}

	return true;
}

/*
 * Verifies that the input address is within the region allocated to the
 * coherent buffer.
 */
static bool
gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
			  ulong address)
{
	struct gasket_coherent_buffer_desc coh_buff_desc =
		driver_desc->coherent_buffer_description;

	if (coh_buff_desc.permissions != GASKET_NOMAP) {
		if ((address >= coh_buff_desc.base) &&
		    (address < coh_buff_desc.base + coh_buff_desc.size)) {
			return true;
		}
	}
	return false;
}

static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
				ulong phys_addr)
{
	int i;
	const struct gasket_driver_desc *driver_desc;

	driver_desc = gasket_dev->internal_desc->driver_desc;
	for (i = 0; i < GASKET_NUM_BARS; ++i) {
		struct gasket_bar_desc bar_desc =
			driver_desc->bar_descriptions[i];

		if (bar_desc.permissions != GASKET_NOMAP) {
			if (phys_addr >= bar_desc.base &&
			    phys_addr < (bar_desc.base + bar_desc.size)) {
				return i;
			}
		}
	}
	/* If we haven't found the address by now, it is invalid. */
	return -EINVAL;
}

/*
 * Sets the actual bounds to map, given the device's mappable region.
 *
 * Given the device's mappable region, along with the user-requested mapping
 * start offset and length of the user region, determine how much of this
 * mappable region can be mapped into the user's region (start/end offsets),
 * and the physical offset (phys_offset) into the BAR where the mapping should
 * begin (either the VMA's or region lower bound).
 *
 * In other words, this calculates the overlap between the VMA
 * (bar_offset, requested_length) and the given gasket_mappable_region.
 *
 * Returns true if there's anything to map, and false otherwise.
 */
static bool
gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
			    ulong bar_offset, ulong requested_length,
			    struct gasket_mappable_region *mappable_region,
			    ulong *virt_offset)
{
	ulong range_start = region->start;
	ulong range_length = region->length_bytes;
	ulong range_end = range_start + range_length;

	*virt_offset = 0;
	if (bar_offset + requested_length < range_start) {
		/*
		 * If the requested region is completely below the range,
		 * there is nothing to map.
		 */
		return false;
	} else if (bar_offset <= range_start) {
		/* If the bar offset is below this range's start
		 * but the requested length continues into it:
		 * 1) Only map starting from the beginning of this
		 *      range's phys. offset, so we don't map unmappable
		 *	memory.
		 * 2) The length of the virtual memory to not map is the
		 *	delta between the bar offset and the
		 *	mappable start (and since the mappable start is
		 *	bigger, start - req.)
		 * 3) The map length is the minimum of the mappable
		 *	requested length (requested_length - virt_offset)
		 *	and the actual mappable length of the range.
		 */
		mappable_region->start = range_start;
		*virt_offset = range_start - bar_offset;
		mappable_region->length_bytes =
			min(requested_length - *virt_offset, range_length);
		return true;
	} else if (bar_offset > range_start &&
		   bar_offset < range_end) {
		/*
		 * If the bar offset is within this range:
		 * 1) Map starting from the bar offset.
		 * 2) Because there is no forbidden memory between the
		 *	bar offset and the range start,
		 *	virt_offset is 0.
		 * 3) The map length is the minimum of the requested
		 *	length and the remaining length in the buffer
		 *	(range_end - bar_offset)
		 */
		mappable_region->start = bar_offset;
		*virt_offset = 0;
		mappable_region->length_bytes =
			min(requested_length, range_end - bar_offset);
		return true;
	}

	/*
	 * If the requested [start] offset is above range_end,
	 * there's nothing to map.
	 */
	return false;
}
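
/*
 * Worked example for gasket_mm_get_mapping_addrs() above (illustrative
 * numbers only, not from real hardware): for a mappable region
 * {start = 0x1000, length_bytes = 0x3000} and a VMA with bar_offset = 0x0
 * and requested_length = 0x2000, the second branch applies:
 * *virt_offset becomes 0x1000 and mappable_region becomes
 * {start = 0x1000, length_bytes = min(0x2000 - 0x1000, 0x3000) = 0x1000},
 * i.e. the first page of the VMA stays unmapped and the BAR overlap
 * [0x1000, 0x2000) is mapped behind it.
 */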

/*
 * Calculates the offset where the VMA range begins in its containing BAR.
 * The offset is written into bar_offset on success.
 * Returns zero on success, anything else on error.
 */
static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
				    const struct vm_area_struct *vma,
				    ulong *bar_offset)
{
	ulong raw_offset;
	int bar_index;
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
		driver_desc->legacy_mmap_address_offset;
	bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
	if (bar_index < 0) {
		dev_err(gasket_dev->dev,
			"Unable to find matching bar for address 0x%lx\n",
			raw_offset);
		trace_gasket_mmap_exit(bar_index);
		return bar_index;
	}
	*bar_offset =
		raw_offset - driver_desc->bar_descriptions[bar_index].base;

	return 0;
}

int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
			   struct vm_area_struct *vma,
			   const struct gasket_mappable_region *map_region)
{
	ulong bar_offset;
	ulong virt_offset;
	struct gasket_mappable_region mappable_region;
	int ret;

	if (map_region->length_bytes == 0)
		return 0;

	ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
	if (ret)
		return ret;

	if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
					 vma->vm_end - vma->vm_start,
					 &mappable_region, &virt_offset))
		return 1;

	/*
	 * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
	 * PAGE_SIZE! Trust me. I have the scars.
	 *
	 * Next multiple of y: ceil_div(x, y) * y
	 */
	zap_vma_ptes(vma, vma->vm_start + virt_offset,
		     DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
		     PAGE_SIZE);
	return 0;
}
EXPORT_SYMBOL(gasket_mm_unmap_region);

/* Maps a virtual address + range to a physical offset of a BAR. */
static enum do_map_region_status
do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
	      struct gasket_mappable_region *mappable_region)
{
	/* Maximum size of a single call to io_remap_pfn_range. */
	/* I pulled this number out of thin air. */
	const ulong max_chunk_size = 64 * 1024 * 1024;
	ulong chunk_size, mapped_bytes = 0;

	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	ulong bar_offset, virt_offset;
	struct gasket_mappable_region region_to_map;
	ulong phys_offset, map_length;
	ulong virt_base, phys_base;
	int bar_index, ret;

	ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
	if (ret)
		return DO_MAP_REGION_INVALID;

	if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
					 vma->vm_end - vma->vm_start,
					 &region_to_map, &virt_offset))
		return DO_MAP_REGION_INVALID;
	phys_offset = region_to_map.start;
	map_length = region_to_map.length_bytes;

	virt_base = vma->vm_start + virt_offset;
	bar_index =
		gasket_get_bar_index(gasket_dev,
				     (vma->vm_pgoff << PAGE_SHIFT) +
				     driver_desc->legacy_mmap_address_offset);
	phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
	while (mapped_bytes < map_length) {
		/*
		 * io_remap_pfn_range can take a while, so we chunk its
		 * calls and call cond_resched between each.
		 */
		chunk_size = min(max_chunk_size, map_length - mapped_bytes);

		cond_resched();
		ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
					 (phys_base + mapped_bytes) >>
					 PAGE_SHIFT, chunk_size,
					 vma->vm_page_prot);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Error remapping PFN range.\n");
			goto fail;
		}
		mapped_bytes += chunk_size;
	}

	return DO_MAP_REGION_SUCCESS;

fail:
	/* Unmap the partial chunk we mapped. */
	mappable_region->length_bytes = mapped_bytes;
	if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
		dev_err(gasket_dev->dev,
			"Error unmapping partial region 0x%lx (0x%lx bytes)\n",
			(ulong)virt_offset,
			(ulong)mapped_bytes);

	return DO_MAP_REGION_FAILURE;
}

/* Map a region of coherent memory. */
static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
				struct vm_area_struct *vma)
{
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;
	const ulong requested_length = vma->vm_end - vma->vm_start;
	int ret;
	ulong permissions;

	if (requested_length == 0 || requested_length >
	    gasket_dev->coherent_buffer.length_bytes) {
		trace_gasket_mmap_exit(-EINVAL);
		return -EINVAL;
	}

	permissions = driver_desc->coherent_buffer_description.permissions;
	if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
		dev_err(gasket_dev->dev, "Permission checking failed.\n");
		trace_gasket_mmap_exit(-EPERM);
		return -EPERM;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start,
			      (gasket_dev->coherent_buffer.phys_base) >>
			      PAGE_SHIFT, requested_length, vma->vm_page_prot);
	if (ret) {
		dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
			ret);
		trace_gasket_mmap_exit(ret);
		return ret;
	}

	/* Record the user virtual to dma_address mapping that was
	 * created by the kernel.
	 */
	gasket_set_user_virt(gasket_dev, requested_length,
			     gasket_dev->coherent_buffer.phys_base,
			     vma->vm_start);
	return 0;
}

/* Map a device's BARs into user space. */
static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int i, ret;
	int bar_index;
	int has_mapped_anything = 0;
	ulong permissions;
	ulong raw_offset, vma_size;
	bool is_coherent_region;
	const struct gasket_driver_desc *driver_desc;
	struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
	const struct gasket_bar_desc *bar_desc;
	struct gasket_mappable_region *map_regions = NULL;
	int num_map_regions = 0;
	enum do_map_region_status map_status;

	driver_desc = gasket_dev->internal_desc->driver_desc;

	if (vma->vm_start & ~PAGE_MASK) {
		dev_err(gasket_dev->dev,
			"Base address not page-aligned: 0x%lx\n",
			vma->vm_start);
		trace_gasket_mmap_exit(-EINVAL);
		return -EINVAL;
	}

	/* Calculate the offset of this range into physical mem. */
	raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
		driver_desc->legacy_mmap_address_offset;
	vma_size = vma->vm_end - vma->vm_start;
	trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
				vma_size);

	/*
	 * Check if the raw offset is within a bar region. If not, check if it
	 * is a coherent region.
	 */
	bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
	is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
	if (bar_index < 0 && !is_coherent_region) {
		dev_err(gasket_dev->dev,
			"Unable to find matching bar for address 0x%lx\n",
			raw_offset);
		trace_gasket_mmap_exit(bar_index);
		return bar_index;
	}
	if (bar_index >= 0 && is_coherent_region) {
		dev_err(gasket_dev->dev,
			"double matching bar and coherent buffers for address 0x%lx\n",
			raw_offset);
		trace_gasket_mmap_exit(bar_index);
		return -EINVAL;
	}

	vma->vm_private_data = gasket_dev;

	if (is_coherent_region)
		return gasket_mmap_coherent(gasket_dev, vma);

	/* Everything in the rest of this function is for normal BAR mapping. */

	/*
	 * Subtract the base of the bar from the raw offset to get the
	 * memory location within the bar to map.
	 */
	bar_desc = &driver_desc->bar_descriptions[bar_index];
	permissions = bar_desc->permissions;
	if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
		dev_err(gasket_dev->dev, "Permission checking failed.\n");
		trace_gasket_mmap_exit(-EPERM);
		return -EPERM;
	}

	if (driver_desc->get_mappable_regions_cb) {
		ret = driver_desc->get_mappable_regions_cb(gasket_dev,
							   bar_index,
							   &map_regions,
							   &num_map_regions);
		if (ret)
			return ret;
	} else {
		if (!gasket_mmap_has_permissions(gasket_dev, vma,
						 bar_desc->permissions)) {
			dev_err(gasket_dev->dev,
				"Permission checking failed.\n");
			trace_gasket_mmap_exit(-EPERM);
			return -EPERM;
		}
		num_map_regions = bar_desc->num_mappable_regions;
		map_regions = kcalloc(num_map_regions,
				      sizeof(*bar_desc->mappable_regions),
				      GFP_KERNEL);
		if (map_regions) {
			memcpy(map_regions, bar_desc->mappable_regions,
			       num_map_regions *
					sizeof(*bar_desc->mappable_regions));
		}
	}

	if (!map_regions || num_map_regions == 0) {
		dev_err(gasket_dev->dev, "No mappable regions returned!\n");
		return -EINVAL;
	}

	/* Marks the VMA's pages as uncacheable. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	for (i = 0; i < num_map_regions; i++) {
		map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
		/* Try the next region if this one was not mappable. */
		if (map_status == DO_MAP_REGION_INVALID)
			continue;
		if (map_status == DO_MAP_REGION_FAILURE) {
			ret = -ENOMEM;
			goto fail;
		}

		has_mapped_anything = 1;
	}

	kfree(map_regions);

	/* If we could not map any memory, the request was invalid. */
	if (!has_mapped_anything) {
		dev_err(gasket_dev->dev,
			"Map request did not contain a valid region.\n");
		trace_gasket_mmap_exit(-EINVAL);
		return -EINVAL;
	}

	trace_gasket_mmap_exit(0);
	return 0;

fail:
	/* Unmap the ranges we actually mapped (from map_regions, which may
	 * have come from the get_mappable_regions_cb callback).
	 */
	num_map_regions = i;
	for (i = 0; i < num_map_regions; i++)
		if (gasket_mm_unmap_region(gasket_dev, vma, &map_regions[i]))
			dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
				i);
	kfree(map_regions);

	return ret;
}

/*
 * Open the char device file.
 *
 * If the open is for writing, and the device is not owned, this process becomes
 * the owner.  If the open is for writing and the device is already owned by
 * some other process, it is an error.  If this process is the owner, increment
 * the open count.
 *
 * Returns 0 if successful, a negative error number otherwise.
 */
static int gasket_open(struct inode *inode, struct file *filp)
{
	int ret;
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc;
	struct gasket_ownership *ownership;
	char task_name[TASK_COMM_LEN];
	struct gasket_cdev_info *dev_info =
	    container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);

	gasket_dev = dev_info->gasket_dev_ptr;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	ownership = &dev_info->ownership;
	get_task_comm(task_name, current);
	filp->private_data = gasket_dev;
	inode->i_size = 0;

	dev_dbg(gasket_dev->dev,
		"Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
		"fmode_write: %d is_root: %u)\n",
		current->tgid, task_name, filp->f_mode,
		(filp->f_mode & FMODE_WRITE), is_root);

	/* Always allow non-writing accesses. */
	if (!(filp->f_mode & FMODE_WRITE)) {
		dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
		return 0;
	}

	mutex_lock(&gasket_dev->mutex);

	dev_dbg(gasket_dev->dev,
		"Current owner open count (owning tgid %u): %d.\n",
		ownership->owner, ownership->write_open_count);

	/* Opening a node owned by another TGID is an error (unless root) */
	if (ownership->is_owned && ownership->owner != current->tgid &&
	    !is_root) {
		dev_err(gasket_dev->dev,
			"Process %u is opening a node held by %u.\n",
			current->tgid, ownership->owner);
		mutex_unlock(&gasket_dev->mutex);
		return -EPERM;
	}

	/* If the node is not owned, assign it to the current TGID. */
	if (!ownership->is_owned) {
		ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
							      driver_desc->device_open_cb);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Error in device open cb: %d\n", ret);
			mutex_unlock(&gasket_dev->mutex);
			return ret;
		}
		ownership->is_owned = 1;
		ownership->owner = current->tgid;
		dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
			ownership->owner);
	}

	ownership->write_open_count++;

	dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);

	mutex_unlock(&gasket_dev->mutex);
	return 0;
}
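
/*
 * Illustrative walk-through of the ownership rules in gasket_open()
 * (tgids are made up): process A (tgid 100) opens the node O_RDWR and
 * becomes the owner with write_open_count == 1; a second O_RDWR open by
 * A raises the count to 2. An O_RDWR open by an unrelated process B
 * (tgid 200) fails with -EPERM unless B has CAP_SYS_ADMIN in its pid
 * namespace, while O_RDONLY opens succeed for anyone.
 */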

/*
 * Called on a close of the device file.  If this process is the owner,
 * decrement the open count.  On last close by the owner, free up buffers and
 * eventfd contexts, and release ownership.
 *
 * Returns 0 if successful, a negative error number otherwise.
 */
static int gasket_release(struct inode *inode, struct file *file)
{
	int i;
	struct gasket_dev *gasket_dev;
	struct gasket_ownership *ownership;
	const struct gasket_driver_desc *driver_desc;
	char task_name[TASK_COMM_LEN];
	struct gasket_cdev_info *dev_info =
		container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);

	gasket_dev = dev_info->gasket_dev_ptr;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	ownership = &dev_info->ownership;
	get_task_comm(task_name, current);
	mutex_lock(&gasket_dev->mutex);

	dev_dbg(gasket_dev->dev,
		"Releasing device node. Call origin: tgid %u (%s) "
		"(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
		current->tgid, task_name, file->f_mode,
		(file->f_mode & FMODE_WRITE), is_root);
	dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);

	if (file->f_mode & FMODE_WRITE) {
		ownership->write_open_count--;
		if (ownership->write_open_count == 0) {
			dev_dbg(gasket_dev->dev, "Device is now free\n");
			ownership->is_owned = 0;
			ownership->owner = 0;

			/* Forces chip reset before we unmap the page tables. */
			driver_desc->device_reset_cb(gasket_dev);

			for (i = 0; i < driver_desc->num_page_tables; ++i) {
				gasket_page_table_unmap_all(gasket_dev->page_table[i]);
				gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
				gasket_free_coherent_memory_all(gasket_dev, i);
			}

			/* Closes device, enters power save. */
			gasket_check_and_invoke_callback_nolock(gasket_dev,
								driver_desc->device_close_cb);
		}
	}

	dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);
	mutex_unlock(&gasket_dev->mutex);
	return 0;
}

/*
 * Gasket ioctl dispatch function.
 *
 * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
 * ioctl_handler_cb registered in the driver description.
 * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
 */
static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
{
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc;
	void __user *argp = (void __user *)arg;
	char path[256];

	gasket_dev = (struct gasket_dev *)filp->private_data;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	if (!driver_desc) {
		dev_dbg(gasket_dev->dev,
			"Unable to find device descriptor for file %s\n",
			d_path(&filp->f_path, path, 256));
		return -ENODEV;
	}

	if (!gasket_is_supported_ioctl(cmd)) {
		/*
		 * The ioctl handler is not a standard Gasket callback, since
		 * it requires different arguments. This means we can't use
		 * check_and_invoke_callback.
		 */
		if (driver_desc->ioctl_handler_cb)
			return driver_desc->ioctl_handler_cb(filp, cmd, argp);

		dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}

	return gasket_handle_ioctl(filp, cmd, argp);
}

/* File operations for all Gasket devices. */
static const struct file_operations gasket_file_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.mmap = gasket_mmap,
	.open = gasket_open,
	.release = gasket_release,
	.unlocked_ioctl = gasket_ioctl,
};

/* Perform final init and mark the device as active. */
int gasket_enable_device(struct gasket_dev *gasket_dev)
{
	int tbl_idx;
	int ret;
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	ret = gasket_interrupt_init(gasket_dev);
	if (ret) {
		dev_err(gasket_dev->dev,
			"Critical failure to allocate interrupts: %d\n", ret);
		gasket_interrupt_cleanup(gasket_dev);
		return ret;
	}

	for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
		dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
			tbl_idx);
		ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
					     &gasket_dev->bar_data[driver_desc->page_table_bar_index],
					     &driver_desc->page_table_configs[tbl_idx],
					     gasket_dev->dev,
					     gasket_dev->pci_dev);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Couldn't init page table %d: %d\n",
				tbl_idx, ret);
			return ret;
		}
		/*
		 * Make sure that the page table is clear and set to simple
		 * addresses.
		 */
		gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
	}

	/*
	 * hardware_revision_cb returns a positive integer (the revision) if
	 * successful.
	 */
	ret = check_and_invoke_callback(gasket_dev,
					driver_desc->hardware_revision_cb);
	if (ret < 0) {
		dev_err(gasket_dev->dev,
			"Error getting hardware revision: %d\n", ret);
		return ret;
	}
	gasket_dev->hardware_revision = ret;

	/* device_status_cb returns a device status, not an error code. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD)
		dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");

	ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
			      driver_desc->module);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(gasket_enable_device);

static int __gasket_add_device(struct device *parent_dev,
			       struct gasket_internal_desc *internal_desc,
			       struct gasket_dev **gasket_devp)
{
	int ret;
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc =
	    internal_desc->driver_desc;

	ret = gasket_alloc_dev(internal_desc, parent_dev, &gasket_dev);
	if (ret)
		return ret;
	if (IS_ERR(gasket_dev->dev_info.device)) {
		dev_err(parent_dev, "Cannot create %s device %s [ret = %ld]\n",
			driver_desc->name, gasket_dev->dev_info.name,
			PTR_ERR(gasket_dev->dev_info.device));
		ret = -ENODEV;
		goto free_gasket_dev;
	}

	ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
					  gasket_dev);
	if (ret)
		goto remove_device;

	ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
					  gasket_sysfs_generic_attrs);
	if (ret)
		goto remove_sysfs_mapping;

	*gasket_devp = gasket_dev;
	return 0;

remove_sysfs_mapping:
	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
remove_device:
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
free_gasket_dev:
	gasket_free_dev(gasket_dev);
	return ret;
}

static void __gasket_remove_device(struct gasket_internal_desc *internal_desc,
				   struct gasket_dev *gasket_dev)
{
	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
	gasket_free_dev(gasket_dev);
}

/*
 * Add PCI gasket device.
 *
 * Called by Gasket device probe function.
 * Allocates device metadata and maps device memory.  The device driver must
 * call gasket_enable_device after driver init is complete to place the device
 * in active use.
 */
int gasket_pci_add_device(struct pci_dev *pci_dev,
			  struct gasket_dev **gasket_devp)
{
	int ret;
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev;
	struct device *parent;

	dev_dbg(&pci_dev->dev, "add PCI gasket device\n");

	mutex_lock(&g_mutex);
	internal_desc = lookup_pci_internal_desc(pci_dev);
	mutex_unlock(&g_mutex);
	if (!internal_desc) {
		dev_err(&pci_dev->dev,
			"PCI add device called for unknown driver type\n");
		return -ENODEV;
	}

	parent = &pci_dev->dev;
	ret = __gasket_add_device(parent, internal_desc, &gasket_dev);
	if (ret)
		return ret;

	gasket_dev->pci_dev = pci_dev;
	ret = gasket_setup_pci(pci_dev, gasket_dev);
	if (ret)
		goto cleanup_pci;

	/*
	 * Once we've created the mapping structures successfully, attempt to
	 * create a symlink to the pci directory of this object.
	 */
	ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
				&pci_dev->dev.kobj, dev_name(&pci_dev->dev));
	if (ret) {
		dev_err(gasket_dev->dev,
			"Cannot create sysfs pci link: %d\n", ret);
		goto cleanup_pci;
	}

	*gasket_devp = gasket_dev;
	return 0;

cleanup_pci:
	gasket_cleanup_pci(gasket_dev);
	__gasket_remove_device(internal_desc, gasket_dev);
	return ret;
}
EXPORT_SYMBOL(gasket_pci_add_device);
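
/*
 * Minimal sketch of a caller of gasket_pci_add_device(), for a
 * hypothetical device-specific driver "foo" (names and the exact
 * ordering around pci_enable_device are illustrative assumptions, not
 * mandated by this file):
 *
 *	static int foo_pci_probe(struct pci_dev *pci_dev,
 *				 const struct pci_device_id *id)
 *	{
 *		struct gasket_dev *gasket_dev;
 *		int ret;
 *
 *		ret = pci_enable_device(pci_dev);
 *		if (ret)
 *			return ret;
 *		ret = gasket_pci_add_device(pci_dev, &gasket_dev);
 *		if (ret)
 *			goto out_disable;
 *		ret = gasket_enable_device(gasket_dev);
 *		if (ret)
 *			goto out_remove;
 *		return 0;
 *
 *	out_remove:
 *		gasket_pci_remove_device(pci_dev);
 *	out_disable:
 *		pci_disable_device(pci_dev);
 *		return ret;
 *	}
 */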

/* Remove a PCI gasket device. */
void gasket_pci_remove_device(struct pci_dev *pci_dev)
{
	int i;
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev = NULL;
	/* Find the device desc. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_pci_internal_desc(pci_dev);
	if (!internal_desc) {
		mutex_unlock(&g_mutex);
		return;
	}
	mutex_unlock(&g_mutex);

	/* Now find the specific device */
	mutex_lock(&internal_desc->mutex);
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (internal_desc->devs[i] &&
		    internal_desc->devs[i]->pci_dev == pci_dev) {
			gasket_dev = internal_desc->devs[i];
			break;
		}
	}
	mutex_unlock(&internal_desc->mutex);

	if (!gasket_dev)
		return;

	dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
		internal_desc->driver_desc->name);

	gasket_cleanup_pci(gasket_dev);
	__gasket_remove_device(internal_desc, gasket_dev);
}
EXPORT_SYMBOL(gasket_pci_remove_device);

/**
 * Look up a name by number in a num_name table.
 * @num: Number to look up.
 * @table: Array of num_name structures, the table for the lookup.
 *
 * Description: Searches for num in the table.  If found, the
 *		corresponding name is returned; otherwise NULL
 *		is returned.
 *
 *		The table must have a NULL name pointer at the end.
 */
const char *gasket_num_name_lookup(uint num,
				   const struct gasket_num_name *table)
{
	uint i = 0;

	while (table[i].snn_name) {
		if (num == table[i].snn_num)
			break;
		++i;
	}

	return table[i].snn_name;
}
EXPORT_SYMBOL(gasket_num_name_lookup);
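
/*
 * Example, using gasket_status_name_table defined above:
 * gasket_num_name_lookup(GASKET_STATUS_ALIVE, gasket_status_name_table)
 * returns "ALIVE"; a number not present in the table returns NULL.
 */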

int gasket_reset(struct gasket_dev *gasket_dev)
{
	int ret;

	mutex_lock(&gasket_dev->mutex);
	ret = gasket_reset_nolock(gasket_dev);
	mutex_unlock(&gasket_dev->mutex);
	return ret;
}
EXPORT_SYMBOL(gasket_reset);

int gasket_reset_nolock(struct gasket_dev *gasket_dev)
{
	int ret;
	int i;
	const struct gasket_driver_desc *driver_desc;

	driver_desc = gasket_dev->internal_desc->driver_desc;
	if (!driver_desc->device_reset_cb)
		return 0;

	ret = driver_desc->device_reset_cb(gasket_dev);
	if (ret) {
		dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
			ret);
		return ret;
	}

	/* Reinitialize the page tables and interrupt framework. */
	for (i = 0; i < driver_desc->num_page_tables; ++i)
		gasket_page_table_reset(gasket_dev->page_table[i]);

	ret = gasket_interrupt_reinit(gasket_dev);
	if (ret) {
		dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
			ret);
		return ret;
	}

	/* Get current device health. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD) {
		dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(gasket_reset_nolock);

gasket_ioctl_permissions_cb_t
gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
{
	return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
}
EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);

/* Get the driver structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 */
const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
{
	return dev->internal_desc->driver_desc;
}

/* Get the device structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 */
struct device *gasket_get_device(struct gasket_dev *dev)
{
	return dev->dev;
}

/**
 * Poll a device register until it reads an expected value.
 * @gasket_dev: Device struct.
 * @bar: BAR containing the register.
 * @offset: Register offset.
 * @mask: Register mask.
 * @val: Expected value.
 * @max_retries: Maximum number of reads before giving up.
 * @delay_ms: Sleep period between reads, in milliseconds.
 *
 * Description: Repeatedly reads the register and sleeps between reads
 * until the masked value equals @val or @max_retries reads have been
 * made. Sleeps, so it must not be called from atomic context.
 **/
int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
				u64 offset, u64 mask, u64 val,
				uint max_retries, u64 delay_ms)
{
	uint retries = 0;
	u64 tmp;

	while (retries < max_retries) {
		tmp = gasket_dev_read_64(gasket_dev, bar, offset);
		if ((tmp & mask) == val)
			return 0;
		msleep(delay_ms);
		retries++;
	}
	dev_dbg(gasket_dev->dev, "%s: reg 0x%llx timed out after %llu ms\n",
		__func__, offset, max_retries * delay_ms);
	return -ETIMEDOUT;
}
EXPORT_SYMBOL(gasket_wait_with_reschedule);
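
/*
 * Illustrative use of gasket_wait_with_reschedule() (register offset and
 * values are made up): poll bit 0 of a status register in BAR 2 until it
 * clears, for at most ~2 seconds:
 *
 *	ret = gasket_wait_with_reschedule(gasket_dev, 2, FOO_STATUS_OFFSET,
 *					  0x1, 0x0, 20, 100);
 *
 * where FOO_STATUS_OFFSET is a hypothetical device register offset. This
 * reads the register up to 20 times, sleeping 100 ms between reads, and
 * returns -ETIMEDOUT if (value & 0x1) never equals 0.
 */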

/* See gasket_core.h for description. */
int gasket_register_device(const struct gasket_driver_desc *driver_desc)
{
	int i, ret;
	int desc_idx = -1;
	struct gasket_internal_desc *internal;

	pr_debug("Loading %s driver version %s\n", driver_desc->name,
		 driver_desc->driver_version);
	/* Check for duplicates and find a free slot. */
	mutex_lock(&g_mutex);

	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			pr_err("%s driver already loaded/registered\n",
			       driver_desc->name);
			mutex_unlock(&g_mutex);
			return -EBUSY;
		}
	}

	/* This and the above loop could be combined, but this reads easier. */
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (!g_descs[i].driver_desc) {
			g_descs[i].driver_desc = driver_desc;
			desc_idx = i;
			break;
		}
	}
	mutex_unlock(&g_mutex);

	if (desc_idx == -1) {
		pr_err("too many drivers loaded, max %d\n",
		       GASKET_FRAMEWORK_DESC_MAX);
		return -EBUSY;
	}

	internal = &g_descs[desc_idx];
	mutex_init(&internal->mutex);
	memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
	internal->class =
		class_create(driver_desc->module, driver_desc->name);

	if (IS_ERR(internal->class)) {
		pr_err("Cannot register %s class [ret=%ld]\n",
		       driver_desc->name, PTR_ERR(internal->class));
		ret = PTR_ERR(internal->class);
		goto unregister_gasket_driver;
	}

	ret = register_chrdev_region(MKDEV(driver_desc->major,
					   driver_desc->minor), GASKET_DEV_MAX,
				     driver_desc->name);
	if (ret) {
		pr_err("cannot register %s char driver [ret=%d]\n",
		       driver_desc->name, ret);
		goto destroy_class;
	}

	return 0;

destroy_class:
	class_destroy(internal->class);

unregister_gasket_driver:
	mutex_lock(&g_mutex);
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);
	return ret;
}
EXPORT_SYMBOL(gasket_register_device);
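
/*
 * Sketch of how a device-specific module might pair registration and
 * unregistration (hypothetical "foo" driver; only descriptor fields that
 * this file actually consumes are shown, and the values are illustrative):
 *
 *	static const struct gasket_driver_desc foo_desc = {
 *		.name = "foo",
 *		.driver_version = "1.0",
 *		.major = 120,
 *		.minor = 0,
 *		.module = THIS_MODULE,
 *		.pci_id_table = foo_pci_ids,
 *		.num_page_tables = 1,
 *		...
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return gasket_register_device(&foo_desc);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		gasket_unregister_device(&foo_desc);
 *	}
 */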

/* See gasket_core.h for description. */
void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
{
	int i, desc_idx;
	struct gasket_internal_desc *internal_desc = NULL;

	mutex_lock(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			internal_desc = &g_descs[i];
			desc_idx = i;
			break;
		}
	}

	if (!internal_desc) {
		mutex_unlock(&g_mutex);
		pr_err("request to unregister unknown desc: %s, %d:%d\n",
		       driver_desc->name, driver_desc->major,
		       driver_desc->minor);
		return;
	}

	unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
				 GASKET_DEV_MAX);

	class_destroy(internal_desc->class);

	/* Finally, effectively "remove" the driver. */
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);

	pr_debug("removed %s driver\n", driver_desc->name);
}
EXPORT_SYMBOL(gasket_unregister_device);

static int __init gasket_init(void)
{
	int i;

	mutex_lock(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		g_descs[i].driver_desc = NULL;
		mutex_init(&g_descs[i].mutex);
	}

	gasket_sysfs_init();

	mutex_unlock(&g_mutex);
	return 0;
}

MODULE_DESCRIPTION("Google Gasket driver framework");
MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
module_init(gasket_init);