// SPDX-License-Identifier: GPL-2.0-only
/*
 *  dcdbas.c: Dell Systems Management Base Driver
 *
 *  The Dell Systems Management Base Driver provides a sysfs interface for
 *  systems management software to perform System Management Interrupts (SMIs)
 *  and Host Control Actions (power cycle or power off after OS shutdown) on
 *  Dell systems.
 *
 *  See Documentation/driver-api/dcdbas.rst for more information.
 *
 *  Copyright (C) 1995-2006 Dell Inc.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mutex.h>

#include "dcdbas.h"

#define DRIVER_NAME		"dcdbas"
#define DRIVER_VERSION		"5.6.0-3.3"
#define DRIVER_DESCRIPTION	"Dell Systems Management Base Driver"

static struct platform_device *dcdbas_pdev;

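/*
 * SMI data buffer state.  The buffer is either DMA-coherent memory
 * allocated on demand or the firmware-provided WSMT communication
 * buffer mapped in dcdbas_check_wsmt().  The physical address is kept
 * as a u32 because BIOS SMI calls require buffer addresses in the
 * 32-bit address space.
 */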
static u8 *smi_data_buf;
static dma_addr_t smi_data_buf_handle;
static unsigned long smi_data_buf_size;
static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static u32 smi_data_buf_phys_addr;
static DEFINE_MUTEX(smi_data_lock);
static u8 *eps_buffer;

static unsigned int host_control_action;
static unsigned int host_control_smi_type;
static unsigned int host_control_on_shutdown;

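/*
 * Set when the WSMT-provided communication buffer is in use; that buffer
 * is owned by firmware and is never freed or reallocated by this driver.
 */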
static bool wsmt_enabled;

/**
 * smi_data_buf_free: free SMI data buffer
 */
static void smi_data_buf_free(void)
{
	if (!smi_data_buf || wsmt_enabled)
		return;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, smi_data_buf_phys_addr, smi_data_buf_size);

	dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
			  smi_data_buf_handle);
	smi_data_buf = NULL;
	smi_data_buf_handle = 0;
	smi_data_buf_phys_addr = 0;
	smi_data_buf_size = 0;
}

/**
 * smi_data_buf_realloc: grow SMI data buffer if needed
 */
static int smi_data_buf_realloc(unsigned long size)
{
	void *buf;
	dma_addr_t handle;

	if (smi_data_buf_size >= size)
		return 0;

	if (size > max_smi_data_buf_size)
		return -EINVAL;

	/* new buffer is needed */
	buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
	if (!buf) {
		dev_dbg(&dcdbas_pdev->dev,
			"%s: failed to allocate memory size %lu\n",
			__func__, size);
		return -ENOMEM;
	}
	/* memory zeroed by dma_alloc_coherent */

	if (smi_data_buf)
		memcpy(buf, smi_data_buf, smi_data_buf_size);

	/* free any existing buffer */
	smi_data_buf_free();

	/* set up new buffer for use */
	smi_data_buf = buf;
	smi_data_buf_handle = handle;
	smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
	smi_data_buf_size = size;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, smi_data_buf_phys_addr, smi_data_buf_size);

	return 0;
}

static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
}

static ssize_t smi_data_buf_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "%lu\n", smi_data_buf_size);
}

static ssize_t smi_data_buf_size_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned long buf_size;
	ssize_t ret;

	buf_size = simple_strtoul(buf, NULL, 10);

	/* make sure SMI data buffer is at least buf_size */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(buf_size);
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	return count;
}

static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	mutex_lock(&smi_data_lock);
	ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf,
					smi_data_buf_size);
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	if ((pos + count) > max_smi_data_buf_size)
		return -EINVAL;

	mutex_lock(&smi_data_lock);

	ret = smi_data_buf_realloc(pos + count);
	if (ret)
		goto out;

	memcpy(smi_data_buf + pos, buf, count);
	ret = count;
out:
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t host_control_action_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", host_control_action);
}

static ssize_t host_control_action_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	/* make sure buffer is available for host control command */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	host_control_action = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_smi_type_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", host_control_smi_type);
}

static ssize_t host_control_smi_type_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	host_control_smi_type = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_on_shutdown_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", host_control_on_shutdown);
}

static ssize_t host_control_on_shutdown_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
	return count;
}

static int raise_smi(void *par)
{
	struct smi_cmd *smi_cmd = par;

	if (smp_processor_id() != 0) {
		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
			__func__);
		return -EBUSY;
	}

	/* generate SMI */
	/* inb to force posted write through and make SMI happen now */
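	/*
	 * AL holds the command code, DX the command port; EBX and ECX are
	 * loaded so the SMI handler can read them as arguments.
	 */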
	asm volatile (
		"outb %b0,%w1\n"
		"inb %w1"
		: /* no output args */
		: "a" (smi_cmd->command_code),
		  "d" (smi_cmd->command_address),
		  "b" (smi_cmd->ebx),
		  "c" (smi_cmd->ecx)
		: "memory"
	);

	return 0;
}

/**
 * dcdbas_smi_request: generate SMI request
 *
 * Called with smi_data_lock held.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
	int ret;

	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}

	/* SMI requires CPU 0 */
	get_online_cpus();
	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
	put_online_cpus();

	return ret;
}

/**
 * smi_request_store:
 *
 * The valid values are:
 * 0: zero SMI data buffer
 * 1: generate calling interface SMI
 * 2: generate raw SMI
 *
 * User application writes smi_cmd to smi_data before telling driver
 * to generate SMI.
 */
static ssize_t smi_request_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct smi_cmd *smi_cmd;
	unsigned long val = simple_strtoul(buf, NULL, 10);
	ssize_t ret;

	mutex_lock(&smi_data_lock);

	if (smi_data_buf_size < sizeof(struct smi_cmd)) {
		ret = -ENODEV;
		goto out;
	}
	smi_cmd = (struct smi_cmd *)smi_data_buf;

	switch (val) {
	case 2:
		/* Raw SMI */
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 1:
		/*
		 * Calling Interface SMI
		 *
		 * Provide physical address of command buffer field within
		 * the struct smi_cmd to BIOS.
		 *
		 * Because the address that smi_cmd (smi_data_buf) points to
		 * will be from memremap() of a non-memory address if WSMT
		 * is present, we can't use virt_to_phys() on smi_cmd, so
		 * we have to use the physical address that was saved when
		 * the virtual address for smi_cmd was received.
		 */
		smi_cmd->ebx = smi_data_buf_phys_addr +
				offsetof(struct smi_cmd, command_buffer);
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 0:
		memset(smi_data_buf, 0, smi_data_buf_size);
		ret = count;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&smi_data_lock);
	return ret;
}
EXPORT_SYMBOL(dcdbas_smi_request);

/**
 * host_control_smi: generate host control SMI
 *
 * Caller must set up the host control command in smi_data_buf.
 */
static int host_control_smi(void)
{
	struct apm_cmd *apm_cmd;
	u8 *data;
	unsigned long flags;
	u32 num_ticks;
	s8 cmd_status;
	u8 index;

	apm_cmd = (struct apm_cmd *)smi_data_buf;
	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;

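	/*
	 * The SMI type selects the platform's port layout: TYPE1 uses the
	 * PIIX4 page-2 CMOS ports and polls the APM status port, while
	 * TYPE2/TYPE3 use the page-1 CMOS ports, differ only in which APM
	 * control port is written, and poll the command status in memory.
	 */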
	switch (host_control_smi_type) {
	case HC_SMITYPE_TYPE1:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_data_buf_phys_addr;
		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index,
			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
			outb(*data,
			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
		}

		/* first set status to -1 as called for by spec */
		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);

		/* generate SMM call */
		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
		       == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	case HC_SMITYPE_TYPE2:
	case HC_SMITYPE_TYPE3:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_data_buf_phys_addr;
		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
		}

		/* generate SMM call */
		if (host_control_smi_type == HC_SMITYPE_TYPE3)
			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		else
			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);

		/* restore RTC index pointer since it was written to above */
		CMOS_READ(RTC_REG_C);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* read control port back to serialize write */
		cmd_status = inb(PE1400_APM_CONTROL_PORT);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	default:
		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
			__func__, host_control_smi_type);
		return -ENOSYS;
	}

	return 0;
}

/**
 * dcdbas_host_control: initiate host control
 *
 * This function is called by the driver after the system has
 * finished shutting down if the user application specified a
 * host control action to perform on shutdown.  It is safe to
 * use smi_data_buf at this point because the system has finished
 * shutting down and no userspace apps are running.
 */
static void dcdbas_host_control(void)
{
	struct apm_cmd *apm_cmd;
	u8 action;

	if (host_control_action == HC_ACTION_NONE)
		return;

	action = host_control_action;
	host_control_action = HC_ACTION_NONE;

	if (!smi_data_buf) {
		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
		return;
	}

	if (smi_data_buf_size < sizeof(struct apm_cmd)) {
		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
			__func__);
		return;
	}

	apm_cmd = (struct apm_cmd *)smi_data_buf;

	/* power off takes precedence */
	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
		host_control_smi();
	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
		host_control_smi();
	}
}

/* WSMT */
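/*
 * WSMT (Windows SMM Security Mitigations Table) support.  When the table
 * reports fixed communication buffers, firmware supplies the SMI buffer
 * via an entry point structure (EPS) in the BIOS region, and the driver
 * uses that buffer instead of allocating its own.
 */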

static u8 checksum(u8 *buffer, u8 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum += *buffer++;
	return sum;
}

static inline struct smm_eps_table *check_eps_table(u8 *addr)
{
	struct smm_eps_table *eps = (struct smm_eps_table *)addr;

	if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
		return NULL;

	if (checksum(addr, eps->length) != 0)
		return NULL;

	return eps;
}

static int dcdbas_check_wsmt(void)
{
	struct acpi_table_wsmt *wsmt = NULL;
	struct smm_eps_table *eps = NULL;
	u64 remap_size;
	u8 *addr;

	acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
	if (!wsmt)
		return 0;

	/* Check if WSMT ACPI table shows that protection is enabled */
	if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
	    !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
		return 0;

	/* Scan for EPS (entry point structure) */
	for (addr = (u8 *)__va(0xf0000);
	     addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
	     addr += 16) {
		eps = check_eps_table(addr);
		if (eps)
			break;
	}

	if (!eps) {
		dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no EPS found\n");
		return -ENODEV;
	}

	/*
	 * Get physical address of buffer and map to virtual address.
	 * Table gives size in 4K pages, regardless of actual system page size.
	 */
	if (upper_32_bits(eps->smm_comm_buff_addr + 8)) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but EPS buffer address is above 4GB\n");
		return -EINVAL;
	}
	/*
	 * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
	 * bytes are used for a semaphore, not the data buffer itself).
	 */
	remap_size = eps->num_of_4k_pages * PAGE_SIZE;
	if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
		remap_size = MAX_SMI_DATA_BUF_SIZE + 8;
	eps_buffer = memremap(eps->smm_comm_buff_addr, remap_size, MEMREMAP_WB);
	if (!eps_buffer) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map EPS buffer\n");
		return -ENOMEM;
	}

	/* First 8 bytes are for a semaphore, not part of the smi_data_buf */
	smi_data_buf_phys_addr = eps->smm_comm_buff_addr + 8;
	smi_data_buf = eps_buffer + 8;
	smi_data_buf_size = remap_size - 8;
	max_smi_data_buf_size = smi_data_buf_size;
	wsmt_enabled = true;
	dev_info(&dcdbas_pdev->dev,
		 "WSMT found, using firmware-provided SMI buffer.\n");
	return 1;
}

/**
 * dcdbas_reboot_notify: handle reboot notification for host control
 */
static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
				void *unused)
{
	switch (code) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		if (host_control_on_shutdown) {
			/* firmware is going to perform host control action */
			printk(KERN_WARNING "Please wait for shutdown action to complete...\n");
			dcdbas_host_control();
		}
		break;
	}

	return NOTIFY_DONE;
}

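/* lowest priority: let other reboot notifiers run before the host control SMI */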
static struct notifier_block dcdbas_reboot_nb = {
	.notifier_call = dcdbas_reboot_notify,
	.next = NULL,
	.priority = INT_MIN
};

static DCDBAS_BIN_ATTR_RW(smi_data);

static struct bin_attribute *dcdbas_bin_attrs[] = {
	&bin_attr_smi_data,
	NULL
};

static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
static DCDBAS_DEV_ATTR_WO(smi_request);
static DCDBAS_DEV_ATTR_RW(host_control_action);
static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);

static struct attribute *dcdbas_dev_attrs[] = {
	&dev_attr_smi_data_buf_size.attr,
	&dev_attr_smi_data_buf_phys_addr.attr,
	&dev_attr_smi_request.attr,
	&dev_attr_host_control_action.attr,
	&dev_attr_host_control_smi_type.attr,
	&dev_attr_host_control_on_shutdown.attr,
	NULL
};

static const struct attribute_group dcdbas_attr_group = {
	.attrs = dcdbas_dev_attrs,
	.bin_attrs = dcdbas_bin_attrs,
};

static int dcdbas_probe(struct platform_device *dev)
{
	int error;

	host_control_action = HC_ACTION_NONE;
	host_control_smi_type = HC_SMITYPE_NONE;

	dcdbas_pdev = dev;

	/* Check if ACPI WSMT table specifies protected SMI buffer address */
	error = dcdbas_check_wsmt();
	if (error < 0)
		return error;

	/*
	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
	 * This is done by setting the DMA mask below.
	 */
	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
	if (error)
		return error;

	register_reboot_notifier(&dcdbas_reboot_nb);

	dev_info(&dev->dev, "%s (version %s)\n",
		 DRIVER_DESCRIPTION, DRIVER_VERSION);

	return 0;
}

static int dcdbas_remove(struct platform_device *dev)
{
	unregister_reboot_notifier(&dcdbas_reboot_nb);
	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);

	return 0;
}

static struct platform_driver dcdbas_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
	},
	.probe		= dcdbas_probe,
	.remove		= dcdbas_remove,
};

static const struct platform_device_info dcdbas_dev_info __initconst = {
	.name		= DRIVER_NAME,
	.id		= -1,
	.dma_mask	= DMA_BIT_MASK(32),
};

static struct platform_device *dcdbas_pdev_reg;

/**
 * dcdbas_init: initialize driver
 */
static int __init dcdbas_init(void)
{
	int error;

	error = platform_driver_register(&dcdbas_driver);
	if (error)
		return error;

	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
	if (IS_ERR(dcdbas_pdev_reg)) {
		error = PTR_ERR(dcdbas_pdev_reg);
		goto err_unregister_driver;
	}

	return 0;

err_unregister_driver:
	platform_driver_unregister(&dcdbas_driver);
	return error;
}

/**
 * dcdbas_exit: perform driver cleanup
 */
static void __exit dcdbas_exit(void)
{
	/*
	 * make sure functions that use dcdbas_pdev are called
	 * before platform_device_unregister
	 */
	unregister_reboot_notifier(&dcdbas_reboot_nb);

	/*
	 * We have to free the buffer here instead of dcdbas_remove
	 * because only in module exit function we can be sure that
	 * all sysfs attributes belonging to this module have been
	 * released.
	 */
	if (dcdbas_pdev)
		smi_data_buf_free();
	if (eps_buffer)
		memunmap(eps_buffer);
	platform_device_unregister(dcdbas_pdev_reg);
	platform_driver_unregister(&dcdbas_driver);
}

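/*
 * Register at subsys_initcall_sync time so the exported dcdbas_smi_request()
 * is available before later-initializing drivers that may call it.
 */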
subsys_initcall_sync(dcdbas_init);
module_exit(dcdbas_exit);

MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
/* Any System or BIOS claiming to be by Dell */
MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");