/*
 * bios-less APM driver for ARM Linux
 *  Jamey Hicks <jamey@crl.dec.com>
 *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * This document is available from Microsoft at:
 *    http://www.microsoft.com/whdc/archive/amp_12.mspx
 */
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/apm-emulation.h>
#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>

/*
 * One option can be changed at boot time as follows:
 *	apm=on/off		enable/disable APM
 */

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS		16

struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};

/*
 * thread states (for threads using a writable /dev/apm_bios fd):
 *
 * SUSPEND_NONE:	nothing happening
 * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
 * SUSPEND_READ:	suspend event read, pending acknowledgement
 * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
 *			waiting for resume
 * SUSPEND_ACKTO:	acknowledgement timeout
 * SUSPEND_DONE:	thread had acked suspend and is now notified of
 *			resume
 *
 * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
 *
 * A thread migrates along one of three paths:
 *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
 *	                            -6-> ACKTO -7-> NONE
 *	NONE -8-> WAIT -9-> NONE
 *
 * While in PENDING or READ, the thread is accounted for in the
 * suspend_acks_pending counter.
 *
 * The transitions are invoked as follows:
 *	1: suspend event is signalled from the core PM code
 *	2: the suspend event is read from the fd by the userspace thread
 *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
 *	4: core PM code signals that we have resumed
 *	5: APM_IOC_SUSPEND ioctl returns
 *
 *	6: the notifier invoked from the core PM code timed out waiting
 *	   for all relevant threads to enter ACKED state and puts those
 *	   that haven't into ACKTO
 *	7: those threads issue the APM_IOC_SUSPEND ioctl too late and
 *	   get an error
 *
 *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
 *	   ioctl code invokes pm_suspend()
 *	9: pm_suspend() returns indicating resume
 */
enum apm_suspend_state {
	SUSPEND_NONE,
	SUSPEND_PENDING,
	SUSPEND_READ,
	SUSPEND_ACKED,
	SUSPEND_ACKTO,
	SUSPEND_WAIT,
	SUSPEND_DONE,
};

/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;

	unsigned int		suser: 1;
	unsigned int		writer: 1;
	unsigned int		reader: 1;

	int			suspend_result;
	enum apm_suspend_state	suspend_state;

	struct apm_queue	queue;
};

/*
 * Local variables
 */
static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
static int apm_disabled;
static struct task_struct *kapmd_tsk;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us with a process context in which to
 * handle "APM" events - specifically necessary if we're going to be
 * suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;

static DEFINE_MUTEX(state_lock);

static const char driver_version[] = "1.13";	/* no spaces */



/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);

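/*
 * A minimal sketch of how machine support code might hook this, kept here
 * as a comment only.  The board_battery_* names below are hypothetical and
 * stand in for whatever the platform actually knows about its hardware;
 * the only real contract is filling in struct apm_power_info and assigning
 * the apm_get_power_status pointer, e.g. from an initcall:
 *
 *	static void board_apm_get_power_status(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = board_battery_on_ac() ?
 *			APM_AC_ONLINE : APM_AC_OFFLINE;
 *		info->battery_life = board_battery_percentage();
 *		info->battery_status = info->battery_life > 30 ?
 *			APM_BATTERY_STATUS_HIGH : APM_BATTERY_STATUS_LOW;
 *		info->time = -1;
 *		info->units = -1;
 *	}
 *
 *	apm_get_power_status = board_apm_get_power_status;
 */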

/*
 * APM event queue management.
 */
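/*
 * The queue is a fixed-size ring buffer: event_head indexes the most
 * recently stored event and event_tail the most recently consumed one,
 * so the queue is empty when the two are equal and it holds at most
 * APM_MAX_EVENTS - 1 outstanding events.  On overflow the oldest event
 * is silently dropped.
 */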
static inline int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		static int notified;

		if (notified++ == 0)
			printk(KERN_ERR "apm: an event queue overflowed\n");
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}

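/*
 * Broadcast an event to the queue of every reader of /dev/apm_bios and
 * wake up anyone sleeping in apm_read() or apm_poll().
 */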
static void queue_event(apm_event_t event)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as->reader)
			queue_add_event(&as->queue, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}

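/*
 * Read as many queued events as fit in the user buffer, blocking until at
 * least one is available unless O_NONBLOCK is set.  Reading a suspend
 * event moves the opener from SUSPEND_PENDING to SUSPEND_READ, i.e. its
 * acknowledgement via APM_IOC_SUSPEND is now expected.
 */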
static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		mutex_lock(&state_lock);
		if (as->suspend_state == SUSPEND_PENDING &&
		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
			as->suspend_state = SUSPEND_READ;
		mutex_unlock(&state_lock);

		buf += sizeof(event);
		i -= sizeof(event);
	}

	if (i < count)
		ret = count - i;

	return ret;
}

static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
	struct apm_user *as = fp->private_data;

	poll_wait(fp, &apm_waitqueue, wait);
	return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
}

/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions.  It is used to:
 *     - initiate a suspend
 *     - acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen.
 */
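/*
 * A minimal userspace sketch of both uses, assuming only the standard
 * <linux/apm_bios.h> definitions (fd is a /dev/apm_bios descriptor opened
 * read/write by a CAP_SYS_ADMIN process; error handling omitted):
 *
 *	apm_event_t ev;
 *
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND))
 *		ioctl(fd, APM_IOC_SUSPEND, 0);	// ack, returns after resume
 *
 *	ioctl(fd, APM_IOC_SUSPEND, 0);		// no event read: request suspend
 */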
static long
apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	int err = -EINVAL;

	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		mutex_lock(&state_lock);

		as->suspend_result = -EINTR;

		switch (as->suspend_state) {
		case SUSPEND_READ:
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			atomic_dec(&suspend_acks_pending);
			mutex_unlock(&state_lock);

			/*
			 * suspend_acks_pending changed, the notifier needs to
			 * be woken up for this
			 */
			wake_up(&apm_suspend_waitqueue);

			/*
			 * Wait for the suspend/resume to complete.  If there
			 * are pending acknowledgements, we wait here for
			 * them.  wait_event_freezable() is interruptible and
			 * a pending signal can cause busy looping.  We aren't
			 * doing anything critical, so chill a bit on each
			 * iteration.
			 */
			while (wait_event_freezable(apm_suspend_waitqueue,
					as->suspend_state != SUSPEND_ACKED))
				msleep(10);
			break;
		case SUSPEND_ACKTO:
			as->suspend_result = -ETIMEDOUT;
			mutex_unlock(&state_lock);
			break;
		default:
			as->suspend_state = SUSPEND_WAIT;
			mutex_unlock(&state_lock);

			/*
			 * Otherwise it is a request to suspend the system.
			 * Just invoke pm_suspend(), we'll handle it from
			 * there via the notifier.
			 */
			as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
		}

		mutex_lock(&state_lock);
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		mutex_unlock(&state_lock);
		break;
	}

	return err;
}

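/*
 * Tear down a /dev/apm_bios opener.  If it goes away while a suspend ack
 * from it is still outstanding (PENDING or READ), drop its contribution to
 * suspend_acks_pending and wake the notifier so a dying process cannot
 * hold up the suspend.
 */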
static int apm_release(struct inode * inode, struct file * filp)
{
	struct apm_user *as = filp->private_data;

	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.
	 */
	mutex_lock(&state_lock);
	if (as->suspend_state == SUSPEND_PENDING ||
	    as->suspend_state == SUSPEND_READ)
		atomic_dec(&suspend_acks_pending);
	mutex_unlock(&state_lock);

	wake_up(&apm_suspend_waitqueue);

	kfree(as);
	return 0;
}

static int apm_open(struct inode * inode, struct file * filp)
{
	struct apm_user *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (as) {
		/*
		 * XXX - this is a tiny bit broken, when we consider BSD
		 * process accounting.  If the device is opened by root, we
		 * instantly flag that we used superuser privs.  Who knows,
		 * we might close the device immediately without doing a
		 * privileged operation -- cevans
		 */
		as->suser = capable(CAP_SYS_ADMIN);
		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

		down_write(&user_list_lock);
		list_add(&as->list, &apm_user_list);
		up_write(&user_list_lock);

		filp->private_data = as;
	}

	return as ? 0 : -ENOMEM;
}

static const struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.unlocked_ioctl	= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
	.llseek		= noop_llseek,
};

static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};


#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *	bit 0: APM_16_BIT_SUPPORT
 *	bit 1: APM_32_BIT_SUPPORT
 *	bit 2: APM_IDLE_SLOWS_CLOCK
 *	bit 3: APM_BIOS_DISABLED
 *	bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *	0x00: Off-line
 *	0x01: On-line
 *	0x02: On backup power (BIOS >= 1.1 only)
 *	0xff: Unknown
 *   4) Battery status
 *	0x00: High
 *	0x01: Low
 *	0x02: Critical
 *	0x03: Charging
 *	0x04: Selected battery not present (BIOS >= 1.2 only)
 *	0xff: Unknown
 *   5) Battery flag
 *	bit 0: High
 *	bit 1: Low
 *	bit 2: Critical
 *	bit 3: Charging
 *	bit 7: No system battery
 *	0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *	0-100: valid
 *	-1: Unknown
 *   7) Remaining battery life (time units):
 *	Number of remaining minutes or seconds
 *	-1: Unknown
 *   8) min = minutes; sec = seconds
 */
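/*
 * An illustrative line, with the values purely made up: on-line AC, a
 * charging battery at 50% and unknown remaining time would print as
 *
 *	1.13 1.2 0x02 0x01 0x03 0x08 50% -1 ?
 */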
static int proc_apm_show(struct seq_file *m, void *v)
{
	struct apm_power_info info;
	char *units;

	info.ac_line_status = 0xff;
	info.battery_status = 0xff;
	info.battery_flag   = 0xff;
	info.battery_life   = -1;
	info.time	    = -1;
	info.units	    = -1;

	if (apm_get_power_status)
		apm_get_power_status(&info);

	switch (info.units) {
	default:	units = "?";	break;
	case 0:		units = "min";	break;
	case 1:		units = "sec";	break;
	}

	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
		     driver_version, APM_32_BIT_SUPPORT,
		     info.ac_line_status, info.battery_status,
		     info.battery_flag, info.battery_life,
		     info.time, units);

	return 0;
}
#endif

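/*
 * kapmd consumes the events that machine code queues via apm_queue_event()
 * and acts on them in process context: battery/status changes are fanned
 * out to /dev/apm_bios readers, suspend requests go straight to
 * pm_suspend(), and a critical suspend additionally bypasses the userspace
 * acknowledgement handshake in the PM notifier.
 */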
static int kapmd(void *arg)
{
	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
				!queue_empty(&kapmd_queue) || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			pm_suspend(PM_SUSPEND_MEM);
			break;

		case APM_CRITICAL_SUSPEND:
			atomic_inc(&userspace_notification_inhibit);
			pm_suspend(PM_SUSPEND_MEM);
			atomic_dec(&userspace_notification_inhibit);
			break;
		}
	} while (1);

	return 0;
}

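/*
 * PM notifier: on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE it queues a
 * suspend event to every privileged writer and waits up to five seconds
 * for each of them to acknowledge via APM_IOC_SUSPEND, marking stragglers
 * SUSPEND_ACKTO; on PM_POST_SUSPEND/PM_POST_HIBERNATION it broadcasts the
 * resume event and releases anyone blocked in the ioctl.
 */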
static int apm_suspend_notifier(struct notifier_block *nb,
				unsigned long event,
				void *dummy)
{
	struct apm_user *as;
	int err;
	unsigned long apm_event;

	/* short-cut emergency suspends */
	if (atomic_read(&userspace_notification_inhibit))
		return NOTIFY_DONE;

	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		apm_event = (event == PM_SUSPEND_PREPARE) ?
			APM_USER_SUSPEND : APM_USER_HIBERNATION;
		/*
		 * Queue an event to all "writer" users that we want
		 * to suspend and need their ack.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);

		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
			    as->writer && as->suser) {
				as->suspend_state = SUSPEND_PENDING;
				atomic_inc(&suspend_acks_pending);
				queue_add_event(&as->queue, apm_event);
			}
		}

		up_read(&user_list_lock);
		mutex_unlock(&state_lock);
		wake_up_interruptible(&apm_waitqueue);

		/*
		 * Wait for the suspend_acks_pending variable to drop to
		 * zero, meaning everybody acked the suspend event (or the
		 * process was killed.)
		 *
		 * If the app won't answer within a short while we assume it
		 * locked up and ignore it.
		 */
		err = wait_event_interruptible_timeout(
			apm_suspend_waitqueue,
			atomic_read(&suspend_acks_pending) == 0,
			5*HZ);

		/* timed out */
		if (err == 0) {
			/*
			 * Move anybody who timed out to "ack timeout" state.
			 *
			 * We could time out and then have userspace do the
			 * ACK right after the timeout but before we enter
			 * this locked section; that's fine.
			 */
			mutex_lock(&state_lock);
			down_read(&user_list_lock);
			list_for_each_entry(as, &apm_user_list, list) {
				if (as->suspend_state == SUSPEND_PENDING ||
				    as->suspend_state == SUSPEND_READ) {
					as->suspend_state = SUSPEND_ACKTO;
					atomic_dec(&suspend_acks_pending);
				}
			}
			up_read(&user_list_lock);
			mutex_unlock(&state_lock);
		}

		/* let suspend proceed */
		if (err >= 0)
			return NOTIFY_OK;

		/* interrupted by signal */
		return notifier_from_errno(err);

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		apm_event = (event == PM_POST_SUSPEND) ?
			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
		/*
		 * Anyone on the APM queues will think we're still suspended.
		 * Send a message so everyone knows we're now awake again.
		 */
		queue_event(apm_event);

		/*
		 * Finally, wake up anyone who is sleeping on the suspend.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);
		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state == SUSPEND_ACKED) {
				/*
				 * TODO: maybe grab error code, needs core
				 * changes to push the error to the notifier
				 * chain (could use the second parameter if
				 * implemented)
				 */
				as->suspend_result = 0;
				as->suspend_state = SUSPEND_DONE;
			}
		}
		up_read(&user_list_lock);
		mutex_unlock(&state_lock);

		wake_up(&apm_suspend_waitqueue);
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block apm_notif_block = {
	.notifier_call = apm_suspend_notifier,
};

static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
	if (IS_ERR(kapmd_tsk)) {
		ret = PTR_ERR(kapmd_tsk);
		kapmd_tsk = NULL;
		goto out;
	}
	wake_up_process(kapmd_tsk);

#ifdef CONFIG_PROC_FS
	proc_create_single("apm", 0, NULL, proc_apm_show);
#endif

	ret = misc_register(&apm_device);
	if (ret)
		goto out_stop;

	ret = register_pm_notifier(&apm_notif_block);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	misc_deregister(&apm_device);
out_stop:
	remove_proc_entry("apm", NULL);
	kthread_stop(kapmd_tsk);
out:
	return ret;
}

static void __exit apm_exit(void)
{
	unregister_pm_notifier(&apm_notif_block);
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	kthread_stop(kapmd_tsk);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
static int __init apm_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			apm_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			apm_disabled = 0;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("apm=", apm_setup);
#endif

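/*
 * For illustration only: a machine's power-management interrupt handler
 * (the handler name here is hypothetical) would typically do no more than
 * call the helper below and let kapmd act on the event in process context:
 *
 *	static irqreturn_t board_pmu_irq(int irq, void *dev_id)
 *	{
 *		apm_queue_event(APM_POWER_STATUS_CHANGE);
 *		return IRQ_HANDLED;
 *	}
 */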
/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);