// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP mailbox driver
 *
 * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
 *
 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *          Suman Anna <s-anna@ti.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>

#include "mailbox.h"

#define MAILBOX_REVISION		0x000
#define MAILBOX_MESSAGE(m)		(0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m)		(0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m)		(0x0c0 + 4 * (m))

#define OMAP2_MAILBOX_IRQSTATUS(u)	(0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u)	(0x104 + 8 * (u))

#define OMAP4_MAILBOX_IRQSTATUS(u)	(0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u)	(0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u)	(0x10c + 0x10 * (u))

#define MAILBOX_IRQSTATUS(type, u)	(type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
						OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE(u) : \
						OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u)	(type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
						: OMAP2_MAILBOX_IRQENABLE(u))

#define MAILBOX_IRQ_NEWMSG(m)		(1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m)		(1 << (2 * (m) + 1))

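/*
 * Worked example of the layout encoded by the macros above (for
 * illustration only): on an OMAP4-type controller (MBOX_INTR_CFG_TYPE2),
 * user 1 has IRQSTATUS at 0x104 + 0x10 * 1 = 0x114 and IRQENABLE at
 * 0x108 + 0x10 * 1 = 0x118, while FIFO 2 signals "new message" via bit
 * 1 << (2 * 2) = 0x10 and "not full" via bit 1 << (2 * 2 + 1) = 0x20.
 */
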
/* Interrupt register configuration types */
#define MBOX_INTR_CFG_TYPE1	0
#define MBOX_INTR_CFG_TYPE2	1

struct omap_mbox_fifo {
	unsigned long msg;
	unsigned long fifo_stat;
	unsigned long msg_stat;
	unsigned long irqenable;
	unsigned long irqstatus;
	unsigned long irqdisable;
	u32 intr_bit;
};

struct omap_mbox_queue {
	spinlock_t lock;
	struct kfifo fifo;
	struct work_struct work;
	struct omap_mbox *mbox;
	bool full;
};

struct omap_mbox_match_data {
	u32 intr_type;
};

struct omap_mbox_device {
	struct device *dev;
	struct mutex cfg_lock;
	void __iomem *mbox_base;
	u32 *irq_ctx;
	u32 num_users;
	u32 num_fifos;
	u32 intr_type;
	struct omap_mbox **mboxes;
	struct mbox_controller controller;
	struct list_head elem;
};

struct omap_mbox_fifo_info {
	int tx_id;
	int tx_usr;
	int tx_irq;

	int rx_id;
	int rx_usr;
	int rx_irq;

	const char *name;
	bool send_no_irq;
};

struct omap_mbox {
	const char *name;
	int irq;
	struct omap_mbox_queue *rxq;
	struct device *dev;
	struct omap_mbox_device *parent;
	struct omap_mbox_fifo tx_fifo;
	struct omap_mbox_fifo rx_fifo;
	u32 intr_type;
	struct mbox_chan *chan;
	bool send_no_irq;
};

/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);
static LIST_HEAD(omap_mbox_devices);

static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
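
/*
 * Note (illustrative, not part of the original sources): mbox_kfifo_size
 * is read-only at runtime (S_IRUGO), so it can only be set at load time,
 * e.g. "modprobe omap-mailbox mbox_kfifo_size=512" when built as a module.
 * omap_mbox_init() below rounds the value up to a u32-aligned, non-zero
 * size before any channel uses it.
 */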

static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
{
	if (!chan || !chan->con_priv)
		return NULL;

	return (struct omap_mbox *)chan->con_priv;
}

static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
	return __raw_readl(mdev->mbox_base + ofs);
}

static inline
void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
{
	__raw_writel(val, mdev->mbox_base + ofs);
}

/* Mailbox FIFO handle functions */
static u32 mbox_fifo_read(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return mbox_read_reg(mbox->parent, fifo->msg);
}

static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	mbox_write_reg(mbox->parent, msg, fifo->msg);
}

static int mbox_fifo_empty(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->rx_fifo;

	return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}

static int mbox_fifo_full(struct omap_mbox *mbox)
{
	struct omap_mbox_fifo *fifo = &mbox->tx_fifo;

	return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}

/* Mailbox IRQ handle functions */
static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqstatus = fifo->irqstatus;

	mbox_write_reg(mbox->parent, bit, irqstatus);

	/* Flush posted write for irq status to avoid spurious interrupts */
	mbox_read_reg(mbox->parent, irqstatus);
}

static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;
	u32 irqstatus = fifo->irqstatus;

	u32 enable = mbox_read_reg(mbox->parent, irqenable);
	u32 status = mbox_read_reg(mbox->parent, irqstatus);

	return (int)(enable & status & bit);
}

static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	u32 l;
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqenable = fifo->irqenable;

	l = mbox_read_reg(mbox->parent, irqenable);
	l |= bit;
	mbox_write_reg(mbox->parent, l, irqenable);
}

static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqdisable = fifo->irqdisable;

	/*
	 * Read and update the interrupt configuration register for pre-OMAP4.
	 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
	 */
	if (!mbox->intr_type)
		bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;

	mbox_write_reg(mbox->parent, bit, irqdisable);
}

void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_enable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);

void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);

	if (WARN_ON(!mbox))
		return;

	_omap_mbox_disable_irq(mbox, irq);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);
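
/*
 * Overview of the receive path (descriptive comment, added for clarity):
 * the hard IRQ handler (__mbox_rx_interrupt) drains the hardware FIFO into
 * a per-channel kfifo and, if the kfifo fills up, masks the RX interrupt
 * and marks the queue full. The workqueue below (mbox_rx_work) then pulls
 * messages out of the kfifo, hands them to the mailbox client via
 * mbox_chan_received_data(), and re-enables the RX interrupt once room is
 * available again.
 */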

/*
 * Message receiver (workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	mbox_msg_t data;
	u32 msg;
	int len;

	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
		data = msg;

		mbox_chan_received_data(mq->mbox->chan, (void *)data);
		spin_lock_irq(&mq->lock);
		if (mq->full) {
			mq->full = false;
			_omap_mbox_enable_irq(mq->mbox, IRQ_RX);
		}
		spin_unlock_irq(&mq->lock);
	}
}

/*
 * Mailbox interrupt handler
 */
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	mbox_chan_txdone(mbox->chan, 0);
}

static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	u32 msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			_omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}

static irqreturn_t mbox_interrupt(int irq, void *p)
{
	struct omap_mbox *mbox = p;

	if (is_mbox_irq(mbox, IRQ_TX))
		__mbox_tx_interrupt(mbox);

	if (is_mbox_irq(mbox, IRQ_RX))
		__mbox_rx_interrupt(mbox);

	return IRQ_HANDLED;
}

static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					void (*work)(struct work_struct *))
{
	struct omap_mbox_queue *mq;

	if (!work)
		return NULL;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	spin_lock_init(&mq->lock);

	if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
		goto error;

	INIT_WORK(&mq->work, work);
	return mq;

error:
	kfree(mq);
	return NULL;
}

static void mbox_queue_free(struct omap_mbox_queue *q)
{
	kfifo_free(&q->fifo);
	kfree(q);
}

static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;

	mq = mbox_queue_alloc(mbox, mbox_rx_work);
	if (!mq)
		return -ENOMEM;
	mbox->rxq = mq;
	mq->mbox = mbox;

	ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
			  mbox->name, mbox);
	if (unlikely(ret)) {
		pr_err("failed to register mailbox interrupt:%d\n", ret);
		goto fail_request_irq;
	}

	if (mbox->send_no_irq)
		mbox->chan->txdone_method = TXDONE_BY_ACK;

	_omap_mbox_enable_irq(mbox, IRQ_RX);

	return 0;

fail_request_irq:
	mbox_queue_free(mbox->rxq);
	return ret;
}

static void omap_mbox_fini(struct omap_mbox *mbox)
{
	_omap_mbox_disable_irq(mbox, IRQ_RX);
	free_irq(mbox->irq, mbox);
	flush_work(&mbox->rxq->work);
	mbox_queue_free(mbox->rxq);
}

static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
					       const char *mbox_name)
{
	struct omap_mbox *_mbox, *mbox = NULL;
	struct omap_mbox **mboxes = mdev->mboxes;
	int i;

	if (!mboxes)
		return NULL;

	for (i = 0; (_mbox = mboxes[i]); i++) {
		if (!strcmp(_mbox->name, mbox_name)) {
			mbox = _mbox;
			break;
		}
	}
	return mbox;
}

struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
					    const char *chan_name)
{
	struct device *dev = cl->dev;
	struct omap_mbox *mbox = NULL;
	struct omap_mbox_device *mdev;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (dev->of_node) {
		pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
		       __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_for_each_entry(mdev, &omap_mbox_devices, elem) {
		mbox = omap_mbox_device_find(mdev, chan_name);
		if (mbox)
			break;
	}
	mutex_unlock(&omap_mbox_devices_lock);

	if (!mbox || !mbox->chan)
		return ERR_PTR(-ENOENT);

	chan = mbox->chan;
	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);
	spin_unlock_irqrestore(&chan->lock, flags);

	ret = chan->mbox->ops->startup(chan);
	if (ret) {
		pr_err("Unable to startup the chan (%d)\n", ret);
		mbox_free_channel(chan);
		chan = ERR_PTR(ret);
	}

	return chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);
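
/*
 * Minimal usage sketch for legacy, non-DT clients (illustrative only; the
 * client device and callback names below are hypothetical and not part of
 * this driver):
 *
 *	static void my_rx_callback(struct mbox_client *cl, void *data)
 *	{
 *		pr_info("got mailbox message 0x%lx\n", (unsigned long)data);
 *	}
 *
 *	struct mbox_client cl = {
 *		.dev		= my_platform_dev,	/- must have no of_node -/
 *		.rx_callback	= my_rx_callback,
 *		.tx_block	= true,
 *		.tx_tout	= 500,
 *	};
 *	struct mbox_chan *chan = omap_mbox_request_channel(&cl, "mbox-name");
 *
 *	if (!IS_ERR(chan))
 *		mbox_send_message(chan, (void *)0xdeadbeefUL);
 *
 * DT-based clients must use mbox_request_channel() instead, as enforced
 * above.
 */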

static struct class omap_mbox_class = { .name = "mbox", };

static int omap_mbox_register(struct omap_mbox_device *mdev)
{
	int ret;
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++) {
		struct omap_mbox *mbox = mboxes[i];

		mbox->dev = device_create(&omap_mbox_class, mdev->dev,
					  0, mbox, "%s", mbox->name);
		if (IS_ERR(mbox->dev)) {
			ret = PTR_ERR(mbox->dev);
			goto err_out;
		}
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_add(&mdev->elem, &omap_mbox_devices);
	mutex_unlock(&omap_mbox_devices_lock);

	ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);

err_out:
	if (ret) {
		while (i--)
			device_unregister(mboxes[i]->dev);
	}
	return ret;
}

static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mutex_lock(&omap_mbox_devices_lock);
	list_del(&mdev->elem);
	mutex_unlock(&omap_mbox_devices_lock);

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++)
		device_unregister(mboxes[i]->dev);
	return 0;
}

static int omap_mbox_chan_startup(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;
	int ret = 0;

	mutex_lock(&mdev->cfg_lock);
	pm_runtime_get_sync(mdev->dev);
	ret = omap_mbox_startup(mbox);
	if (ret)
		pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
	return ret;
}

static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	struct omap_mbox_device *mdev = mbox->parent;

	mutex_lock(&mdev->cfg_lock);
	omap_mbox_fini(mbox);
	pm_runtime_put_sync(mdev->dev);
	mutex_unlock(&mdev->cfg_lock);
}

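/*
 * Transmit path overview (descriptive comment, added for clarity): channels
 * normally rely on the Tx-ready interrupt; omap_mbox_chan_send() enables
 * IRQ_TX after queueing a word and __mbox_tx_interrupt() later reports
 * completion through mbox_chan_txdone(). Channels with the DT property
 * "ti,mbox-send-noirq" take omap_mbox_chan_send_noirq() instead, which
 * writes the word and then reads the RX FIFO and acks the RX interrupt
 * locally; such channels have their txdone_method switched to
 * TXDONE_BY_ACK in omap_mbox_startup().
 */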
static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		_omap_mbox_enable_irq(mbox, IRQ_RX);
		mbox_fifo_write(mbox, msg);
		ret = 0;
		_omap_mbox_disable_irq(mbox, IRQ_RX);

		/* we must read and ack the interrupt directly from here */
		mbox_fifo_read(mbox);
		ack_mbox_irq(mbox, IRQ_RX);
	}

	return ret;
}

static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
{
	int ret = -EBUSY;

	if (!mbox_fifo_full(mbox)) {
		mbox_fifo_write(mbox, msg);
		ret = 0;
	}

	/* always enable the interrupt */
	_omap_mbox_enable_irq(mbox, IRQ_TX);
	return ret;
}

static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
	struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
	int ret;
	u32 msg = omap_mbox_message(data);

	if (!mbox)
		return -EINVAL;

	if (mbox->send_no_irq)
		ret = omap_mbox_chan_send_noirq(mbox, msg);
	else
		ret = omap_mbox_chan_send(mbox, msg);

	return ret;
}

static const struct mbox_chan_ops omap_mbox_chan_ops = {
	.startup	= omap_mbox_chan_startup,
	.send_data	= omap_mbox_chan_send_data,
	.shutdown	= omap_mbox_chan_shutdown,
};

#ifdef CONFIG_PM_SLEEP
static int omap_mbox_suspend(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, fifo, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
		if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
			dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
				fifo);
			return -EBUSY;
		}
	}

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
	}

	return 0;
}

static int omap_mbox_resume(struct device *dev)
{
	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
	u32 usr, reg;

	if (pm_runtime_status_suspended(dev))
		return 0;

	for (usr = 0; usr < mdev->num_users; usr++) {
		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
		mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
	}

	return 0;
}
#endif

static const struct dev_pm_ops omap_mbox_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};

static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };

static const struct of_device_id omap_mailbox_of_match[] = {
	{
		.compatible	= "ti,omap2-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap3-mailbox",
		.data		= &omap2_data,
	},
	{
		.compatible	= "ti,omap4-mailbox",
		.data		= &omap4_data,
	},
	{
		.compatible	= "ti,am654-mailbox",
		.data		= &omap4_data,
	},
	{
		.compatible	= "ti,am64-mailbox",
		.data		= &omap4_data,
	},
	{
		/* end */
	},
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
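
/*
 * Illustrative device-tree fragment for the bindings parsed in
 * omap_mbox_probe() below (node names, labels and numbers are made up):
 *
 *	mailbox: mailbox@4a0f4000 {
 *		compatible = "ti,omap4-mailbox";
 *		reg = <0x4a0f4000 0x200>;
 *		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		ti,mbox-num-users = <3>;
 *		ti,mbox-num-fifos = <8>;
 *
 *		mbox_ipu: mbox-ipu {
 *			ti,mbox-tx = <0 0 0>;	(fifo_id irq_id usr_id)
 *			ti,mbox-rx = <1 0 0>;
 *		};
 *	};
 *
 * A client then references the sub-mailbox by phandle, which is what
 * omap_mbox_of_xlate() resolves:
 *
 *	mboxes = <&mailbox &mbox_ipu>;
 */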

static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
					    const struct of_phandle_args *sp)
{
	phandle phandle = sp->args[0];
	struct device_node *node;
	struct omap_mbox_device *mdev;
	struct omap_mbox *mbox;

	mdev = container_of(controller, struct omap_mbox_device, controller);
	if (WARN_ON(!mdev))
		return ERR_PTR(-EINVAL);

	node = of_find_node_by_phandle(phandle);
	if (!node) {
		pr_err("%s: could not find node phandle 0x%x\n",
		       __func__, phandle);
		return ERR_PTR(-ENODEV);
	}

	mbox = omap_mbox_device_find(mdev, node->name);
	of_node_put(node);
	return mbox ? mbox->chan : ERR_PTR(-ENOENT);
}

static int omap_mbox_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int ret;
	struct mbox_chan *chnls;
	struct omap_mbox **list, *mbox, *mboxblk;
	struct omap_mbox_fifo_info *finfo, *finfoblk;
	struct omap_mbox_device *mdev;
	struct omap_mbox_fifo *fifo;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	const struct omap_mbox_match_data *match_data;
	u32 intr_type, info_count;
	u32 num_users, num_fifos;
	u32 tmp[3];
	u32 l;
	int i;

	if (!node) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;
	intr_type = match_data->intr_type;

	if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
		return -ENODEV;

	if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
		return -ENODEV;

	info_count = of_get_available_child_count(node);
	if (!info_count) {
		dev_err(&pdev->dev, "no available mbox devices found\n");
		return -ENODEV;
	}

	finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
				GFP_KERNEL);
	if (!finfoblk)
		return -ENOMEM;

	finfo = finfoblk;
	child = NULL;
	for (i = 0; i < info_count; i++, finfo++) {
		child = of_get_next_available_child(node, child);
		ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->tx_id = tmp[0];
		finfo->tx_irq = tmp[1];
		finfo->tx_usr = tmp[2];

		ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
						 ARRAY_SIZE(tmp));
		if (ret)
			return ret;
		finfo->rx_id = tmp[0];
		finfo->rx_irq = tmp[1];
		finfo->rx_usr = tmp[2];

		finfo->name = child->name;

		if (of_find_property(child, "ti,mbox-send-noirq", NULL))
			finfo->send_no_irq = true;

		if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
		    finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
			return -EINVAL;
	}

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(mdev->mbox_base))
		return PTR_ERR(mdev->mbox_base);

	mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
				     GFP_KERNEL);
	if (!mdev->irq_ctx)
		return -ENOMEM;

	/* allocate one extra for marking end of list */
	list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
			    GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
			     GFP_KERNEL);
	if (!chnls)
		return -ENOMEM;

	mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
			       GFP_KERNEL);
	if (!mboxblk)
		return -ENOMEM;

	mbox = mboxblk;
	finfo = finfoblk;
	for (i = 0; i < info_count; i++, finfo++) {
		fifo = &mbox->tx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
		fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
		fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);

		fifo = &mbox->rx_fifo;
		fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
		fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
		fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
		fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
		fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
		fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);

		mbox->send_no_irq = finfo->send_no_irq;
		mbox->intr_type = intr_type;

		mbox->parent = mdev;
		mbox->name = finfo->name;
		mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
		if (mbox->irq < 0)
			return mbox->irq;
		mbox->chan = &chnls[i];
		chnls[i].con_priv = mbox;
		list[i] = mbox++;
	}

	mutex_init(&mdev->cfg_lock);
	mdev->dev = &pdev->dev;
	mdev->num_users = num_users;
	mdev->num_fifos = num_fifos;
	mdev->intr_type = intr_type;
	mdev->mboxes = list;

	/*
	 * The OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a
	 * Tx-Ready IRQ, which is what is used to run the Tx state machine.
	 */
	mdev->controller.txdone_irq = true;
	mdev->controller.dev = mdev->dev;
	mdev->controller.ops = &omap_mbox_chan_ops;
	mdev->controller.chans = chnls;
	mdev->controller.num_chans = info_count;
	mdev->controller.of_xlate = omap_mbox_of_xlate;
	ret = omap_mbox_register(mdev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);
	pm_runtime_enable(mdev->dev);

	ret = pm_runtime_get_sync(mdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(mdev->dev);
		goto unregister;
	}

	/*
	 * just print the raw revision register, the format is not
	 * uniform across all SoCs
	 */
	l = mbox_read_reg(mdev, MAILBOX_REVISION);
	dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);

	ret = pm_runtime_put_sync(mdev->dev);
	if (ret < 0 && ret != -ENOSYS)
		goto unregister;

	devm_kfree(&pdev->dev, finfoblk);
	return 0;

unregister:
	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);
	return ret;
}

static int omap_mbox_remove(struct platform_device *pdev)
{
	struct omap_mbox_device *mdev = platform_get_drvdata(pdev);

	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);

	return 0;
}

static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove	= omap_mbox_remove,
	.driver	= {
		.name = "omap-mailbox",
		.pm = &omap_mbox_pm_ops,
		.of_match_table = of_match_ptr(omap_mailbox_of_match),
	},
};

static int __init omap_mbox_init(void)
{
	int err;

	err = class_register(&omap_mbox_class);
	if (err)
		return err;

	/* kfifo size sanity check: alignment and minimal size */
	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));

	err = platform_driver_register(&omap_mbox_driver);
	if (err)
		class_unregister(&omap_mbox_class);

	return err;
}
subsys_initcall(omap_mbox_init);

static void __exit omap_mbox_exit(void)
{
	platform_driver_unregister(&omap_mbox_driver);
	class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");