1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2011-2017, The Linux Foundation
4 */
5
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "slimbus.h"
18
19 /* Manager registers */
20 #define MGR_CFG 0x200
21 #define MGR_STATUS 0x204
22 #define MGR_INT_EN 0x210
23 #define MGR_INT_STAT 0x214
24 #define MGR_INT_CLR 0x218
25 #define MGR_TX_MSG 0x230
26 #define MGR_RX_MSG 0x270
27 #define MGR_IE_STAT 0x2F0
28 #define MGR_VE_STAT 0x300
29 #define MGR_CFG_ENABLE 1
30
31 /* Framer registers */
32 #define FRM_CFG 0x400
33 #define FRM_STAT 0x404
34 #define FRM_INT_EN 0x410
35 #define FRM_INT_STAT 0x414
36 #define FRM_INT_CLR 0x418
37 #define FRM_WAKEUP 0x41C
38 #define FRM_CLKCTL_DONE 0x420
39 #define FRM_IE_STAT 0x430
40 #define FRM_VE_STAT 0x440
41
42 /* Interface registers */
43 #define INTF_CFG 0x600
44 #define INTF_STAT 0x604
45 #define INTF_INT_EN 0x610
46 #define INTF_INT_STAT 0x614
47 #define INTF_INT_CLR 0x618
48 #define INTF_IE_STAT 0x630
49 #define INTF_VE_STAT 0x640
50
51 /* Interrupt status bits */
52 #define MGR_INT_TX_NACKED_2 BIT(25)
53 #define MGR_INT_MSG_BUF_CONTE BIT(26)
54 #define MGR_INT_RX_MSG_RCVD BIT(30)
55 #define MGR_INT_TX_MSG_SENT BIT(31)
56
57 /* Framer config register settings */
58 #define FRM_ACTIVE 1
59 #define CLK_GEAR 7
60 #define ROOT_FREQ 11
61 #define REF_CLK_GEAR 15
62 #define INTR_WAKE 19
63
64 #define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
65 ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
66
67 #define SLIM_ROOT_FREQ 24576000
68 #define QCOM_SLIM_AUTOSUSPEND 1000
69
70 /* MAX message size over control channel */
71 #define SLIM_MSGQ_BUF_LEN 40
72 #define QCOM_TX_MSGS 2
73 #define QCOM_RX_MSGS 8
74 #define QCOM_BUF_ALLOC_RETRIES 10
75
76 #define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
77
78 /* V2 Component registers */
79 #define CFG_PORT_V2(r) ((r ## _V2))
80 #define COMP_CFG_V2 4
81 #define COMP_TRUST_CFG_V2 0x3000
82
83 /* V1 Component registers */
84 #define CFG_PORT_V1(r) ((r ## _V1))
85 #define COMP_CFG_V1 0
86 #define COMP_TRUST_CFG_V1 0x14
87
88 /* Resource group info for manager, and non-ported generic device-components */
89 #define EE_MGR_RSC_GRP (1 << 10)
90 #define EE_NGD_2 (2 << 6)
91 #define EE_NGD_1 0
92
/**
 * struct slim_ctrl_buf - fixed-slot ring buffer for TX/RX messages
 * @base: start of the buffer area (@n slots of @sl_sz bytes each)
 * @lock: protects @head and @tail updates
 * @head: index of the oldest occupied slot (consumer side)
 * @tail: index of the next slot to fill (producer side)
 * @sl_sz: size of one slot in bytes
 * @n: number of slots in the ring
 */
struct slim_ctrl_buf {
	void *base;
	spinlock_t lock;
	int head;
	int tail;
	int sl_sz;
	int n;
};
101
/**
 * struct qcom_slim_ctrl - driver state for one Qualcomm SLIMbus manager
 * @ctrl: generic SLIMbus controller registered with the slimbus core
 * @framer: active framer description (root/superframe frequencies)
 * @dev: controller device, used for logging and drvdata
 * @base: mapped "ctrl" MMIO region (manager/framer/interface registers)
 * @slew_reg: mapped slew-rate register, lazily ioremapped on first use
 * @rx: ring of received messages drained by the RX workqueue
 * @tx: ring of in-flight TX messages, one slot per outstanding txn
 * @wr_comp: per-TX-slot completion signalled from the TX interrupt path
 * @irq: manager interrupt line
 * @rxwq: single-threaded workqueue running deferred RX processing
 * @wd: work item executing qcom_slim_rxwq()
 * @rclk: core (root) clock, rate set to SLIM_ROOT_FREQ in probe
 * @hclk: interface (AHB) clock
 */
struct qcom_slim_ctrl {
	struct slim_controller ctrl;
	struct slim_framer framer;
	struct device *dev;
	void __iomem *base;
	void __iomem *slew_reg;

	struct slim_ctrl_buf rx;
	struct slim_ctrl_buf tx;

	struct completion **wr_comp;
	int irq;
	struct workqueue_struct *rxwq;
	struct work_struct wd;
	struct clk *rclk;
	struct clk *hclk;
};
119
/*
 * Copy a message of @len bytes into the TX FIFO at @tx_reg, rounding the
 * length up to whole 32-bit words.
 */
static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
			       u8 len, u32 tx_reg)
{
	int count = (len + 3) >> 2;	/* bytes -> words, rounded up */

	__iowrite32_copy(ctrl->base + tx_reg, buf, count);

	/* Ensure order of subsequent writes */
	mb();
}
130
slim_alloc_rxbuf(struct qcom_slim_ctrl * ctrl)131 static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
132 {
133 unsigned long flags;
134 int idx;
135
136 spin_lock_irqsave(&ctrl->rx.lock, flags);
137 if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
138 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
139 dev_err(ctrl->dev, "RX QUEUE full!");
140 return NULL;
141 }
142 idx = ctrl->rx.tail;
143 ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
144 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
145
146 return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
147 }
148
/*
 * Retire the oldest in-flight TX slot and wake its submitter.  Called
 * from the TX interrupt path for both ACKed and NACKed messages; @err
 * records the outcome the IRQ handler observed but is not propagated
 * from here — the waiter only receives the completion.
 */
static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
	struct completion *comp;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	idx = ctrl->tx.head;
	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	/*
	 * NOTE(review): wr_comp[idx] is not checked for NULL; a spurious TX
	 * interrupt with no queued message would pass NULL to complete() —
	 * confirm the hardware cannot raise TX_MSG_SENT unsolicited.
	 */
	comp = ctrl->wr_comp[idx];
	ctrl->wr_comp[idx] = NULL;

	complete(comp);
}
165
/*
 * Handle TX-related manager interrupts: clear the pending status bit,
 * dump manager/framer/interface diagnostic registers on a NACK, then
 * ack the oldest pending transaction with the observed result
 * (0 on success, -ENOTCONN on NACK).
 */
static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	int err = 0;

	if (stat & MGR_INT_TX_MSG_SENT)
		writel_relaxed(MGR_INT_TX_MSG_SENT,
			       ctrl->base + MGR_INT_CLR);

	if (stat & MGR_INT_TX_NACKED_2) {
		/* Snapshot all status registers before clearing the NACK */
		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);

		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);

		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
			stat, mgr_stat);
		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
			frm_intr_stat, frm_stat);
		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
			frm_cfg, frm_ie_stat);
		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
			intf_intr_stat, intf_stat);
		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
			intf_ie_stat);
		err = -ENOTCONN;
	}

	slim_ack_txn(ctrl, err);

	return IRQ_HANDLED;
}
206
/*
 * Handle an RX manager interrupt.  Reply messages are decoded in place
 * from a stack buffer; REPORT_PRESENT is copied into the RX ring and
 * deferred to the workqueue, since logical-address assignment can sleep.
 */
static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	u32 *rx_buf, pkt[10];
	bool q_rx = false;
	u8 mc, mt, len;

	/* First word of the FIFO carries the RL/MT/MC message header */
	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
	mt = SLIM_HEADER_GET_MT(pkt[0]);
	len = SLIM_HEADER_GET_RL(pkt[0]);
	mc = SLIM_HEADER_GET_MC(pkt[0]>>8);

	/*
	 * this message cannot be handled by ISR, so
	 * let work-queue handle it
	 */
	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
		if (!rx_buf) {
			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
				pkt[0]);
			goto rx_ret_irq;
		}
		rx_buf[0] = pkt[0];

	} else {
		rx_buf = pkt;
	}

	/*
	 * NOTE(review): len comes straight from the hardware header; pkt[]
	 * and an RX ring slot both hold 40 bytes — confirm the controller
	 * caps RL below that before trusting this copy length.
	 */
	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
			DIV_ROUND_UP(len, 4));

	switch (mc) {

	case SLIM_MSG_MC_REPORT_PRESENT:
		q_rx = true;
		break;
	case SLIM_MSG_MC_REPLY_INFORMATION:
	case SLIM_MSG_MC_REPLY_VALUE:
		/* TID is byte 3 of the first word; payload follows header */
		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
				  (u8)(*rx_buf >> 24), (len - 4));
		break;
	default:
		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
			mc, mt);
		break;
	}
rx_ret_irq:
	writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
	       MGR_INT_CLR);
	if (q_rx)
		queue_work(ctrl->rxwq, &ctrl->wd);

	return IRQ_HANDLED;
}
262
/* Top-level ISR: demux the manager interrupt status into TX/RX handlers. */
static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
	struct qcom_slim_ctrl *ctrl = d;
	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
	int ret = IRQ_NONE;

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
		ret = qcom_slim_handle_tx_irq(ctrl, stat);

	if (stat & MGR_INT_RX_MSG_RCVD)
		ret = qcom_slim_handle_rx_irq(ctrl, stat);

	return ret;
}
277
qcom_clk_pause_wakeup(struct slim_controller * sctrl)278 static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
279 {
280 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
281
282 clk_prepare_enable(ctrl->hclk);
283 clk_prepare_enable(ctrl->rclk);
284 enable_irq(ctrl->irq);
285
286 writel_relaxed(1, ctrl->base + FRM_WAKEUP);
287 /* Make sure framer wakeup write goes through before ISR fires */
288 mb();
289 /*
290 * HW Workaround: Currently, slave is reporting lost-sync messages
291 * after SLIMbus comes out of clock pause.
292 * Transaction with slave fail before slave reports that message
293 * Give some time for that report to come
294 * SLIMbus wakes up in clock gear 10 at 24.576MHz. With each superframe
295 * being 250 usecs, we wait for 5-10 superframes here to ensure
296 * we get the message
297 */
298 usleep_range(1250, 2500);
299 return 0;
300 }
301
slim_alloc_txbuf(struct qcom_slim_ctrl * ctrl,struct slim_msg_txn * txn,struct completion * done)302 static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
303 struct slim_msg_txn *txn,
304 struct completion *done)
305 {
306 unsigned long flags;
307 int idx;
308
309 spin_lock_irqsave(&ctrl->tx.lock, flags);
310 if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
311 spin_unlock_irqrestore(&ctrl->tx.lock, flags);
312 dev_err(ctrl->dev, "controller TX buf unavailable");
313 return NULL;
314 }
315 idx = ctrl->tx.tail;
316 ctrl->wr_comp[idx] = done;
317 ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
318
319 spin_unlock_irqrestore(&ctrl->tx.lock, flags);
320
321 return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
322 }
323
324
/*
 * Transmit one SLIMbus message and wait for the TX interrupt path to
 * complete it.  When all TX slots are in flight, slot allocation is
 * retried up to QCOM_BUF_ALLOC_RETRIES times with ~10ms sleeps.
 *
 * Returns 0 on success, -ENOMEM if no TX slot became free, or
 * -ETIMEDOUT if the controller never acked/nacked the message.
 */
static int qcom_xfer_msg(struct slim_controller *sctrl,
			 struct slim_msg_txn *txn)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	DECLARE_COMPLETION_ONSTACK(done);
	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
	unsigned long ms = txn->rl + HZ;
	u8 *puc;
	int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
	u8 la = txn->la;
	u32 *head;
	/* HW expects length field to be excluded */
	txn->rl--;

	/* spin till buffer is made available */
	if (!pbuf) {
		while (retries--) {
			usleep_range(10000, 15000);
			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
			if (pbuf)
				break;
		}
	}

	if (retries < 0 && !pbuf)
		return -ENOMEM;

	puc = (u8 *)pbuf;
	head = (u32 *)pbuf;

	/* Destination type selects the header layout and payload offset */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 0, la);
		puc += 3;
	} else {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 1, la);
		puc += 2;
	}

	if (slim_tid_txn(txn->mt, txn->mc))
		*(puc++) = txn->tid;

	if (slim_ec_txn(txn->mt, txn->mc)) {
		/* Element code goes out little-endian, low byte first */
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}

	if (txn->msg && txn->msg->wbuf)
		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
	/*
	 * NOTE(review): on timeout the TX slot still holds a pointer to
	 * this on-stack completion; a late interrupt would complete()
	 * stack memory that has gone out of scope — confirm the hardware
	 * and IRQ lifecycle rule this out.
	 */
	timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

	if (!timeout) {
		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		ret = -ETIMEDOUT;
	}

	return ret;

}
388
/*
 * Assign logical address @laddr to the device enumerated as @ead by
 * sending an ASSIGN_LOGICAL_ADDRESS message on the bus.  Returns the
 * result of the transfer (0 on success).
 */
static int qcom_set_laddr(struct slim_controller *sctrl,
			  struct slim_eaddr *ead, u8 laddr)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	struct {
		__be16 manf_id;
		__be16 prod_code;
		u8 dev_index;
		u8 instance;
		u8 laddr;
	} __packed p = {
		/* Wire format carries the enumeration address big-endian */
		.manf_id = cpu_to_be16(ead->manf_id),
		.prod_code = cpu_to_be16(ead->prod_code),
		.dev_index = ead->dev_index,
		.instance = ead->instance,
		.laddr = laddr,
	};
	struct slim_val_inf msg = {
		.wbuf = (void *)&p,
		.num_bytes = 7,
	};
	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
			      10, laddr, &msg);
	int ret;

	ret = slim_do_transfer(&ctrl->ctrl, &txn);
	if (ret)
		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
			laddr, ret);

	return ret;
}
420
/*
 * Pop the oldest message from the RX ring into @buf (which must hold at
 * least rx.sl_sz bytes).  Returns 0, or -ENODATA when the ring is empty.
 */
static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if (ctrl->rx.tail == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		return -ENODATA;
	}
	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
	       ctrl->rx.sl_sz);

	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return 0;
}
438
/*
 * Workqueue handler: drain the RX ring.  Only REPORT_PRESENT is expected
 * here — it triggers logical-address assignment, which may sleep and so
 * cannot run in the ISR; anything else is logged and dropped.
 */
static void qcom_slim_rxwq(struct work_struct *work)
{
	u8 buf[SLIM_MSGQ_BUF_LEN];
	u8 mc, mt;
	int ret;
	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
						   wd);

	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
		mt = SLIM_HEADER_GET_MT(buf[0]);
		mc = SLIM_HEADER_GET_MC(buf[1]);
		if (mt == SLIM_MSG_MT_CORE &&
		    mc == SLIM_MSG_MC_REPORT_PRESENT) {
			struct slim_eaddr ea;
			u8 laddr;

			/* Enumeration address is big-endian in the payload */
			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
			ea.dev_index = buf[6];
			ea.instance = buf[7];

			ret = slim_device_report_present(&ctrl->ctrl, &ea,
							 &laddr);
			if (ret < 0)
				dev_err(ctrl->dev, "assign laddr failed:%d\n",
					ret);
		} else {
			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
				mc, mt);
		}
	}
}
471
qcom_slim_prg_slew(struct platform_device * pdev,struct qcom_slim_ctrl * ctrl)472 static void qcom_slim_prg_slew(struct platform_device *pdev,
473 struct qcom_slim_ctrl *ctrl)
474 {
475 struct resource *slew_mem;
476
477 if (!ctrl->slew_reg) {
478 /* SLEW RATE register for this SLIMbus */
479 slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
480 "slew");
481 ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start,
482 resource_size(slew_mem));
483 if (!ctrl->slew_reg)
484 return;
485 }
486
487 writel_relaxed(1, ctrl->slew_reg);
488 /* Make sure SLIMbus-slew rate enabling goes through */
489 wmb();
490 }
491
qcom_slim_probe(struct platform_device * pdev)492 static int qcom_slim_probe(struct platform_device *pdev)
493 {
494 struct qcom_slim_ctrl *ctrl;
495 struct slim_controller *sctrl;
496 struct resource *slim_mem;
497 int ret, ver;
498
499 ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
500 if (!ctrl)
501 return -ENOMEM;
502
503 ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
504 if (IS_ERR(ctrl->hclk))
505 return PTR_ERR(ctrl->hclk);
506
507 ctrl->rclk = devm_clk_get(&pdev->dev, "core");
508 if (IS_ERR(ctrl->rclk))
509 return PTR_ERR(ctrl->rclk);
510
511 ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
512 if (ret) {
513 dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
514 return ret;
515 }
516
517 ctrl->irq = platform_get_irq(pdev, 0);
518 if (!ctrl->irq) {
519 dev_err(&pdev->dev, "no slimbus IRQ\n");
520 return -ENODEV;
521 }
522
523 sctrl = &ctrl->ctrl;
524 sctrl->dev = &pdev->dev;
525 ctrl->dev = &pdev->dev;
526 platform_set_drvdata(pdev, ctrl);
527 dev_set_drvdata(ctrl->dev, ctrl);
528
529 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
530 ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
531 if (IS_ERR(ctrl->base))
532 return PTR_ERR(ctrl->base);
533
534 sctrl->set_laddr = qcom_set_laddr;
535 sctrl->xfer_msg = qcom_xfer_msg;
536 sctrl->wakeup = qcom_clk_pause_wakeup;
537 ctrl->tx.n = QCOM_TX_MSGS;
538 ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
539 ctrl->rx.n = QCOM_RX_MSGS;
540 ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
541 ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
542 GFP_KERNEL);
543 if (!ctrl->wr_comp)
544 return -ENOMEM;
545
546 spin_lock_init(&ctrl->rx.lock);
547 spin_lock_init(&ctrl->tx.lock);
548 INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
549 ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
550 if (!ctrl->rxwq) {
551 dev_err(ctrl->dev, "Failed to start Rx WQ\n");
552 return -ENOMEM;
553 }
554
555 ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
556 ctrl->framer.superfreq =
557 ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
558 sctrl->a_framer = &ctrl->framer;
559 sctrl->clkgear = SLIM_MAX_CLK_GEAR;
560
561 qcom_slim_prg_slew(pdev, ctrl);
562
563 ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
564 IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
565 if (ret) {
566 dev_err(&pdev->dev, "request IRQ failed\n");
567 goto err_request_irq_failed;
568 }
569
570 ret = clk_prepare_enable(ctrl->hclk);
571 if (ret)
572 goto err_hclk_enable_failed;
573
574 ret = clk_prepare_enable(ctrl->rclk);
575 if (ret)
576 goto err_rclk_enable_failed;
577
578 ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
579 GFP_KERNEL);
580 if (!ctrl->tx.base) {
581 ret = -ENOMEM;
582 goto err;
583 }
584
585 ctrl->rx.base = devm_kcalloc(&pdev->dev,ctrl->rx.n, ctrl->rx.sl_sz,
586 GFP_KERNEL);
587 if (!ctrl->rx.base) {
588 ret = -ENOMEM;
589 goto err;
590 }
591
592 /* Register with framework before enabling frame, clock */
593 ret = slim_register_controller(&ctrl->ctrl);
594 if (ret) {
595 dev_err(ctrl->dev, "error adding controller\n");
596 goto err;
597 }
598
599 ver = readl_relaxed(ctrl->base);
600 /* Version info in 16 MSbits */
601 ver >>= 16;
602 /* Component register initialization */
603 writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
604 writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
605 ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
606
607 writel((MGR_INT_TX_NACKED_2 |
608 MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
609 MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
610 writel(1, ctrl->base + MGR_CFG);
611 /* Framer register initialization */
612 writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
613 (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
614 ctrl->base + FRM_CFG);
615 writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
616 writel(1, ctrl->base + INTF_CFG);
617 writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
618
619 pm_runtime_use_autosuspend(&pdev->dev);
620 pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
621 pm_runtime_set_active(&pdev->dev);
622 pm_runtime_mark_last_busy(&pdev->dev);
623 pm_runtime_enable(&pdev->dev);
624
625 dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
626 return 0;
627
628 err:
629 clk_disable_unprepare(ctrl->rclk);
630 err_rclk_enable_failed:
631 clk_disable_unprepare(ctrl->hclk);
632 err_hclk_enable_failed:
633 err_request_irq_failed:
634 destroy_workqueue(ctrl->rxwq);
635 return ret;
636 }
637
/*
 * Unbind: stop runtime PM, unregister from the SLIMbus core and tear
 * down the RX workqueue.
 */
static int qcom_slim_remove(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	slim_unregister_controller(&ctrl->ctrl);
	/*
	 * NOTE(review): hclk/rclk enabled in probe are only released by the
	 * runtime-suspend path; if the device is still active at this point
	 * the clocks remain on — confirm whether explicit
	 * clk_disable_unprepare() calls are needed here.
	 */
	destroy_workqueue(ctrl->rxwq);
	return 0;
}
647
/*
 * If runtime PM (CONFIG_PM) is not enabled, these two functions serve
 * only as helpers called from the system suspend/resume paths below.
 */
652 #ifdef CONFIG_PM
/*
 * Runtime-PM suspend: request SLIMbus clock pause; only on success are
 * the IRQ and the interface/core clocks gated, so a failed pause leaves
 * the bus fully operational and the error is returned.
 */
static int qcom_slim_runtime_suspend(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "pm_runtime: suspending...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret) {
		dev_err(device, "clk pause not entered:%d", ret);
	} else {
		disable_irq(ctrl->irq);
		clk_disable_unprepare(ctrl->hclk);
		clk_disable_unprepare(ctrl->rclk);
	}
	return ret;
}
669
/*
 * Runtime-PM resume: exit SLIMbus clock pause.  Clocks and IRQ are
 * presumably re-enabled via the controller's wakeup callback
 * (qcom_clk_pause_wakeup) from inside slim_ctrl_clk_pause() — confirm
 * against the slimbus core.
 */
static int qcom_slim_runtime_resume(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
	if (ret)
		dev_err(device, "clk pause not exited:%d", ret);
	return ret;
}
681 #endif
682
683 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: nothing to do when runtime PM has already suspended
 * the device; otherwise reuse the runtime-suspend path.
 */
static int qcom_slim_suspend(struct device *dev)
{
	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system suspend");
	return qcom_slim_runtime_suspend(dev);
}
696
/*
 * System resume: skip when runtime PM still holds the device suspended;
 * otherwise run the runtime-resume path and, on success, schedule an
 * autosuspend so the device can idle again.
 */
static int qcom_slim_resume(struct device *dev)
{
	int ret;

	if (pm_runtime_enabled(dev) && pm_runtime_suspended(dev))
		return 0;

	dev_dbg(dev, "system resume");
	ret = qcom_slim_runtime_resume(dev);
	if (!ret) {
		pm_runtime_mark_last_busy(dev);
		pm_request_autosuspend(dev);
	}
	return ret;
}
713 #endif /* CONFIG_PM_SLEEP */
714
/* Wire both system sleep and runtime PM to the clock-pause helpers. */
static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
	SET_RUNTIME_PM_OPS(
			   qcom_slim_runtime_suspend,
			   qcom_slim_runtime_resume,
			   NULL
	)
};
723
724 static const struct of_device_id qcom_slim_dt_match[] = {
725 { .compatible = "qcom,slim", },
726 { .compatible = "qcom,apq8064-slim", },
727 {}
728 };
729
/* Platform glue binding the probe/remove and PM ops to DT-matched devices. */
static struct platform_driver qcom_slim_driver = {
	.probe = qcom_slim_probe,
	.remove = qcom_slim_remove,
	.driver = {
		.name = "qcom_slim_ctrl",
		.of_match_table = qcom_slim_dt_match,
		.pm = &qcom_slim_dev_pm_ops,
	},
};
module_platform_driver(qcom_slim_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
743