// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spinlock.h>

#define SE_I2C_TX_TRANS_LEN	0x26c
#define SE_I2C_RX_TRANS_LEN	0x270
#define SE_I2C_SCL_COUNTERS	0x278

#define SE_I2C_ERR	(M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
			 M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
#define SE_I2C_ABORT	BIT(1)

/* M_CMD OP codes for I2C */
#define I2C_WRITE		0x1
#define I2C_READ		0x2
#define I2C_WRITE_READ		0x3
#define I2C_ADDR_ONLY		0x4
#define I2C_BUS_CLEAR		0x6
#define I2C_STOP_ON_BUS		0x7
/* M_CMD params for I2C */
#define PRE_CMD_DELAY		BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define STOP_STRETCH		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_COMMAND_DELAY	BIT(4)
#define IGNORE_ADD_NACK		BIT(6)
#define READ_FINISHED_WITH_ACK	BIT(7)
#define BYPASS_ADDR_PHASE	BIT(8)
#define SLV_ADDR_MSK		GENMASK(15, 9)
#define SLV_ADDR_SHFT		9
/* I2C SCL COUNTER fields */
#define HIGH_COUNTER_MSK	GENMASK(29, 20)
#define HIGH_COUNTER_SHFT	20
#define LOW_COUNTER_MSK		GENMASK(19, 10)
#define LOW_COUNTER_SHFT	10
#define CYCLE_COUNTER_MSK	GENMASK(9, 0)

#define I2C_PACK_TX		BIT(0)
#define I2C_PACK_RX		BIT(1)

enum geni_i2c_err_code {
	GP_IRQ0,
	NACK,
	GP_IRQ2,
	BUS_PROTO,
	ARB_LOST,
	GP_IRQ5,
	GENI_OVERRUN,
	GENI_ILLEGAL_CMD,
	GENI_ABORT_DONE,
	GENI_TIMEOUT,
};

#define DM_I2C_CB_ERR	((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \
			 << 5)
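/*
 * Note: NACK, BUS_PROTO and ARB_LOST are reported shifted up by 5 bits in
 * the SE DMA RX IRQ status register, hence the << 5 above when this mask is
 * compared against dm_rx_st in the IRQ handler.
 */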

#define I2C_AUTO_SUSPEND_DELAY	250
#define KHZ(freq)		(1000 * (freq))
#define PACKING_BYTES_PW	4
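/*
 * FIFO words are packed with PACKING_BYTES_PW bytes of BITS_PER_BYTE bits
 * each (see geni_se_config_packing() in probe), so a single 32-bit FIFO
 * word carries up to four message bytes in both directions.
 */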

#define ABORT_TIMEOUT	HZ
#define XFER_TIMEOUT	HZ
#define RST_TIMEOUT	HZ

struct geni_i2c_dev {
	struct geni_se se;
	u32 tx_wm;
	int irq;
	int err;
	struct i2c_adapter adap;
	struct completion done;
	struct i2c_msg *cur;
	int cur_wr;
	int cur_rd;
	spinlock_t lock;
	u32 clk_freq_out;
	const struct geni_i2c_clk_fld *clk_fld;
	int suspended;
	void *dma_buf;
	size_t xfer_len;
	dma_addr_t dma_addr;
	struct dma_chan *tx_c;
	struct dma_chan *rx_c;
	bool gpi_mode;
	bool abort_done;
};

struct geni_i2c_err_log {
	int err;
	const char *msg;
};

static const struct geni_i2c_err_log gi2c_log[] = {
	[GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
	[NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
	[GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
	[BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
	[ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
	[GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
	[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
	[GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"},
	[GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
	[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};

struct geni_i2c_clk_fld {
	u32 clk_freq_out;
	u8 clk_div;
	u8 t_high_cnt;
	u8 t_low_cnt;
	u8 t_cycle_cnt;
};

/*
 * The hardware uses the formulas below to derive the SCL clock periods.
 * The firmware adds a few extra cycles that are not captured by these
 * formulas; the resulting periods have been confirmed to stay within the
 * I2C specification limits.
 *
 * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock
 * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock
 * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock
 * clk_freq_out = 1 / t_cycle
 * source_clock = 19.2 MHz
 */
static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
	{KHZ(100), 7, 10, 11, 26},
	{KHZ(400), 2, 5, 12, 24},
	{KHZ(1000), 1, 3, 9, 18},
};
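/*
 * Worked example (illustrative only) for the 100 kHz entry above with the
 * 19.2 MHz source clock:
 *   t_high  = (10 * 7) / 19.2 MHz ~= 3.65 us
 *   t_low   = (11 * 7) / 19.2 MHz ~= 4.01 us
 *   t_cycle = (26 * 7) / 19.2 MHz ~= 9.48 us  ->  ~105 kHz nominal
 * The extra firmware cycles mentioned above lengthen the real cycle, so the
 * effective rate ends up close to the requested 100 kHz while t_high/t_low
 * stay within spec.
 */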

static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
{
	int i;
	const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;

	for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
		if (itr->clk_freq_out == gi2c->clk_freq_out) {
			gi2c->clk_fld = itr;
			return 0;
		}
	}
	return -EINVAL;
}

static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c)
{
	const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
	u32 val;

	writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);

	val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);

	val = itr->t_high_cnt << HIGH_COUNTER_SHFT;
	val |= itr->t_low_cnt << LOW_COUNTER_SHFT;
	val |= itr->t_cycle_cnt;
	writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
}

static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c)
{
	u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
	u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
	u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
	u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
	u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
	u32 rx_st, tx_st;

	if (dma) {
		rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
		tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} else {
		rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
		tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
	}
	dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
		dma, tx_st, rx_st, m_stat);
	dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
		m_cmd, geni_s, geni_ios);
}

static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
{
	if (!gi2c->err)
		gi2c->err = gi2c_log[err].err;
	if (gi2c->cur)
		dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
			gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);

	switch (err) {
	case GENI_ABORT_DONE:
		gi2c->abort_done = true;
		break;
	case NACK:
	case GENI_TIMEOUT:
		dev_dbg(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		break;
	default:
		dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		geni_i2c_err_misc(gi2c);
		break;
	}
}

static irqreturn_t geni_i2c_irq(int irq, void *dev)
{
	struct geni_i2c_dev *gi2c = dev;
	void __iomem *base = gi2c->se.base;
	int j, p;
	u32 m_stat;
	u32 rx_st;
	u32 dm_tx_st;
	u32 dm_rx_st;
	u32 dma;
	u32 val;
	struct i2c_msg *cur;

	spin_lock(&gi2c->lock);
	m_stat = readl_relaxed(base + SE_GENI_M_IRQ_STATUS);
	rx_st = readl_relaxed(base + SE_GENI_RX_FIFO_STATUS);
	dm_tx_st = readl_relaxed(base + SE_DMA_TX_IRQ_STAT);
	dm_rx_st = readl_relaxed(base + SE_DMA_RX_IRQ_STAT);
	dma = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
	cur = gi2c->cur;

	if (!cur ||
	    m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) ||
	    dm_rx_st & (DM_I2C_CB_ERR)) {
		if (m_stat & M_GP_IRQ_1_EN)
			geni_i2c_err(gi2c, NACK);
		if (m_stat & M_GP_IRQ_3_EN)
			geni_i2c_err(gi2c, BUS_PROTO);
		if (m_stat & M_GP_IRQ_4_EN)
			geni_i2c_err(gi2c, ARB_LOST);
		if (m_stat & M_CMD_OVERRUN_EN)
			geni_i2c_err(gi2c, GENI_OVERRUN);
		if (m_stat & M_ILLEGAL_CMD_EN)
			geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
		if (m_stat & M_CMD_ABORT_EN)
			geni_i2c_err(gi2c, GENI_ABORT_DONE);
		if (m_stat & M_GP_IRQ_0_EN)
			geni_i2c_err(gi2c, GP_IRQ0);

		/* Disable the TX Watermark interrupt to stop TX */
		if (!dma)
			writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
	} else if (dma) {
		dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
			dm_tx_st, dm_rx_st);
	} else if (cur->flags & I2C_M_RD &&
		   m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) {
		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;

		for (j = 0; j < rxcnt; j++) {
			p = 0;
			val = readl_relaxed(base + SE_GENI_RX_FIFOn);
			while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
				cur->buf[gi2c->cur_rd++] = val & 0xff;
				val >>= 8;
				p++;
			}
			if (gi2c->cur_rd == cur->len)
				break;
		}
	} else if (!(cur->flags & I2C_M_RD) &&
		   m_stat & M_TX_FIFO_WATERMARK_EN) {
		for (j = 0; j < gi2c->tx_wm; j++) {
			u32 temp;

			val = 0;
			p = 0;
			while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
				temp = cur->buf[gi2c->cur_wr++];
				val |= temp << (p * 8);
				p++;
			}
			writel_relaxed(val, base + SE_GENI_TX_FIFOn);
			/* TX complete, disable the TX watermark interrupt */
			if (gi2c->cur_wr == cur->len) {
				writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
				break;
			}
		}
	}

	if (m_stat)
		writel_relaxed(m_stat, base + SE_GENI_M_IRQ_CLEAR);

	if (dma && dm_tx_st)
		writel_relaxed(dm_tx_st, base + SE_DMA_TX_IRQ_CLR);
	if (dma && dm_rx_st)
		writel_relaxed(dm_rx_st, base + SE_DMA_RX_IRQ_CLR);

	/* If this is an error with the done bit not set, handle it via the transfer timeout. */
	if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN ||
	    dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE ||
	    dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
		complete(&gi2c->done);

	spin_unlock(&gi2c->lock);

	return IRQ_HANDLED;
}

static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
{
	unsigned long time_left = ABORT_TIMEOUT;
	unsigned long flags;

	spin_lock_irqsave(&gi2c->lock, flags);
	geni_i2c_err(gi2c, GENI_TIMEOUT);
	gi2c->cur = NULL;
	gi2c->abort_done = false;
	geni_se_abort_m_cmd(&gi2c->se);
	spin_unlock_irqrestore(&gi2c->lock, flags);

	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
	} while (!gi2c->abort_done && time_left);

	if (!time_left)
		dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
}

static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
	} while (!(val & RX_RESET_DONE) && time_left);

	if (!(val & RX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
}

static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} while (!(val & TX_RESET_DONE) && time_left);

	if (!(val & TX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
}

static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
				    struct i2c_msg *cur)
{
	gi2c->cur_rd = 0;
	if (gi2c->dma_buf) {
		if (gi2c->err)
			geni_i2c_rx_fsm_rst(gi2c);
		geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
	}
}

static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
				    struct i2c_msg *cur)
{
	gi2c->cur_wr = 0;
	if (gi2c->dma_buf) {
		if (gi2c->err)
			geni_i2c_tx_fsm_rst(gi2c);
		geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
	}
}

static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t rx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_READ, m_param);

	if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = rx_dma;
		gi2c->dma_buf = dma_buf;
	}

	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_rx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}

static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t tx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);

	if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = tx_dma;
		gi2c->dma_buf = dma_buf;
	}

	if (!dma_buf) /* Get FIFO IRQ */
		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_tx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}

static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
{
	struct geni_i2c_dev *gi2c = cb;

	if (result->result != DMA_TRANS_NOERROR) {
		dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);
		gi2c->err = -EIO;
	} else if (result->residue) {
		dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
	}

	complete(&gi2c->done);
}

static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       void *tx_buf, dma_addr_t tx_addr,
			       void *rx_buf, dma_addr_t rx_addr)
{
	if (tx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
		i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
	}

	if (rx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
		i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
	}
}

static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			struct dma_slave_config *config, dma_addr_t *dma_addr_p,
			void **buf, unsigned int op, struct dma_chan *dma_chan)
{
	struct gpi_i2c_config *peripheral;
	unsigned int flags;
	void *dma_buf;
	dma_addr_t addr;
	enum dma_data_direction map_dirn;
	enum dma_transfer_direction dma_dirn;
	struct dma_async_tx_descriptor *desc;
	int ret;

	peripheral = config->peripheral_config;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
	if (!dma_buf)
		return -ENOMEM;

	if (op == I2C_WRITE)
		map_dirn = DMA_TO_DEVICE;
	else
		map_dirn = DMA_FROM_DEVICE;

	addr = dma_map_single(gi2c->se.dev->parent, dma_buf, msg->len, map_dirn);
	if (dma_mapping_error(gi2c->se.dev->parent, addr)) {
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		return -ENOMEM;
	}

	/* Set the message length as the RX transfer length */
	peripheral->rx_len = msg->len;
	peripheral->op = op;

	ret = dmaengine_slave_config(dma_chan, config);
	if (ret) {
		dev_err(gi2c->se.dev, "dma config error: %d for op:%d\n", ret, op);
		goto err_config;
	}

	peripheral->set_config = 0;
	peripheral->multi_msg = true;
	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	if (op == I2C_WRITE)
		dma_dirn = DMA_MEM_TO_DEV;
	else
		dma_dirn = DMA_DEV_TO_MEM;

	desc = dmaengine_prep_slave_single(dma_chan, addr, msg->len, dma_dirn, flags);
	if (!desc) {
		dev_err(gi2c->se.dev, "prep_slave_single failed\n");
		ret = -EIO;
		goto err_config;
	}

	desc->callback_result = i2c_gpi_cb_result;
	desc->callback_param = gi2c;

	dmaengine_submit(desc);
	*buf = dma_buf;
	*dma_addr_p = addr;

	return 0;

err_config:
	dma_unmap_single(gi2c->se.dev->parent, addr, msg->len, map_dirn);
	i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
	return ret;
}

static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], int num)
{
	struct dma_slave_config config = {};
	struct gpi_i2c_config peripheral = {};
	int i, ret = 0, timeout;
	dma_addr_t tx_addr, rx_addr;
	void *tx_buf = NULL, *rx_buf = NULL;
	const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;

	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);

	peripheral.pack_enable = I2C_PACK_TX | I2C_PACK_RX;
	peripheral.cycle_count = itr->t_cycle_cnt;
	peripheral.high_count = itr->t_high_cnt;
	peripheral.low_count = itr->t_low_cnt;
	peripheral.clk_div = itr->clk_div;
	peripheral.set_config = 1;
	peripheral.multi_msg = false;

	for (i = 0; i < num; i++) {
		gi2c->cur = &msgs[i];
		gi2c->err = 0;
		dev_dbg(gi2c->se.dev, "msg[%d].len:%d\n", i, gi2c->cur->len);

		peripheral.stretch = 0;
		if (i < num - 1)
			peripheral.stretch = 1;

		peripheral.addr = msgs[i].addr;

		if (msgs[i].flags & I2C_M_RD) {
			ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
					   &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
			if (ret)
				goto err;
		}

		ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
				   &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
		if (ret)
			goto err;

		if (msgs[i].flags & I2C_M_RD)
			dma_async_issue_pending(gi2c->rx_c);
		dma_async_issue_pending(gi2c->tx_c);

		timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
		if (!timeout) {
			dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n",
				gi2c->cur->flags, gi2c->cur->addr);
			gi2c->err = -ETIMEDOUT;
		}

		if (gi2c->err) {
			ret = gi2c->err;
			goto err;
		}

		geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
	}

	return num;

err:
	dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret);
	dmaengine_terminate_sync(gi2c->rx_c);
	dmaengine_terminate_sync(gi2c->tx_c);
	geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
	return ret;
}

static int geni_i2c_fifo_xfer(struct geni_i2c_dev *gi2c,
			      struct i2c_msg msgs[], int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;

		m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK);

		gi2c->cur = &msgs[i];
		if (msgs[i].flags & I2C_M_RD)
			ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param);
		else
			ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param);

		if (ret)
			return ret;
	}

	return num;
}

static int geni_i2c_xfer(struct i2c_adapter *adap,
			 struct i2c_msg msgs[],
			 int num)
{
	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
	int ret;

	gi2c->err = 0;
	reinit_completion(&gi2c->done);
	ret = pm_runtime_get_sync(gi2c->se.dev);
	if (ret < 0) {
		dev_err(gi2c->se.dev, "error turning on SE resources:%d\n", ret);
		pm_runtime_put_noidle(gi2c->se.dev);
		/* Set device in suspended state since resume failed */
		pm_runtime_set_suspended(gi2c->se.dev);
		return ret;
	}

	qcom_geni_i2c_conf(gi2c);

	if (gi2c->gpi_mode)
		ret = geni_i2c_gpi_xfer(gi2c, msgs, num);
	else
		ret = geni_i2c_fifo_xfer(gi2c, msgs, num);

	pm_runtime_mark_last_busy(gi2c->se.dev);
	pm_runtime_put_autosuspend(gi2c->se.dev);
	gi2c->cur = NULL;
	gi2c->err = 0;
	return ret;
}

static u32 geni_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
}

static const struct i2c_algorithm geni_i2c_algo = {
	.master_xfer = geni_i2c_xfer,
	.functionality = geni_i2c_func,
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id geni_i2c_acpi_match[] = {
	{ "QCOM0220"},
	{ },
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
#endif

static void release_gpi_dma(struct geni_i2c_dev *gi2c)
{
	if (gi2c->rx_c)
		dma_release_channel(gi2c->rx_c);

	if (gi2c->tx_c)
		dma_release_channel(gi2c->tx_c);
}

static int setup_gpi_dma(struct geni_i2c_dev *gi2c)
{
	int ret;

	geni_se_select_mode(&gi2c->se, GENI_GPI_DMA);
	gi2c->tx_c = dma_request_chan(gi2c->se.dev, "tx");
	if (IS_ERR(gi2c->tx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx");
	if (IS_ERR(gi2c->rx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n");
	return 0;

err_rx:
	dma_release_channel(gi2c->tx_c);
err_tx:
	return ret;
}

static int geni_i2c_probe(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c;
	struct resource *res;
	u32 proto, tx_depth, fifo_disable;
	int ret;
	struct device *dev = &pdev->dev;

	gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
	if (!gi2c)
		return -ENOMEM;

	gi2c->se.dev = dev;
	gi2c->se.wrapper = dev_get_drvdata(dev->parent);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gi2c->se.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(gi2c->se.base))
		return PTR_ERR(gi2c->se.base);

	gi2c->se.clk = devm_clk_get(dev, "se");
	if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
		return PTR_ERR(gi2c->se.clk);

	ret = device_property_read_u32(dev, "clock-frequency",
				       &gi2c->clk_freq_out);
	if (ret) {
		dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
		gi2c->clk_freq_out = KHZ(100);
	}

	if (has_acpi_companion(dev))
		ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev));

	gi2c->irq = platform_get_irq(pdev, 0);
	if (gi2c->irq < 0)
		return gi2c->irq;

	ret = geni_i2c_clk_map_idx(gi2c);
	if (ret) {
		dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
			gi2c->clk_freq_out, ret);
		return ret;
	}

	gi2c->adap.algo = &geni_i2c_algo;
	init_completion(&gi2c->done);
	spin_lock_init(&gi2c->lock);
	platform_set_drvdata(pdev, gi2c);
	ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
			       dev_name(dev), gi2c);
	if (ret) {
		dev_err(dev, "Request_irq failed:%d: err:%d\n",
			gi2c->irq, ret);
		return ret;
	}
	/* Disable the interrupt so that the system can enter low-power mode */
	disable_irq(gi2c->irq);
	i2c_set_adapdata(&gi2c->adap, gi2c);
	gi2c->adap.dev.parent = dev;
	gi2c->adap.dev.of_node = dev->of_node;
	strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));

	ret = geni_icc_get(&gi2c->se, "qup-memory");
	if (ret)
		return ret;
	/*
	 * Set the bus quota for core and cpu to a reasonable value for
	 * register access.
	 * Set quota for DDR based on bus speed.
	 */
	gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
	gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
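	/*
	 * Example (illustrative): with the default 100 kHz bus,
	 * Bps_to_icc(100000) requests roughly 100 kB/s of average DDR
	 * bandwidth for this serial engine.
	 */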

	ret = geni_icc_set_bw(&gi2c->se);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret) {
		dev_err(dev, "Error turning on resources %d\n", ret);
		return ret;
	}
	proto = geni_se_read_proto(&gi2c->se);
	if (proto != GENI_SE_I2C) {
		dev_err(dev, "Invalid proto %d\n", proto);
		geni_se_resources_off(&gi2c->se);
		return -ENXIO;
	}

	fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
	if (fifo_disable) {
		/* FIFO is disabled, so we can only use GPI DMA */
		gi2c->gpi_mode = true;
		ret = setup_gpi_dma(gi2c);
		if (ret)
			return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");

		dev_dbg(dev, "Using GPI DMA mode for I2C\n");
	} else {
		gi2c->gpi_mode = false;
		tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
		gi2c->tx_wm = tx_depth - 1;
		geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
		geni_se_config_packing(&gi2c->se, BITS_PER_BYTE,
				       PACKING_BYTES_PW, true, true, true);

		dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
	}

	ret = geni_se_resources_off(&gi2c->se);
	if (ret) {
		dev_err(dev, "Error turning off resources %d\n", ret);
		goto err_dma;
	}

	ret = geni_icc_disable(&gi2c->se);
	if (ret)
		goto err_dma;

	gi2c->suspended = 1;
	pm_runtime_set_suspended(gi2c->se.dev);
	pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(gi2c->se.dev);
	pm_runtime_enable(gi2c->se.dev);

	ret = i2c_add_adapter(&gi2c->adap);
	if (ret) {
		dev_err(dev, "Error adding i2c adapter %d\n", ret);
		pm_runtime_disable(gi2c->se.dev);
		goto err_dma;
	}

	dev_dbg(dev, "Geni-I2C adaptor successfully added\n");

	return 0;

err_dma:
	release_gpi_dma(gi2c);
	return ret;
}

static int geni_i2c_remove(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);

	i2c_del_adapter(&gi2c->adap);
	release_gpi_dma(gi2c);
	pm_runtime_disable(gi2c->se.dev);
	return 0;
}

static void geni_i2c_shutdown(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);

	/* Make client i2c transfers start failing */
	i2c_mark_adapter_suspended(&gi2c->adap);
}

static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
{
	int ret;
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	disable_irq(gi2c->irq);
	ret = geni_se_resources_off(&gi2c->se);
	if (ret) {
		enable_irq(gi2c->irq);
		return ret;
	} else {
		gi2c->suspended = 1;
	}

	return geni_icc_disable(&gi2c->se);
}

static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
{
	int ret;
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	ret = geni_icc_enable(&gi2c->se);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret)
		return ret;

	enable_irq(gi2c->irq);
	gi2c->suspended = 0;
	return 0;
}

static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_suspended(&gi2c->adap);

	if (!gi2c->suspended) {
		geni_i2c_runtime_suspend(dev);
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}

static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_resumed(&gi2c->adap);
	return 0;
}

static const struct dev_pm_ops geni_i2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
			   NULL)
};

static const struct of_device_id geni_i2c_dt_match[] = {
	{ .compatible = "qcom,geni-i2c" },
	{}
};
MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
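
/*
 * Illustrative (not normative) device tree fragment for this driver. The
 * register address, interrupt and clock specifiers below are placeholders
 * and are platform specific:
 *
 *   i2c@880000 {
 *           compatible = "qcom,geni-i2c";
 *           reg = <0x880000 0x4000>;
 *           interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
 *           clock-names = "se";
 *           clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
 *           clock-frequency = <400000>;
 *           #address-cells = <1>;
 *           #size-cells = <0>;
 *   };
 */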

static struct platform_driver geni_i2c_driver = {
	.probe  = geni_i2c_probe,
	.remove = geni_i2c_remove,
	.shutdown = geni_i2c_shutdown,
	.driver = {
		.name = "geni_i2c",
		.pm = &geni_i2c_pm_ops,
		.of_match_table = geni_i2c_dt_match,
		.acpi_match_table = ACPI_PTR(geni_i2c_acpi_match),
	},
};

module_platform_driver(geni_i2c_driver);

MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");