// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spinlock.h>

#define SE_I2C_TX_TRANS_LEN	0x26c
#define SE_I2C_RX_TRANS_LEN	0x270
#define SE_I2C_SCL_COUNTERS	0x278

#define SE_I2C_ERR  (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
			M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
#define SE_I2C_ABORT		BIT(1)

/* M_CMD OP codes for I2C */
#define I2C_WRITE		0x1
#define I2C_READ		0x2
#define I2C_WRITE_READ		0x3
#define I2C_ADDR_ONLY		0x4
#define I2C_BUS_CLEAR		0x6
#define I2C_STOP_ON_BUS		0x7
/* M_CMD params for I2C */
#define PRE_CMD_DELAY		BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define STOP_STRETCH		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_COMMAND_DELAY	BIT(4)
#define IGNORE_ADD_NACK		BIT(6)
#define READ_FINISHED_WITH_ACK	BIT(7)
#define BYPASS_ADDR_PHASE	BIT(8)
#define SLV_ADDR_MSK		GENMASK(15, 9)
#define SLV_ADDR_SHFT		9
/* I2C SCL COUNTER fields */
#define HIGH_COUNTER_MSK	GENMASK(29, 20)
#define HIGH_COUNTER_SHFT	20
#define LOW_COUNTER_MSK		GENMASK(19, 10)
#define LOW_COUNTER_SHFT	10
#define CYCLE_COUNTER_MSK	GENMASK(9, 0)

#define I2C_PACK_TX		BIT(0)
#define I2C_PACK_RX		BIT(1)

enum geni_i2c_err_code {
	GP_IRQ0,
	NACK,
	GP_IRQ2,
	BUS_PROTO,
	ARB_LOST,
	GP_IRQ5,
	GENI_OVERRUN,
	GENI_ILLEGAL_CMD,
	GENI_ABORT_DONE,
	GENI_TIMEOUT,
};

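/*
 * The first six error codes above mirror the GENI M_GP_IRQ_0..5 status bits
 * as decoded in geni_i2c_irq(); the rest are driver-internal conditions. In
 * SE DMA mode the same GP_IRQ error bits are reported in SE_DMA_RX_IRQ_STAT
 * shifted up by 5 bits, which is what the << 5 in DM_I2C_CB_ERR below
 * accounts for.
 */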
#define DM_I2C_CB_ERR		((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \
								 << 5)

#define I2C_AUTO_SUSPEND_DELAY	250
#define KHZ(freq)		(1000 * (freq))
#define PACKING_BYTES_PW	4

#define ABORT_TIMEOUT		HZ
#define XFER_TIMEOUT		HZ
#define RST_TIMEOUT		HZ

struct geni_i2c_dev {
	struct geni_se se;
	u32 tx_wm;
	int irq;
	int err;
	struct i2c_adapter adap;
	struct completion done;
	struct i2c_msg *cur;
	int cur_wr;
	int cur_rd;
	spinlock_t lock;
	struct clk *core_clk;
	u32 clk_freq_out;
	const struct geni_i2c_clk_fld *clk_fld;
	int suspended;
	void *dma_buf;
	size_t xfer_len;
	dma_addr_t dma_addr;
	struct dma_chan *tx_c;
	struct dma_chan *rx_c;
	bool gpi_mode;
	bool abort_done;
};

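/*
 * Per-compatible configuration. The I2C Master Hub variant (see
 * i2c_master_hub below) has an extra "core" clock, no DDR interconnect
 * path and no SE DMA support, and its TX FIFO depth is taken from here
 * because it lacks the HW_PARAM_0 register.
 */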
struct geni_i2c_desc {
	bool has_core_clk;
	char *icc_ddr;
	bool no_dma_support;
	unsigned int tx_fifo_depth;
};

struct geni_i2c_err_log {
	int err;
	const char *msg;
};

static const struct geni_i2c_err_log gi2c_log[] = {
	[GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
	[NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
	[GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
	[BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
	[ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
	[GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
	[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
	[GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"},
	[GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
	[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};

struct geni_i2c_clk_fld {
	u32	clk_freq_out;
	u8	clk_div;
	u8	t_high_cnt;
	u8	t_low_cnt;
	u8	t_cycle_cnt;
};

/*
 * Hardware uses the underlying formula to calculate time periods of
 * SCL clock cycle. Firmware uses some additional cycles excluded from the
 * below formula and it is confirmed that the time periods are within
 * specification limits.
 *
 * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock
 * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock
 * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock
 * clk_freq_out = t / t_cycle
 * source_clock = 19.2 MHz
 */
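/*
 * For example, the 100 kHz entry below gives a raw cycle time of
 * t_cycle = (26 * 7) / 19.2 MHz ~= 9.5 us (~105 kHz) before the extra
 * firmware cycles mentioned above bring the effective rate back down
 * toward the requested 100 kHz.
 */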
static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
	{KHZ(100), 7, 10, 11, 26},
	{KHZ(400), 2, 5, 12, 24},
	{KHZ(1000), 1, 3, 9, 18},
};

static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
{
	int i;
	const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;

	for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
		if (itr->clk_freq_out == gi2c->clk_freq_out) {
			gi2c->clk_fld = itr;
			return 0;
		}
	}
	return -EINVAL;
}

static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c)
{
	const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
	u32 val;

	writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);

	val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);

	val = itr->t_high_cnt << HIGH_COUNTER_SHFT;
	val |= itr->t_low_cnt << LOW_COUNTER_SHFT;
	val |= itr->t_cycle_cnt;
	writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
}

static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c)
{
	u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
	u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
	u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
	u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
	u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
	u32 rx_st, tx_st;

	if (dma) {
		rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
		tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} else {
		rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
		tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
	}
	dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
		dma, tx_st, rx_st, m_stat);
	dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
		m_cmd, geni_s, geni_ios);
}

static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
{
	if (!gi2c->err)
		gi2c->err = gi2c_log[err].err;
	if (gi2c->cur)
		dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
			gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);

	switch (err) {
	case GENI_ABORT_DONE:
		gi2c->abort_done = true;
		break;
	case NACK:
	case GENI_TIMEOUT:
		dev_dbg(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		break;
	default:
		dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		geni_i2c_err_misc(gi2c);
		break;
	}
}

static irqreturn_t geni_i2c_irq(int irq, void *dev)
{
	struct geni_i2c_dev *gi2c = dev;
	void __iomem *base = gi2c->se.base;
	int j, p;
	u32 m_stat;
	u32 rx_st;
	u32 dm_tx_st;
	u32 dm_rx_st;
	u32 dma;
	u32 val;
	struct i2c_msg *cur;

	spin_lock(&gi2c->lock);
	m_stat = readl_relaxed(base + SE_GENI_M_IRQ_STATUS);
	rx_st = readl_relaxed(base + SE_GENI_RX_FIFO_STATUS);
	dm_tx_st = readl_relaxed(base + SE_DMA_TX_IRQ_STAT);
	dm_rx_st = readl_relaxed(base + SE_DMA_RX_IRQ_STAT);
	dma = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
	cur = gi2c->cur;

	if (!cur ||
	    m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) ||
	    dm_rx_st & (DM_I2C_CB_ERR)) {
		if (m_stat & M_GP_IRQ_1_EN)
			geni_i2c_err(gi2c, NACK);
		if (m_stat & M_GP_IRQ_3_EN)
			geni_i2c_err(gi2c, BUS_PROTO);
		if (m_stat & M_GP_IRQ_4_EN)
			geni_i2c_err(gi2c, ARB_LOST);
		if (m_stat & M_CMD_OVERRUN_EN)
			geni_i2c_err(gi2c, GENI_OVERRUN);
		if (m_stat & M_ILLEGAL_CMD_EN)
			geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
		if (m_stat & M_CMD_ABORT_EN)
			geni_i2c_err(gi2c, GENI_ABORT_DONE);
		if (m_stat & M_GP_IRQ_0_EN)
			geni_i2c_err(gi2c, GP_IRQ0);

		/* Disable the TX Watermark interrupt to stop TX */
		if (!dma)
			writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
	} else if (dma) {
		dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
			dm_tx_st, dm_rx_st);
	} else if (cur->flags & I2C_M_RD &&
		   m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) {
		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;

		for (j = 0; j < rxcnt; j++) {
			p = 0;
			val = readl_relaxed(base + SE_GENI_RX_FIFOn);
			while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
				cur->buf[gi2c->cur_rd++] = val & 0xff;
				val >>= 8;
				p++;
			}
			if (gi2c->cur_rd == cur->len)
				break;
		}
	} else if (!(cur->flags & I2C_M_RD) &&
		   m_stat & M_TX_FIFO_WATERMARK_EN) {
		for (j = 0; j < gi2c->tx_wm; j++) {
			u32 temp;

			val = 0;
			p = 0;
			while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
				temp = cur->buf[gi2c->cur_wr++];
				val |= temp << (p * 8);
				p++;
			}
			writel_relaxed(val, base + SE_GENI_TX_FIFOn);
			/* TX Complete, Disable the TX Watermark interrupt */
			if (gi2c->cur_wr == cur->len) {
				writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
				break;
			}
		}
	}

	if (m_stat)
		writel_relaxed(m_stat, base + SE_GENI_M_IRQ_CLEAR);

	if (dma && dm_tx_st)
		writel_relaxed(dm_tx_st, base + SE_DMA_TX_IRQ_CLR);
	if (dma && dm_rx_st)
		writel_relaxed(dm_rx_st, base + SE_DMA_RX_IRQ_CLR);

	/* if this is err with done-bit not set, handle that through timeout. */
	if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN ||
	    dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE ||
	    dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
		complete(&gi2c->done);

	spin_unlock(&gi2c->lock);

	return IRQ_HANDLED;
}

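/*
 * Issue an abort command after a transfer timeout and wait for the abort
 * itself to complete, signalled via GENI_ABORT_DONE in the IRQ handler.
 */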
static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
{
	unsigned long time_left = ABORT_TIMEOUT;
	unsigned long flags;

	spin_lock_irqsave(&gi2c->lock, flags);
	geni_i2c_err(gi2c, GENI_TIMEOUT);
	gi2c->cur = NULL;
	gi2c->abort_done = false;
	geni_se_abort_m_cmd(&gi2c->se);
	spin_unlock_irqrestore(&gi2c->lock, flags);

	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
	} while (!gi2c->abort_done && time_left);

	if (!time_left)
		dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
}

static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
	} while (!(val & RX_RESET_DONE) && time_left);

	if (!(val & RX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
}

static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} while (!(val & TX_RESET_DONE) && time_left);

	if (!(val & TX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
}

static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
				    struct i2c_msg *cur)
{
	gi2c->cur_rd = 0;
	if (gi2c->dma_buf) {
		if (gi2c->err)
			geni_i2c_rx_fsm_rst(gi2c);
		geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
	}
}

static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
				    struct i2c_msg *cur)
{
	gi2c->cur_wr = 0;
	if (gi2c->dma_buf) {
		if (gi2c->err)
			geni_i2c_tx_fsm_rst(gi2c);
		geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
	}
}

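/*
 * Receive one message, preferring SE DMA with a DMA-safe bounce buffer and
 * falling back to FIFO mode if no such buffer is available or mapping fails.
 */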
static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t rx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_READ, m_param);

	if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = rx_dma;
		gi2c->dma_buf = dma_buf;
	}

	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_rx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}

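/*
 * Transmit one message. Mirrors geni_i2c_rx_one_msg(): SE DMA with a bounce
 * buffer where possible, otherwise FIFO mode driven by the TX watermark IRQ.
 */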
static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t tx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);

	if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = tx_dma;
		gi2c->dma_buf = dma_buf;
	}

	if (!dma_buf) /* Get FIFO IRQ */
		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_tx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}

static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
{
	struct geni_i2c_dev *gi2c = cb;

	if (result->result != DMA_TRANS_NOERROR) {
		dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);
		gi2c->err = -EIO;
	} else if (result->residue) {
		dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
	}

	complete(&gi2c->done);
}

static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       void *tx_buf, dma_addr_t tx_addr,
			       void *rx_buf, dma_addr_t rx_addr)
{
	if (tx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
		i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
	}

	if (rx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
		i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
	}
}

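/*
 * Map one message into a DMA-safe buffer, program the GPI peripheral config
 * for the requested op (I2C_READ or I2C_WRITE) and submit a descriptor on
 * the given channel. The caller issues the pending descriptors and waits.
 */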
static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			struct dma_slave_config *config, dma_addr_t *dma_addr_p,
			void **buf, unsigned int op, struct dma_chan *dma_chan)
{
	struct gpi_i2c_config *peripheral;
	unsigned int flags;
	void *dma_buf;
	dma_addr_t addr;
	enum dma_data_direction map_dirn;
	enum dma_transfer_direction dma_dirn;
	struct dma_async_tx_descriptor *desc;
	int ret;

	peripheral = config->peripheral_config;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
	if (!dma_buf)
		return -ENOMEM;

	if (op == I2C_WRITE)
		map_dirn = DMA_TO_DEVICE;
	else
		map_dirn = DMA_FROM_DEVICE;

	addr = dma_map_single(gi2c->se.dev->parent, dma_buf, msg->len, map_dirn);
	if (dma_mapping_error(gi2c->se.dev->parent, addr)) {
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		return -ENOMEM;
	}

	/* Set the RX transfer length to the message length */
	peripheral->rx_len = msg->len;
	peripheral->op = op;

	ret = dmaengine_slave_config(dma_chan, config);
	if (ret) {
		dev_err(gi2c->se.dev, "dma config error: %d for op:%d\n", ret, op);
		goto err_config;
	}

	peripheral->set_config = 0;
	peripheral->multi_msg = true;
	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;

	if (op == I2C_WRITE)
		dma_dirn = DMA_MEM_TO_DEV;
	else
		dma_dirn = DMA_DEV_TO_MEM;

	desc = dmaengine_prep_slave_single(dma_chan, addr, msg->len, dma_dirn, flags);
	if (!desc) {
565 dev_err(gi2c->se.dev, "prep_slave_sg failed\n");
		ret = -EIO;
		goto err_config;
	}

	desc->callback_result = i2c_gpi_cb_result;
	desc->callback_param = gi2c;

	dmaengine_submit(desc);
	*buf = dma_buf;
	*dma_addr_p = addr;

	return 0;

err_config:
	dma_unmap_single(gi2c->se.dev->parent, addr, msg->len, map_dirn);
	i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
	return ret;
}

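/*
 * Transfer a set of messages over GPI DMA: for each message, queue an RX
 * descriptor when reading, always queue the TX (command) descriptor, issue
 * both and wait for completion before unmapping and moving on.
 */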
static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], int num)
{
	struct dma_slave_config config = {};
	struct gpi_i2c_config peripheral = {};
	int i, ret = 0, timeout;
	dma_addr_t tx_addr, rx_addr;
	void *tx_buf = NULL, *rx_buf = NULL;
	const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;

	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);

	peripheral.pack_enable = I2C_PACK_TX | I2C_PACK_RX;
	peripheral.cycle_count = itr->t_cycle_cnt;
	peripheral.high_count = itr->t_high_cnt;
	peripheral.low_count = itr->t_low_cnt;
	peripheral.clk_div = itr->clk_div;
	peripheral.set_config = 1;
	peripheral.multi_msg = false;

	for (i = 0; i < num; i++) {
		gi2c->cur = &msgs[i];
		gi2c->err = 0;
		dev_dbg(gi2c->se.dev, "msg[%d].len:%d\n", i, gi2c->cur->len);

		peripheral.stretch = 0;
		if (i < num - 1)
			peripheral.stretch = 1;

		peripheral.addr = msgs[i].addr;

		if (msgs[i].flags & I2C_M_RD) {
			ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
					   &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
			if (ret)
				goto err;
		}

		ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
				   &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
		if (ret)
			goto err;

		if (msgs[i].flags & I2C_M_RD)
			dma_async_issue_pending(gi2c->rx_c);
		dma_async_issue_pending(gi2c->tx_c);

		timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
		if (!timeout) {
			dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n",
				gi2c->cur->flags, gi2c->cur->addr);
			gi2c->err = -ETIMEDOUT;
		}

		if (gi2c->err) {
			ret = gi2c->err;
			goto err;
		}

		geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
	}

	return num;

err:
	dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret);
	dmaengine_terminate_sync(gi2c->rx_c);
	dmaengine_terminate_sync(gi2c->tx_c);
	geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
	return ret;
}

static int geni_i2c_fifo_xfer(struct geni_i2c_dev *gi2c,
			      struct i2c_msg msgs[], int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;

		m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK);

		gi2c->cur = &msgs[i];
		if (msgs[i].flags & I2C_M_RD)
			ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param);
		else
			ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param);

		if (ret)
			return ret;
	}

	return num;
}

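/*
 * Top-level master_xfer callback: resume the controller via runtime PM,
 * program the SCL counters, then dispatch to the GPI DMA or FIFO/SE-DMA
 * path depending on how the serial engine was probed.
 */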
static int geni_i2c_xfer(struct i2c_adapter *adap,
			 struct i2c_msg msgs[],
			 int num)
{
	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
	int ret;

	gi2c->err = 0;
	reinit_completion(&gi2c->done);
	ret = pm_runtime_get_sync(gi2c->se.dev);
	if (ret < 0) {
		dev_err(gi2c->se.dev, "error turning SE resources:%d\n", ret);
		pm_runtime_put_noidle(gi2c->se.dev);
		/* Set device in suspended since resume failed */
		pm_runtime_set_suspended(gi2c->se.dev);
		return ret;
	}

	qcom_geni_i2c_conf(gi2c);

	if (gi2c->gpi_mode)
		ret = geni_i2c_gpi_xfer(gi2c, msgs, num);
	else
		ret = geni_i2c_fifo_xfer(gi2c, msgs, num);

	pm_runtime_mark_last_busy(gi2c->se.dev);
	pm_runtime_put_autosuspend(gi2c->se.dev);
	gi2c->cur = NULL;
	gi2c->err = 0;
	return ret;
}

static u32 geni_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
}

static const struct i2c_algorithm geni_i2c_algo = {
	.master_xfer = geni_i2c_xfer,
	.functionality = geni_i2c_func,
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id geni_i2c_acpi_match[] = {
	{ "QCOM0220"},
	{ },
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
#endif

static void release_gpi_dma(struct geni_i2c_dev *gi2c)
{
	if (gi2c->rx_c)
		dma_release_channel(gi2c->rx_c);

	if (gi2c->tx_c)
		dma_release_channel(gi2c->tx_c);
}

static int setup_gpi_dma(struct geni_i2c_dev *gi2c)
{
	int ret;

	geni_se_select_mode(&gi2c->se, GENI_GPI_DMA);
	gi2c->tx_c = dma_request_chan(gi2c->se.dev, "tx");
	if (IS_ERR(gi2c->tx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx");
	if (IS_ERR(gi2c->rx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n");
	return 0;

err_rx:
	dma_release_channel(gi2c->tx_c);
err_tx:
	return ret;
}

static int geni_i2c_probe(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c;
	u32 proto, tx_depth, fifo_disable;
	int ret;
	struct device *dev = &pdev->dev;
	const struct geni_i2c_desc *desc = NULL;

	gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
	if (!gi2c)
		return -ENOMEM;

	gi2c->se.dev = dev;
	gi2c->se.wrapper = dev_get_drvdata(dev->parent);
	gi2c->se.base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gi2c->se.base))
		return PTR_ERR(gi2c->se.base);

	desc = device_get_match_data(&pdev->dev);

	if (desc && desc->has_core_clk) {
		gi2c->core_clk = devm_clk_get(dev, "core");
		if (IS_ERR(gi2c->core_clk))
			return PTR_ERR(gi2c->core_clk);
	}

	gi2c->se.clk = devm_clk_get(dev, "se");
	if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
		return PTR_ERR(gi2c->se.clk);

	ret = device_property_read_u32(dev, "clock-frequency",
				       &gi2c->clk_freq_out);
	if (ret) {
		dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
		gi2c->clk_freq_out = KHZ(100);
	}

	if (has_acpi_companion(dev))
		ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev));

	gi2c->irq = platform_get_irq(pdev, 0);
	if (gi2c->irq < 0)
		return gi2c->irq;

	ret = geni_i2c_clk_map_idx(gi2c);
	if (ret) {
		dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
			gi2c->clk_freq_out, ret);
		return ret;
	}

	gi2c->adap.algo = &geni_i2c_algo;
	init_completion(&gi2c->done);
	spin_lock_init(&gi2c->lock);
	platform_set_drvdata(pdev, gi2c);
	ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
			       dev_name(dev), gi2c);
	if (ret) {
		dev_err(dev, "Request_irq failed:%d: err:%d\n",
			gi2c->irq, ret);
		return ret;
	}
	/* Disable the interrupt so that the system can enter low-power mode */
	disable_irq(gi2c->irq);
	i2c_set_adapdata(&gi2c->adap, gi2c);
	gi2c->adap.dev.parent = dev;
	gi2c->adap.dev.of_node = dev->of_node;
	strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));

	ret = geni_icc_get(&gi2c->se, desc ? desc->icc_ddr : "qup-memory");
	if (ret)
		return ret;
	/*
	 * Set the bus quota for core and cpu to a reasonable value for
	 * register access.
	 * Set quota for DDR based on bus speed.
	 */
	gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
	if (!desc || desc->icc_ddr)
		gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);

	ret = geni_icc_set_bw(&gi2c->se);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gi2c->core_clk);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret) {
		dev_err(dev, "Error turning on resources %d\n", ret);
		return ret;
	}
	proto = geni_se_read_proto(&gi2c->se);
	if (proto != GENI_SE_I2C) {
		dev_err(dev, "Invalid proto %d\n", proto);
		geni_se_resources_off(&gi2c->se);
		clk_disable_unprepare(gi2c->core_clk);
		return -ENXIO;
	}

	if (desc && desc->no_dma_support)
		fifo_disable = false;
	else
		fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;

	if (fifo_disable) {
		/* FIFO is disabled, so we can only use GPI DMA */
		gi2c->gpi_mode = true;
		ret = setup_gpi_dma(gi2c);
		if (ret)
			return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");

		dev_dbg(dev, "Using GPI DMA mode for I2C\n");
	} else {
		gi2c->gpi_mode = false;
		tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);

		/* I2C Master Hub Serial Elements don't have the HW_PARAM_0 register */
		if (!tx_depth && desc)
			tx_depth = desc->tx_fifo_depth;

		if (!tx_depth) {
			dev_err(dev, "Invalid TX FIFO depth\n");
			return -EINVAL;
		}

		gi2c->tx_wm = tx_depth - 1;
		geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
		geni_se_config_packing(&gi2c->se, BITS_PER_BYTE,
				       PACKING_BYTES_PW, true, true, true);

		dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
	}

	clk_disable_unprepare(gi2c->core_clk);
	ret = geni_se_resources_off(&gi2c->se);
	if (ret) {
		dev_err(dev, "Error turning off resources %d\n", ret);
		goto err_dma;
	}

	ret = geni_icc_disable(&gi2c->se);
	if (ret)
		goto err_dma;

	gi2c->suspended = 1;
	pm_runtime_set_suspended(gi2c->se.dev);
	pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(gi2c->se.dev);
	pm_runtime_enable(gi2c->se.dev);

	ret = i2c_add_adapter(&gi2c->adap);
	if (ret) {
		dev_err(dev, "Error adding i2c adapter %d\n", ret);
		pm_runtime_disable(gi2c->se.dev);
		goto err_dma;
	}

	dev_dbg(dev, "Geni-I2C adaptor successfully added\n");

	return 0;

err_dma:
	release_gpi_dma(gi2c);
	return ret;
}

static void geni_i2c_remove(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);

	i2c_del_adapter(&gi2c->adap);
	release_gpi_dma(gi2c);
	pm_runtime_disable(gi2c->se.dev);
}

static void geni_i2c_shutdown(struct platform_device *pdev)
{
	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);

	/* Make client i2c transfers start failing */
	i2c_mark_adapter_suspended(&gi2c->adap);
}

static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
{
	int ret;
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	disable_irq(gi2c->irq);
	ret = geni_se_resources_off(&gi2c->se);
	if (ret) {
		enable_irq(gi2c->irq);
		return ret;

	} else {
		gi2c->suspended = 1;
	}

	clk_disable_unprepare(gi2c->core_clk);

	return geni_icc_disable(&gi2c->se);
}

static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
{
	int ret;
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	ret = geni_icc_enable(&gi2c->se);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gi2c->core_clk);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret)
		return ret;

	enable_irq(gi2c->irq);
	gi2c->suspended = 0;
	return 0;
}

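/*
 * System sleep: mark the adapter suspended so new transfers fail fast. If
 * the controller is still runtime-active, suspend it now and resync the
 * runtime PM status so the next transfer resumes it cleanly.
 */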
static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_suspended(&gi2c->adap);

	if (!gi2c->suspended) {
		geni_i2c_runtime_suspend(dev);
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}

static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_resumed(&gi2c->adap);
	return 0;
}

static const struct dev_pm_ops geni_i2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
			   NULL)
};

static const struct geni_i2c_desc i2c_master_hub = {
	.has_core_clk = true,
	.icc_ddr = NULL,
	.no_dma_support = true,
	.tx_fifo_depth = 16,
};

static const struct of_device_id geni_i2c_dt_match[] = {
	{ .compatible = "qcom,geni-i2c" },
	{ .compatible = "qcom,geni-i2c-master-hub", .data = &i2c_master_hub },
	{}
};
MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);

static struct platform_driver geni_i2c_driver = {
	.probe = geni_i2c_probe,
	.remove_new = geni_i2c_remove,
	.shutdown = geni_i2c_shutdown,
	.driver = {
		.name = "geni_i2c",
		.pm = &geni_i2c_pm_ops,
		.of_match_table = geni_i2c_dt_match,
		.acpi_match_table = ACPI_PTR(geni_i2c_acpi_match),
	},
};

module_platform_driver(geni_i2c_driver);

MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");