// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
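/*
 * Worked example (illustrative values only: a 66 MHz peripheral clock,
 * a 100 kHz bus and clk_offset = 4; the real offset depends on the SoC):
 * div = 66000000 / (2 * 100000) - 4 = 326, so ckdiv = fls(326 >> 8) = 1
 * and cdiv = 326 >> 1 = 163, giving
 * twi_clk = 66000000 / (2 * (163 * 2 + 4)) = 100 kHz.
 */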
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
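	/*
	 * cdiv must fit into 8 bits: fls(div >> 8) picks the smallest
	 * power-of-two prescaler (ckdiv) that brings div into range.
	 */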
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
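		/*
		 * Worked example (illustrative values only: a 66 MHz
		 * peripheral clock and i2c-sda-hold-time-ns = 300):
		 * hold = DIV_ROUND_UP(300 * 66000, 1000000) - 3 = 20 - 3 = 17,
		 * i.e. (17 + 3) * 15.15 ns ~= 303 ns of SDA hold time.
		 */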
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not empty
	 * yet. So we have to wait for the TXCOMP or NACK bits to be set in
	 * the Status Register to be sure that the STOP bit has been sent and
	 * the transfer is completed. The NACK interrupt has already been
	 * enabled, we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

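		/*
		 * Split the buffer into a 4-byte aligned part and a remainder
		 * so that the bulk of the data can be moved with the
		 * four-byte TX FIFO trigger configured below.
		 */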
		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * The DMA controller is triggered when at least 4 bytes of
		 * data can be written into the TX FIFO.
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we get here, it means there is garbage data left in the RHR, so
	 * discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
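			/*
			 * The byte just read is the SMBus block length:
			 * extend the transfer by that many bytes. msg->len
			 * also counts the length byte itself, hence the +1.
			 */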
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second to last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * The DMA controller is triggered when at least 4 bytes of
		 * data can be read from the RX FIFO.
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a data byte
	 * has almost been received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is why we ask to send the
	 * stop command not on the last data byte but on the second to last
	 * one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data byte. It might
	 * happen when the i2c slave device sends data too quickly after
	 * receiving the ack from the master. The data has almost been
	 * received before the order to send the stop. In this case, sending
	 * the stop command could cause a RXRDY interrupt together with a
	 * TXCOMP one. It is better to manage the RXRDY interrupt first in
	 * order not to keep garbage data in the Receive Holding Register for
	 * the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY; this
		 * works with and without FIFO. With FIFO enabled we could
		 * also read RXFL and avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the
	 * NACK, TXCOMP and TXRDY bits all together in the Status Register
	 * (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfers.
	 *
	 * In that case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another
	 * NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfers.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also
	 * triggers the DMA controller to write the next data into the THR.
	 * Then the result depends on the hardware version of the I2C
	 * controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write
	 * the next data into the THR, hence starting a new transfer: the I2C
	 * slave is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to
	 * manage the first NACK before the I2C controller detects the second
	 * NACK and sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the
	 * I2C controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we
	 * should read the SR to clear all pending interrupts at the beginning
	 * of at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks
	 * the THR (and sets the LOCK bit in the SR): even though the DMA
	 * controller is triggered by the TXRDY bit to write the next data
	 * into the THR, this data actually won't go on the I2C bus, hence a
	 * second NACK is not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer's datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than
	 * TXCOMP to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but will remain in the
	 * Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for a timeout.
	 * This case can be reproduced easily when writing into an at24
	 * eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However, for write transactions managed by the CPU, we first write
	 * into the THR, so TXCOMP is cleared. Then we can safely enable the
	 * TXCOMP interrupt. If the TXCOMP interrupt were enabled before
	 * writing into the THR, the interrupt handler would be called
	 * immediately and the i2c command would be reported as completed.
	 * Also, when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into the THR.
	 * So the TXCOMP interrupt is enabled later, by
	 * at91_twi_write_data_dma_callback(). Immediately after, in that DMA
	 * callback, if the alternative command mode is not used, we still
	 * need to send the STOP condition manually by writing the
	 * corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

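	/*
	 * Zero-length message: the QUICK command sends only the address (and
	 * R/W bit); completion is then signalled by TXCOMP alone.
	 */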
	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without the alternative command mode, the
		 * last byte has to be read manually so as not to send the
		 * stop command too late and then receive extra data.
		 * In practice, there are some issues if you use dma to read
		 * n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the last two manually seems
		 * to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
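			/*
			 * PIO write: push the first byte now, then enable
			 * TXRDY only if more bytes remain (a single-byte
			 * write completes on TXCOMP alone).
			 */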
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}
	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
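		/*
		 * Build the internal (register) address MSB first from the
		 * first message; each byte also bumps the IADRSZ field by one
		 * so the controller emits the right number of address bytes.
		 */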
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
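	/* The internal address register (IADR) holds at most three bytes. */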
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to
	 * transfer the buffer. On the other hand, if the buffer size is not
	 * aligned to addr_width then the buffer is transferred using single
	 * byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 bytes of data can be written into
	 * the TX FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from the buffer length, so the XDMAC is triggered properly
	 * to read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	at91_calc_twi_clock(dev);

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}