// SPDX-License-Identifier: GPL-2.0-only
/*
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

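/*
 * Driver state for one OneNAND chip wired to a single GPMC chip-select:
 * the mapped register/BufferRAM window, the optional INT GPIO and the
 * completions used by the IRQ and DMA paths below.
 */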
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	struct gpio_desc *int_gpiod;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	struct dma_chan *dma_chan;
};

static void omap2_onenand_dma_complete_func(void *completion)
{
	complete(completion);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

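/*
 * Program the OneNAND SYS_CFG1 register: burst read latency, burst length
 * and the synchronous read/write enable bits, as selected by the GPMC
 * timing setup in probe.
 */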
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
				 bool sr, bool sw,
				 int latency, int burst_len)
{
	unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

	reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

	switch (burst_len) {
	case 0:		/* continuous */
		break;
	case 4:
		reg |= ONENAND_SYS_CFG1_BL_4;
		break;
	case 8:
		reg |= ONENAND_SYS_CFG1_BL_8;
		break;
	case 16:
		reg |= ONENAND_SYS_CFG1_BL_16;
		break;
	case 32:
		reg |= ONENAND_SYS_CFG1_BL_32;
		break;
	default:
		return -EINVAL;
	}

	if (latency > 5)
		reg |= ONENAND_SYS_CFG1_HF;
	if (latency > 7)
		reg |= ONENAND_SYS_CFG1_VHF;
	if (sr)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	if (sw)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

	write_reg(c, reg, ONENAND_REG_SYS_CFG1);

	return 0;
}

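/*
 * Decode the maximum synchronous clock rate (in MHz) from bits 7:4 of the
 * OneNAND version ID; returns -EINVAL for unknown encodings.
 */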
static int omap2_onenand_get_freq(int ver)
{
	switch ((ver >> 4) & 0xf) {
	case 0:
		return 40;
	case 1:
		return 54;
	case 2:
		return 66;
	case 3:
		return 83;
	case 4:
		return 104;
	}

	return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

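/*
 * Wait for the current OneNAND command to finish.  Reset and erase
 * prepare/verify states are first polled with short udelay()s; all other
 * states except reads then sleep on the INT GPIO interrupt, while reads
 * poll the interrupt register, both with a 20 ms timeout and a few retries
 * while the controller still reports the operation as ongoing.  Finally
 * the controller and ECC status registers are checked for errors.
 */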
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETTING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Add a delay to let GPIO settle */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

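/*
 * The chip has two BufferRAM banks; when BufferRAM1 is selected, its data
 * and spare areas sit one page (resp. one OOB area) further into the
 * memory-mapped window.
 */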
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

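/*
 * Run a single dmaengine memcpy between the memory-mapped BufferRAM and a
 * DMA-mapped kernel buffer and wait (at most 20 ms) for it to complete.
 */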
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
					     dma_addr_t src, dma_addr_t dst,
					     size_t count)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&c->dma_done);

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &c->dma_done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	dma_async_issue_pending(c->dma_chan);

	if (!wait_for_completion_io_timeout(&c->dma_done,
					    msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(c->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

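/*
 * Read from BufferRAM with DMA when the destination buffer is DMA-able and
 * the transfer is large enough to be worthwhile; any unaligned tail and all
 * fallback cases are handled with a plain memcpy() from the mapped window.
 */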
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;
	size_t xtra;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO mode if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer profitable, or we were
	 * invoked from panic_write().
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || mtd->oops_panic_write)
		goto out_copy;

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	dma_src = c->phys_base + bram_offset;

	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

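/*
 * Write to BufferRAM, mirroring the read path: DMA for large, aligned,
 * DMA-able buffers, a plain memcpy() into the mapped window otherwise.
 */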
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO mode if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer profitable, or we were
	 * invoked from panic_write().
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || mtd->oops_panic_write)
		goto out_copy;

	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

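/*
 * Probe: map the GPMC chip-select window, hook up the optional INT GPIO
 * and a memcpy-capable DMA channel, scan the chip, then switch the GPMC
 * and the chip to optimized (possibly synchronous) timings based on the
 * reported version before registering the MTD device.
 */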
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		/* A deferred probe will simply be retried later */
		return dev_err_probe(dev, PTR_ERR(c->int_gpiod),
				     "error getting gpio\n");
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	r = onenand_scan(&c->mtd, 1);
	if (r < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:	/* 40 MHz or lower */
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);
	omap2_onenand_shutdown(pdev);

	return 0;
}

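/*
 * Illustrative (not authoritative) sketch of a device-tree node this
 * driver binds to, placed under the GPMC node.  Only the properties the
 * driver itself parses are shown: "reg" (chip-select, offset, window
 * size) and the optional "int" interrupt GPIO.  Real board files also
 * carry GPMC timing properties, and the values and the GPIO phandle
 * below are placeholders:
 *
 *	onenand@0,0 {
 *		compatible = "ti,omap2-onenand";
 *		reg = <0 0 0x20000>;
 *		int-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
 *	};
 */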
static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe = omap2_onenand_probe,
	.remove = omap2_onenand_remove,
	.shutdown = omap2_onenand_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");