// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/slab.h>

#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"

/* Converts time to clock cycles */
#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
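/*
 * Worked example: a 25000 ps duration on a 10 ns (10000 ps) clock gives
 * TO_CYCLES(25000, 10000) == DIV_ROUND_UP_ULL(25000, 10000) == 3 cycles.
 */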

#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
/*
 * Clear the bit and poll it cleared. This is usually called with
 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
 * (bit 30).
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	/* clear the bit */
	writel(mask, addr + MXS_CLR_ADDR);

	/*
	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
	 * recommends waiting 1us.
	 */
	udelay(1);

	/* poll until the bit becomes clear */
	while ((readl(addr) & mask) && --timeout)
		/* nothing */;

	return !timeout;
}

#define MODULE_CLKGATE (1 << 30)
#define MODULE_SFTRST (1 << 31)
/*
 * The current mxs_reset_block() does two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most cases that is fine, but the MX23 has a hardware bug in the BCH
 * block (see erratum #2847): if you soft reset the BCH block, it becomes
 * unusable until the next hard reset. This case occurs in NAND boot mode,
 * where the on-chip ROM initializes the BCH block itself. If the driver
 * then resets the BCH again, the BCH stops working and you will see a DMA
 * timeout. The bug is fixed in later chips such as the MX28.
 *
 * To avoid the bug, we add a `just_enable` parameter to mxs_reset_block()
 * and reimplement it here.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
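
/*
 * As used below: gpmi_init() performs a full reset of the GPMI block,
 *
 *	gpmi_reset_block(r->gpmi_regs, false);
 *
 * while the BCH block is only enabled (not reset) on the MX23, to dodge
 * erratum #2847,
 *
 *	gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
 */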

static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
	struct clk *clk;
	int ret;
	int i;

	for (i = 0; i < GPMI_CLK_MAX; i++) {
		clk = this->resources.clock[i];
		if (!clk)
			break;

		if (v) {
			ret = clk_prepare_enable(clk);
			if (ret)
				goto err_clk;
		} else {
			clk_disable_unprepare(clk);
		}
	}
	return 0;

err_clk:
	for (; i > 0; i--)
		clk_disable_unprepare(this->resources.clock[i - 1]);
	return ret;
}

int gpmi_enable_clk(struct gpmi_nand_data *this)
{
	return __gpmi_enable_clk(this, true);
}

int gpmi_disable_clk(struct gpmi_nand_data *this)
{
	return __gpmi_enable_clk(this, false);
}

int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;
	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset BCH here, too. We got failures otherwise :(
	 * See the later BCH reset for an explanation of the MX23 handling.
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable Write-Protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from the DMA channel. We use DMA channel 0
	 * for all the chips.
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	gpmi_disable_clk(this);
	return 0;
err_out:
	gpmi_disable_clk(this);
	return ret;
}

/* This function is very useful. It is called only when a bug occurs. */
void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* start to print out the BCH info */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length              : %u\n"
		"ECC Strength           : %u\n"
		"Page Size in Bytes     : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC Chunk Size in Bytes: %u\n"
		"ECC Chunk Count        : %u\n"
		"Payload Size in Bytes  : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset  : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}

/* Configures the geometry for BCH. */
int bch_set_geometry(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	unsigned int block_count;
	unsigned int block_size;
	unsigned int metadata_size;
	unsigned int ecc_strength;
	unsigned int page_size;
	unsigned int gf_len;
	int ret;

	ret = common_nfc_set_geometry(this);
	if (ret)
		return ret;

	/* A page is laid out as data block 0 plus NBLOCKS more data blocks. */
	block_count = bch_geo->ecc_chunk_count - 1;
	block_size = bch_geo->ecc_chunk_size;
	metadata_size = bch_geo->metadata_size;
	/* The ECC0/ECCN register fields encode half the real ECC strength. */
	ecc_strength = bch_geo->ecc_strength >> 1;
	page_size = bch_geo->page_size;
	gf_len = bch_geo->gf_len;
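
	/*
	 * Hypothetical example, assuming common_nfc_set_geometry() chose a
	 * 2048+64 byte page split into four 512-byte chunks, with 10 bytes
	 * of metadata and an ECC strength of 8 over GF(2^13): block_count =
	 * 3, block_size = 512, metadata_size = 10, ecc_strength = 8 >> 1 =
	 * 4, page_size = 2112 and gf_len = 13.
	 */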

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;

	/*
	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on
	 * this chip, otherwise it will lock up. So we skip resetting BCH on
	 * the MX23. On the other hand, the MX28 needs the reset, because one
	 * case has been seen where the BCH produced ECC errors constantly
	 * after 10000 consecutive reboots. The latter case has not been seen
	 * on the MX23 yet, though we do not know whether it could happen
	 * there as well.
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
	if (ret)
		goto err_out;

	/* Configure layout 0. */
	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
		| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
		| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
		| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
		| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
		r->bch_regs + HW_BCH_FLASH0LAYOUT0);

	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
		| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
		| BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
		| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
		r->bch_regs + HW_BCH_FLASH0LAYOUT1);

	/* Set *all* chip selects to use layout 0. */
	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

	/* Enable interrupts. */
	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
	       r->bch_regs + HW_BCH_CTRL_SET);

	gpmi_disable_clk(this);
	return 0;
err_out:
	gpmi_disable_clk(this);
	return ret;
}

/*
 * <1> Firstly, we should know what the GPMI clock is.
 *     The GPMI clock is the internal clock of the GPMI NAND controller.
 *     If you set the GPMI NAND controller to 100MHz, the GPMI clock's
 *     period is 10ns. Call the GPMI clock's period the GPMI-clock-period.
 *
 * <2> Secondly, we should know what the frequency on the NAND chip pins is.
 *     The frequency on the NAND chip pins is derived from the GPMI clock.
 *     We can get it from the following equation:
 *
 *            F = G / (DS + DH)
 *
 *            F  : the frequency on the NAND chip pins.
 *            G  : the GPMI clock, such as 100MHz.
 *            DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 *            DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 *
 * <3> Thirdly, when the frequency on the NAND chip pins is above 33MHz,
 *     the NAND EDO (Extended Data Out) timing can be applied.
 *     The GPMI implements a feedback read strobe to sample the read data.
 *     The feedback read strobe can be delayed to support the NAND EDO
 *     timing, where the read strobe may deassert before the read data is
 *     valid, and the read data remains valid for some time after the read
 *     strobe.
 *
 *     The following figure illustrates some aspects of a NAND Flash read:
 *
 *                   |<---tREA---->|
 *                   |             |
 *                   |             |
 *                   |<--tRP-->|   |
 *                   |         |   |
 *                  __         ____|__________________________________
 *     RDN            \_______/    |
 *                                 |
 *                                  /---------\
 *     Read Data    ---------------<           >---------
 *                                  \---------/
 *                                 |           |
 *                                 |<-D->|
 *     FeedbackRDN  ________             ____________
 *                          \___________/
 *
 *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 *
 *
 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 *
 *  4.1) From the aspect of the NAND chip pins:
 *        Delay = (tREA + C - tRP)               {1}
 *
 *        tREA : the maximum read access time.
 *        C    : a constant to adjust the delay. The default is 4000ps.
 *        tRP  : the read pulse width, which is exactly
 *                   tRP = (GPMI-clock-period) * DATA_SETUP
 *
 *  4.2) From the aspect of the GPMI NAND controller:
 *        Delay = RDN_DELAY * 0.125 * RP         {2}
 *
 *        RP   : the DLL reference period.
 *            if (GPMI-clock-period > DLL_THRESHOLD)
 *                RP = GPMI-clock-period / 2;
 *            else
 *                RP = GPMI-clock-period;
 *
 *        Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period is greater
 *        than DLL_THRESHOLD. On most SoCs, DLL_THRESHOLD is 16000ps, but on
 *        the mx6q we use 12000ps.
 *
 *  4.3) Since {1} equals {2}, we get:
 *
 *                     (tREA + 4000 - tRP) * 8
 *        RDN_DELAY = -----------------------     {3}
 *                               RP
 */
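/*
 * Worked example of {3}, with hypothetical NAND timings: at G = 100MHz the
 * GPMI-clock-period is 10000ps. That is below the 12000ps mx6q
 * DLL_THRESHOLD, so RP = 10000ps and HALF_PERIOD stays clear. Assuming
 * DATA_SETUP = 1 cycle and tREA = 16000ps:
 *
 *     tRP       = 10000ps * 1 = 10000ps
 *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
 */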
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
				     const struct nand_sdr_timings *sdr)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
	unsigned int period_ps, reference_period_ps;
	unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
	unsigned int tRP_ps;
	bool use_half_period;
	int sample_delay_ps, sample_delay_factor;
	u16 busy_timeout_cycles;
	u8 wrn_dly_sel;

	if (sdr->tRC_min >= 30000) {
		/* ONFI non-EDO modes [0-3] */
		hw->clk_rate = 22000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
	} else if (sdr->tRC_min >= 25000) {
		/* ONFI EDO mode 4 */
		hw->clk_rate = 80000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	} else {
		/* ONFI EDO mode 5 */
		hw->clk_rate = 100000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	}

	/* SDR core timings are given in picoseconds */
	period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
	busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);

	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
		      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
	/*
	 * The DEVICE_BUSY_TIMEOUT field is specified in units of 4096 GPMI
	 * cycles, so round the cycle count up into those units.
	 */
	hw->timing1 =
		BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles,
							  4096));

	/*
	 * Derive the NFC ideal delay from {3}:
	 *
	 *                     (tREA + 4000 - tRP) * 8
	 *         RDN_DELAY = -----------------------
	 *                                RP
	 */
	if (period_ps > dll_threshold_ps) {
		use_half_period = true;
		reference_period_ps = period_ps / 2;
	} else {
		use_half_period = false;
		reference_period_ps = period_ps;
	}

	tRP_ps = data_setup_cycles * period_ps;
	sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
	if (sample_delay_ps > 0)
		sample_delay_factor = sample_delay_ps / reference_period_ps;
	else
		sample_delay_factor = 0;

	hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
	if (sample_delay_factor)
		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
			      BM_GPMI_CTRL1_DLL_ENABLE |
			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}

void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;

	clk_set_rate(r->clock[0], hw->clk_rate);

	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear several CTRL1 fields: the DLL must be disabled when setting
	 * RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;
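	/*
	 * Example: at 100MHz, USEC_PER_SEC / 100000000 is 0 in integer
	 * arithmetic (64 cycles are only 0.64us), so the clamp above makes
	 * sure we still wait at least 1us.
	 */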

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_us);
}

int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
			      const struct nand_data_interface *conf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	const struct nand_sdr_timings *sdr;

	/* Retrieve required NAND timings */
	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only MX6 GPMI controller can reach EDO timings */
	if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
		return -ENOTSUPP;

	/* Stop here if this call was just a check */
	if (chipnr < 0)
		return 0;

	/* Do the actual derivation of the controller timings */
	gpmi_nfc_compute_timings(this, sdr);

	this->hw.must_apply_timings = true;

	return 0;
}

/* Clears a BCH interrupt. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;

	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}

/* Returns the Ready/Busy status of the given chip. */
int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
{
	struct resources *r = &this->resources;
	uint32_t mask = 0;
	uint32_t reg = 0;

	if (GPMI_IS_MX23(this)) {
		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
		/*
		 * In the imx6, all the ready/busy pins are bound
		 * together. So we only need to check chip 0.
		 */
		if (GPMI_IS_MX6(this))
			chip = 0;

		/* MX28 shares the same R/B register as MX6Q. */
		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
	} else {
		dev_err(this->dev, "unknown arch.\n");
	}

	return reg & mask;
}

int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	int ret;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	/*
	 * With DMA_TRANS_NONE, the mxs-dma driver treats the "scatterlist"
	 * pointer as a plain array of PIO words to write to the GPMI
	 * registers.
	 */
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
	sgl = &this->cmd_sgl;

	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel,
				       sgl, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] submit the DMA */
	ret = start_dma_without_bch_irq(this, desc);

	dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);

	return ret;
}

int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	int ret;
	uint32_t command_mode;
	uint32_t address;
	u32 pio[2];

	/* [1] PIO */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] send DMA request */
	prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
				       1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] submit the DMA */
	ret = start_dma_without_bch_irq(this, desc);

	dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);

	return ret;
}

int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	int ret;
	u32 pio[2];
	bool direct;

	/* [1] : send PIO */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] : send DMA request */
	direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
				       1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] : submit the DMA */
	ret = start_dma_without_bch_irq(this, desc);

	dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);

	/* The DMA may have bounced through the internal DMA buffer. */
	if (!direct)
		memcpy(buf, this->data_buffer_dma, len);

	return ret;
}

int gpmi_send_page(struct gpmi_nand_data *this,
		   dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* A DMA descriptor that does an ECC page write. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
		      BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE,
				       DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	return start_dma_with_bch_irq(this, desc);
}

int gpmi_read_page(struct gpmi_nand_data *this,
		   dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio, 2,
				       DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Disable the BCH block */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio, 3,
				       DMA_TRANS_NONE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [4] submit the DMA */
	return start_dma_with_bch_irq(this, desc);
}

/**
 * gpmi_copy_bits - copy bits from one memory region to another
 * @dst: destination buffer
 * @dst_bit_off: bit offset we're starting to write at
 * @src: source buffer
 * @src_bit_off: bit offset we're starting to read from
 * @nbits: number of bits to copy
 *
 * This function copies bits from one memory region to another, and is used
 * by the GPMI driver to copy ECC sections which are not guaranteed to be
 * byte aligned.
 *
 * src and dst should not overlap.
 */
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
		    const u8 *src, size_t src_bit_off,
		    size_t nbits)
{
	size_t i;
	size_t nbytes;
	u32 src_buffer = 0;
	size_t bits_in_src_buffer = 0;

	if (!nbits)
		return;

	/*
	 * Move the src and dst pointers to the closest byte pointer and store
	 * the bit offsets within a byte.
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Initialize the src_buffer value with bits available in the first
	 * byte of data so that we end up with a byte aligned src pointer.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Calculate the number of bytes that can be copied from src to dst. */
	nbytes = nbits / 8;

	/* Try to align dst to a byte boundary. */
	if (dst_bit_off) {
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both src and dst pointers are byte aligned, thus we can
		 * just use the optimized memcpy function.
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * The src buffer is not byte aligned, hence we have to copy
		 * each src byte to the src_buffer variable before extracting
		 * a byte to store in dst.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}
	/* Update the dst and src pointers. */
	dst += nbytes;
	src += nbytes;

	/*
	 * nbits is the number of remaining bits. It should not exceed 8 as
	 * we've already copied as many bytes as possible.
	 */
	nbits %= 8;

	/*
	 * If there are no more bits to copy to the destination and the src
	 * buffer was already byte aligned, then we're done.
	 */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Copy the remaining bits to src_buffer. */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
			      bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * In case there were not enough bits to get a byte aligned dst
	 * buffer, prepare the src_buffer variable to match the dst
	 * organization (shift src_buffer by dst_bit_off and retrieve the
	 * least significant bits from dst).
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
			     (*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * Keep the most significant bits from dst if we end up with an
	 * unaligned number of bits.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
			       GENMASK(7, bits_in_src_buffer % 8)) <<
			      (nbytes * 8);
		nbytes++;
	}

	/* Copy the remaining bytes to dst. */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}
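
/*
 * A quick sanity-check example of gpmi_copy_bits(), with hypothetical
 * buffers (bit offsets count from the least significant bit of each byte):
 *
 *	u8 src[2] = { 0xff, 0x0f };
 *	u8 dst[3] = { 0x00, 0x00, 0x00 };
 *
 *	gpmi_copy_bits(dst, 6, src, 3, 10);
 *
 * copies the 10-bit stream 1 1111 1111 0 (read LSB first from src bit 3)
 * into dst starting at bit 6, leaving dst = { 0xc0, 0x7f, 0x00 }.
 */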
939