1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4 * Steven J. Hill <sjhill@realitydiluted.com>
5 * Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Info:
8 * Contains standard defines and IDs for NAND flash devices
9 *
10 * Changelog:
11 * See git changelog.
12 */
13 #ifndef __LINUX_MTD_RAWNAND_H
14 #define __LINUX_MTD_RAWNAND_H
15
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/nand.h>
18 #include <linux/mtd/flashchip.h>
19 #include <linux/mtd/bbm.h>
20 #include <linux/mtd/jedec.h>
21 #include <linux/mtd/onfi.h>
22 #include <linux/mutex.h>
23 #include <linux/of.h>
24 #include <linux/types.h>
25
26 struct nand_chip;
27 struct gpio_desc;
28
29 /* The maximum number of NAND chips in an array */
30 #define NAND_MAX_CHIPS 8
31
32 /*
33 * Constants for hardware specific CLE/ALE/NCE function
34 *
35 * These are bits which can be or'ed to set/clear multiple
36 * bits in one go.
37 */
38 /* Select the chip by setting nCE to low */
39 #define NAND_NCE 0x01
40 /* Select the command latch by setting CLE to high */
41 #define NAND_CLE 0x02
42 /* Select the address latch by setting ALE to high */
43 #define NAND_ALE 0x04
44
45 #define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
46 #define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
47 #define NAND_CTRL_CHANGE 0x80
48
49 /*
50 * Standard NAND flash commands
51 */
52 #define NAND_CMD_READ0 0
53 #define NAND_CMD_READ1 1
54 #define NAND_CMD_RNDOUT 5
55 #define NAND_CMD_PAGEPROG 0x10
56 #define NAND_CMD_READOOB 0x50
57 #define NAND_CMD_ERASE1 0x60
58 #define NAND_CMD_STATUS 0x70
59 #define NAND_CMD_SEQIN 0x80
60 #define NAND_CMD_RNDIN 0x85
61 #define NAND_CMD_READID 0x90
62 #define NAND_CMD_ERASE2 0xd0
63 #define NAND_CMD_PARAM 0xec
64 #define NAND_CMD_GET_FEATURES 0xee
65 #define NAND_CMD_SET_FEATURES 0xef
66 #define NAND_CMD_RESET 0xff
67
68 /* Extended commands for large page devices */
69 #define NAND_CMD_READSTART 0x30
70 #define NAND_CMD_READCACHESEQ 0x31
71 #define NAND_CMD_READCACHEEND 0x3f
72 #define NAND_CMD_RNDOUTSTART 0xE0
73 #define NAND_CMD_CACHEDPROG 0x15
74
75 #define NAND_CMD_NONE -1
76
77 /* Status bits */
78 #define NAND_STATUS_FAIL 0x01
79 #define NAND_STATUS_FAIL_N1 0x02
80 #define NAND_STATUS_TRUE_READY 0x20
81 #define NAND_STATUS_READY 0x40
82 #define NAND_STATUS_WP 0x80
83
84 #define NAND_DATA_IFACE_CHECK_ONLY -1
85
86 /*
87 * Constants for Hardware ECC
88 */
89 /* Reset Hardware ECC for read */
90 #define NAND_ECC_READ 0
91 /* Reset Hardware ECC for write */
92 #define NAND_ECC_WRITE 1
93 /* Enable Hardware ECC before syndrome is read back from flash */
94 #define NAND_ECC_READSYN 2
95
96 /*
97 * Enable generic NAND 'page erased' check. This check is only done when
98 * ecc.correct() returns -EBADMSG.
99 * Set this flag if your implementation does not fix bitflips in erased
100 * pages and you want to rely on the default implementation.
101 */
102 #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
103
/*
 * Option constants for bizarre dysfunctionality and real
 * features.
 */
108
109 /* Buswidth is 16 bit */
110 #define NAND_BUSWIDTH_16 BIT(1)
111
112 /*
113 * When using software implementation of Hamming, we can specify which byte
114 * ordering should be used.
115 */
116 #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
117
118 /* Chip has cache program function */
119 #define NAND_CACHEPRG BIT(3)
120 /* Options valid for Samsung large page devices */
121 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
122
123 /*
124 * Chip requires ready check on read (for auto-incremented sequential read).
125 * True only for small page devices; large page devices do not support
126 * autoincrement.
127 */
128 #define NAND_NEED_READRDY BIT(8)
129
130 /* Chip does not allow subpage writes */
131 #define NAND_NO_SUBPAGE_WRITE BIT(9)
132
133 /* Device is one of 'new' xD cards that expose fake nand command set */
134 #define NAND_BROKEN_XD BIT(10)
135
136 /* Device behaves just like nand, but is readonly */
137 #define NAND_ROM BIT(11)
138
139 /* Device supports subpage reads */
140 #define NAND_SUBPAGE_READ BIT(12)
141 /* Macros to identify the above */
142 #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
143
144 /*
145 * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
146 * patterns.
147 */
148 #define NAND_NEED_SCRAMBLING BIT(13)
149
150 /* Device needs 3rd row address cycle */
151 #define NAND_ROW_ADDR_3 BIT(14)
152
153 /* Non chip related options */
154 /* This option skips the bbt scan during initialization. */
155 #define NAND_SKIP_BBTSCAN BIT(16)
156 /* Chip may not exist, so silence any errors in scan */
157 #define NAND_SCAN_SILENT_NODEV BIT(18)
158
/*
 * Autodetect nand buswidth with readid/onfi.
 * This assumes the driver will configure the hardware in 8 bits mode
 * when calling nand_scan_ident, and update its configuration
 * before calling nand_scan_tail.
 */
165 #define NAND_BUSWIDTH_AUTO BIT(19)
166
167 /*
168 * This option could be defined by controller drivers to protect against
169 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
170 */
171 #define NAND_USES_DMA BIT(20)
172
173 /*
174 * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
175 * on the default ->cmdfunc() implementation, you may want to let the core
176 * handle the tCCS delay which is required when a column change (RNDIN or
177 * RNDOUT) is requested.
178 * If your controller already takes care of this delay, you don't need to set
179 * this flag.
180 */
181 #define NAND_WAIT_TCCS BIT(21)
182
183 /*
184 * Whether the NAND chip is a boot medium. Drivers might use this information
185 * to select ECC algorithms supported by the boot ROM or similar restrictions.
186 */
187 #define NAND_IS_BOOT_MEDIUM BIT(22)
188
189 /*
190 * Do not try to tweak the timings at runtime. This is needed when the
191 * controller initializes the timings on itself or when it relies on
192 * configuration done by the bootloader.
193 */
194 #define NAND_KEEP_TIMINGS BIT(23)
195
196 /*
197 * There are different places where the manufacturer stores the factory bad
198 * block markers.
199 *
200 * Position within the block: Each of these pages needs to be checked for a
201 * bad block marking pattern.
202 */
203 #define NAND_BBM_FIRSTPAGE BIT(24)
204 #define NAND_BBM_SECONDPAGE BIT(25)
205 #define NAND_BBM_LASTPAGE BIT(26)
206
207 /*
208 * Some controllers with pipelined ECC engines override the BBM marker with
209 * data or ECC bytes, thus making bad block detection through bad block marker
210 * impossible. Let's flag those chips so the core knows it shouldn't check the
211 * BBM and consider all blocks good.
212 */
213 #define NAND_NO_BBM_QUIRK BIT(27)
214
215 /* Cell info constants */
216 #define NAND_CI_CHIPNR_MSK 0x03
217 #define NAND_CI_CELLTYPE_MSK 0x0C
218 #define NAND_CI_CELLTYPE_SHIFT 2
219
220 /* Position within the OOB data of the page */
221 #define NAND_BBM_POS_SMALL 5
222 #define NAND_BBM_POS_LARGE 0
223
/**
 * struct nand_parameters - NAND generic parameters from the parameter page
 * @model: Model name
 * @supports_set_get_features: The NAND chip supports setting/getting features
 * @supports_read_cache: The NAND chip supports read cache operations
 * @set_feature_list: Bitmap of features that can be set
 * @get_feature_list: Bitmap of features that can be retrieved
 * @onfi: ONFI specific parameters, NULL for non-ONFI chips
 */
struct nand_parameters {
	/* Generic parameters */
	const char *model;
	bool supports_set_get_features;
	bool supports_read_cache;
	DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
	DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);

	/* ONFI parameters */
	struct onfi_params *onfi;
};

/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8

/**
 * struct nand_id - NAND id structure
 * @data: buffer containing the id bytes.
 * @len: ID length (number of valid bytes in @data).
 */
struct nand_id {
	u8 data[NAND_MAX_ID_LEN];
	int len;
};
257
/**
 * struct nand_ecc_step_info - ECC step information of ECC engine
 * @stepsize: data bytes per ECC step
 * @strengths: array of supported strengths
 * @nstrengths: number of supported strengths
 */
struct nand_ecc_step_info {
	int stepsize;
	const int *strengths;
	int nstrengths;
};

/**
 * struct nand_ecc_caps - capability of ECC engine
 * @stepinfos: array of ECC step information
 * @nstepinfos: number of ECC step information
 * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
 */
struct nand_ecc_caps {
	const struct nand_ecc_step_info *stepinfos;
	int nstepinfos;
	int (*calc_ecc_bytes)(int step_size, int strength);
};

/*
 * A shorthand to generate a struct nand_ecc_caps with only one ECC stepsize.
 * Expands to three static const objects: the strengths array, the single
 * step info and the caps structure named __name.
 */
#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...)	\
static const int __name##_strengths[] = { __VA_ARGS__ };	\
static const struct nand_ecc_step_info __name##_stepinfo = {	\
	.stepsize = __step,					\
	.strengths = __name##_strengths,			\
	.nstrengths = ARRAY_SIZE(__name##_strengths),		\
};								\
static const struct nand_ecc_caps __name = {			\
	.stepinfos = &__name##_stepinfo,			\
	.nstepinfos = 1,					\
	.calc_ecc_bytes = __calc,				\
}
295
/**
 * struct nand_ecc_ctrl - Control structure for ECC
 * @engine_type: ECC engine type
 * @placement: OOB bytes placement
 * @algo: ECC algorithm
 * @steps: number of ECC steps per page
 * @size: data bytes per ECC step
 * @bytes: ECC bytes per step
 * @strength: max number of correctable bits per ECC step
 * @total: total number of ECC bytes per page
 * @prepad: padding information for syndrome based ECC generators
 * @postpad: padding information for syndrome based ECC generators
 * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
 * @calc_buf: buffer for calculated ECC, size is oobsize.
 * @code_buf: buffer for ECC read from flash, size is oobsize.
 * @hwctl: function to control hardware ECC generator. Must only
 *         be provided if a hardware ECC is available
 * @calculate: function for ECC calculation or readback from ECC hardware
 * @correct: function for ECC correction, matching to ECC generator (sw/hw).
 *           Should return a positive number representing the number of
 *           corrected bitflips, -EBADMSG if the number of bitflips exceed
 *           ECC strength, or any other error code if the error is not
 *           directly related to correction.
 *           If -EBADMSG is returned the input buffers should be left
 *           untouched.
 * @read_page_raw: function to read a raw page without ECC. This function
 *                 should hide the specific layout used by the ECC
 *                 controller and always return contiguous in-band and
 *                 out-of-band data even if they're not stored
 *                 contiguously on the NAND chip (e.g.
 *                 NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
 *                 out-of-band data).
 * @write_page_raw: function to write a raw page without ECC. This function
 *                  should hide the specific layout used by the ECC
 *                  controller and consider the passed data as contiguous
 *                  in-band and out-of-band data. ECC controller is
 *                  responsible for doing the appropriate transformations
 *                  to adapt to its specific layout (e.g.
 *                  NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
 *                  out-of-band data).
 * @read_page: function to read a page according to the ECC generator
 *             requirements; returns maximum number of bitflips corrected in
 *             any single ECC step, -EIO hw error
 * @read_subpage: function to read parts of the page covered by ECC;
 *                returns same as read_page()
 * @write_subpage: function to write parts of the page covered by ECC.
 * @write_page: function to write a page according to the ECC generator
 *              requirements.
 * @write_oob_raw: function to write chip OOB data without ECC
 * @read_oob_raw: function to read chip OOB data without ECC
 * @read_oob: function to read chip OOB data
 * @write_oob: function to write chip OOB data
 */
struct nand_ecc_ctrl {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	int steps;
	int size;
	int bytes;
	int total;
	int strength;
	int prepad;
	int postpad;
	unsigned int options;
	u8 *calc_buf;
	u8 *code_buf;
	void (*hwctl)(struct nand_chip *chip, int mode);
	int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
			 uint8_t *ecc_code);
	int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
		       uint8_t *calc_ecc);
	int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page);
	int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
			      int oob_required, int page);
	int (*read_page)(struct nand_chip *chip, uint8_t *buf,
			 int oob_required, int page);
	int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
			    uint32_t len, uint8_t *buf, int page);
	int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
			     uint32_t data_len, const uint8_t *data_buf,
			     int oob_required, int page);
	int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
			  int oob_required, int page);
	int (*write_oob_raw)(struct nand_chip *chip, int page);
	int (*read_oob_raw)(struct nand_chip *chip, int page);
	int (*read_oob)(struct nand_chip *chip, int page);
	int (*write_oob)(struct nand_chip *chip, int page);
};
386
/**
 * struct nand_sdr_timings - SDR NAND chip timings
 *
 * This struct defines the timing requirements of a SDR NAND chip.
 * These information can be found in every NAND datasheets and the timings
 * meaning are described in the ONFI specifications:
 * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
 * (chapter 4.15 Timing Parameters)
 *
 * All these timings are expressed in picoseconds.
 *
 * @tBERS_max: Block erase time
 * @tCCS_min: Change column setup time
 * @tPROG_max: Page program time
 * @tR_max: Page read time
 * @tALH_min: ALE hold time
 * @tADL_min: ALE to data loading time
 * @tALS_min: ALE setup time
 * @tAR_min: ALE to RE# delay
 * @tCEA_max: CE# access time
 * @tCEH_min: CE# high hold time
 * @tCH_min: CE# hold time
 * @tCHZ_max: CE# high to output hi-Z
 * @tCLH_min: CLE hold time
 * @tCLR_min: CLE to RE# delay
 * @tCLS_min: CLE setup time
 * @tCOH_min: CE# high to output hold
 * @tCS_min: CE# setup time
 * @tDH_min: Data hold time
 * @tDS_min: Data setup time
 * @tFEAT_max: Busy time for Set Features and Get Features
 * @tIR_min: Output hi-Z to RE# low
 * @tITC_max: Interface and Timing Mode Change time
 * @tRC_min: RE# cycle time
 * @tREA_max: RE# access time
 * @tREH_min: RE# high hold time
 * @tRHOH_min: RE# high to output hold
 * @tRHW_min: RE# high to WE# low
 * @tRHZ_max: RE# high to output hi-Z
 * @tRLOH_min: RE# low to output hold
 * @tRP_min: RE# pulse width
 * @tRR_min: Ready to RE# low (data only)
 * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
 *	      rising edge of R/B#.
 * @tWB_max: WE# high to SR[6] low
 * @tWC_min: WE# cycle time
 * @tWH_min: WE# high hold time
 * @tWHR_min: WE# high to RE# low
 * @tWP_min: WE# pulse width
 * @tWW_min: WP# transition to WE# low
 */
struct nand_sdr_timings {
	/* u64 fields: values may exceed the ~4.29ms expressible in a u32 */
	u64 tBERS_max;
	u32 tCCS_min;
	u64 tPROG_max;
	u64 tR_max;
	u32 tALH_min;
	u32 tADL_min;
	u32 tALS_min;
	u32 tAR_min;
	u32 tCEA_max;
	u32 tCEH_min;
	u32 tCH_min;
	u32 tCHZ_max;
	u32 tCLH_min;
	u32 tCLR_min;
	u32 tCLS_min;
	u32 tCOH_min;
	u32 tCS_min;
	u32 tDH_min;
	u32 tDS_min;
	u32 tFEAT_max;
	u32 tIR_min;
	u32 tITC_max;
	u32 tRC_min;
	u32 tREA_max;
	u32 tREH_min;
	u32 tRHOH_min;
	u32 tRHW_min;
	u32 tRHZ_max;
	u32 tRLOH_min;
	u32 tRP_min;
	u32 tRR_min;
	u64 tRST_max;
	u32 tWB_max;
	u32 tWC_min;
	u32 tWH_min;
	u32 tWHR_min;
	u32 tWP_min;
	u32 tWW_min;
};
478
/**
 * struct nand_nvddr_timings - NV-DDR NAND chip timings
 *
 * This struct defines the timing requirements of a NV-DDR NAND data interface.
 * These information can be found in every NAND datasheets and the timings
 * meaning are described in the ONFI specifications:
 * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
 * (chapter 4.18.2 NV-DDR)
 *
 * All these timings are expressed in picoseconds.
 *
 * @tBERS_max: Block erase time
 * @tCCS_min: Change column setup time
 * @tPROG_max: Page program time
 * @tR_max: Page read time
 * @tAC_min: Access window of DQ[7:0] from CLK
 * @tAC_max: Access window of DQ[7:0] from CLK
 * @tADL_min: ALE to data loading time
 * @tCAD_min: Command, Address, Data delay
 * @tCAH_min: Command/Address DQ hold time
 * @tCALH_min: W/R_n, CLE and ALE hold time
 * @tCALS_min: W/R_n, CLE and ALE setup time
 * @tCAS_min: Command/address DQ setup time
 * @tCEH_min: CE# high hold time
 * @tCH_min: CE# hold time
 * @tCK_min: Average clock cycle time
 * @tCS_min: CE# setup time
 * @tDH_min: Data hold time
 * @tDQSCK_min: Start of the access window of DQS from CLK
 * @tDQSCK_max: End of the access window of DQS from CLK
 * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
 * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
 * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
 * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
 * @tDS_min: Data setup time
 * @tDSC_min: DQS cycle time
 * @tFEAT_max: Busy time for Set Features and Get Features
 * @tITC_max: Interface and Timing Mode Change time
 * @tQHS_max: Data hold skew factor
 * @tRHW_min: Data output cycle to command, address, or data input cycle
 * @tRR_min: Ready to RE# low (data only)
 * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
 *	      rising edge of R/B#.
 * @tWB_max: WE# high to SR[6] low
 * @tWHR_min: WE# high to RE# low
 * @tWRCK_min: W/R_n low to data output cycle
 * @tWW_min: WP# transition to WE# low
 */
struct nand_nvddr_timings {
	u64 tBERS_max;
	u32 tCCS_min;
	u64 tPROG_max;
	u64 tR_max;
	u32 tAC_min;
	u32 tAC_max;
	u32 tADL_min;
	u32 tCAD_min;
	u32 tCAH_min;
	u32 tCALH_min;
	u32 tCALS_min;
	u32 tCAS_min;
	u32 tCEH_min;
	u32 tCH_min;
	u32 tCK_min;
	u32 tCS_min;
	u32 tDH_min;
	u32 tDQSCK_min;
	u32 tDQSCK_max;
	u32 tDQSD_min;
	u32 tDQSD_max;
	u32 tDQSHZ_max;
	u32 tDQSQ_max;
	u32 tDS_min;
	u32 tDSC_min;
	u32 tFEAT_max;
	u32 tITC_max;
	u32 tQHS_max;
	u32 tRHW_min;
	u32 tRR_min;
	/*
	 * NOTE(review): tRST_max is u64 in struct nand_sdr_timings although
	 * the "common timings" comment below lists it as shared between both
	 * interfaces; a u32 caps the value at ~4.29ms — confirm no NV-DDR
	 * chip needs a larger reset time before relying on this width.
	 */
	u32 tRST_max;
	u32 tWB_max;
	u32 tWHR_min;
	u32 tWRCK_min;
	u32 tWW_min;
};
564
565 /*
566 * While timings related to the data interface itself are mostly different
567 * between SDR and NV-DDR, timings related to the internal chip behavior are
568 * common. IOW, the following entries which describe the internal delays have
569 * the same definition and are shared in both SDR and NV-DDR timing structures:
570 * - tADL_min
571 * - tBERS_max
572 * - tCCS_min
573 * - tFEAT_max
574 * - tPROG_max
575 * - tR_max
576 * - tRR_min
577 * - tRST_max
578 * - tWB_max
579 *
580 * The below macros return the value of a given timing, no matter the interface.
581 */
/*
 * Return the value of a common timing in picoseconds, whatever the interface
 * type. The expansion is fully parenthesized so the ternary cannot
 * misassociate when the macro is used inside a larger expression.
 */
#define NAND_COMMON_TIMING_PS(conf, timing_name)		\
	(nand_interface_is_sdr(conf) ?				\
		 nand_get_sdr_timings(conf)->timing_name :	\
		 nand_get_nvddr_timings(conf)->timing_name)

/* Same as NAND_COMMON_TIMING_PS() but rounded up to milliseconds */
#define NAND_COMMON_TIMING_MS(conf, timing_name) \
	PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))

/* Same as NAND_COMMON_TIMING_PS() but rounded up to nanoseconds */
#define NAND_COMMON_TIMING_NS(conf, timing_name) \
	PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
592
/**
 * enum nand_interface_type - NAND interface type
 * @NAND_SDR_IFACE: Single Data Rate interface
 * @NAND_NVDDR_IFACE: Double Data Rate interface
 */
enum nand_interface_type {
	NAND_SDR_IFACE,
	NAND_NVDDR_IFACE,
};

/**
 * struct nand_interface_config - NAND interface timing
 * @type: type of the timing
 * @timings: The timing information
 * @timings.mode: Timing mode as defined in the specification
 * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
 * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
 */
struct nand_interface_config {
	enum nand_interface_type type;
	struct nand_timings {
		unsigned int mode;
		/* Anonymous union: only the member matching @type is valid */
		union {
			struct nand_sdr_timings sdr;
			struct nand_nvddr_timings nvddr;
		};
	} timings;
};
621
622 /**
623 * nand_interface_is_sdr - get the interface type
624 * @conf: The data interface
625 */
nand_interface_is_sdr(const struct nand_interface_config * conf)626 static bool nand_interface_is_sdr(const struct nand_interface_config *conf)
627 {
628 return conf->type == NAND_SDR_IFACE;
629 }
630
631 /**
632 * nand_interface_is_nvddr - get the interface type
633 * @conf: The data interface
634 */
nand_interface_is_nvddr(const struct nand_interface_config * conf)635 static bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
636 {
637 return conf->type == NAND_NVDDR_IFACE;
638 }
639
640 /**
641 * nand_get_sdr_timings - get SDR timing from data interface
642 * @conf: The data interface
643 */
644 static inline const struct nand_sdr_timings *
nand_get_sdr_timings(const struct nand_interface_config * conf)645 nand_get_sdr_timings(const struct nand_interface_config *conf)
646 {
647 if (!nand_interface_is_sdr(conf))
648 return ERR_PTR(-EINVAL);
649
650 return &conf->timings.sdr;
651 }
652
653 /**
654 * nand_get_nvddr_timings - get NV-DDR timing from data interface
655 * @conf: The data interface
656 */
657 static inline const struct nand_nvddr_timings *
nand_get_nvddr_timings(const struct nand_interface_config * conf)658 nand_get_nvddr_timings(const struct nand_interface_config *conf)
659 {
660 if (!nand_interface_is_nvddr(conf))
661 return ERR_PTR(-EINVAL);
662
663 return &conf->timings.nvddr;
664 }
665
/**
 * struct nand_op_cmd_instr - Definition of a command instruction
 * @opcode: the command to issue in one cycle
 */
struct nand_op_cmd_instr {
	u8 opcode;
};

/**
 * struct nand_op_addr_instr - Definition of an address instruction
 * @naddrs: length of the @addrs array
 * @addrs: array containing the address cycles to issue
 */
struct nand_op_addr_instr {
	unsigned int naddrs;
	const u8 *addrs;
};

/**
 * struct nand_op_data_instr - Definition of a data instruction
 * @len: number of data bytes to move
 * @buf: buffer to fill
 * @buf.in: buffer to fill when reading from the NAND chip
 * @buf.out: buffer to read from when writing to the NAND chip
 * @force_8bit: force 8-bit access
 *
 * Please note that "in" and "out" are inverted from the ONFI specification
 * and are from the controller perspective, so an "in" is a read from the NAND
 * chip while an "out" is a write to the NAND chip.
 */
struct nand_op_data_instr {
	unsigned int len;
	union {
		void *in;
		const void *out;
	} buf;
	bool force_8bit;
};

/**
 * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
 * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
 */
struct nand_op_waitrdy_instr {
	unsigned int timeout_ms;
};

/**
 * enum nand_op_instr_type - Definition of all instruction types
 * @NAND_OP_CMD_INSTR: command instruction
 * @NAND_OP_ADDR_INSTR: address instruction
 * @NAND_OP_DATA_IN_INSTR: data in instruction
 * @NAND_OP_DATA_OUT_INSTR: data out instruction
 * @NAND_OP_WAITRDY_INSTR: wait ready instruction
 */
enum nand_op_instr_type {
	NAND_OP_CMD_INSTR,
	NAND_OP_ADDR_INSTR,
	NAND_OP_DATA_IN_INSTR,
	NAND_OP_DATA_OUT_INSTR,
	NAND_OP_WAITRDY_INSTR,
};
728
/**
 * struct nand_op_instr - Instruction object
 * @type: the instruction type
 * @ctx: extra data associated to the instruction. You'll have to use the
 *       appropriate element depending on @type
 * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
 * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
 * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
 *	      or %NAND_OP_DATA_OUT_INSTR
 * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
 * @delay_ns: delay the controller should apply after the instruction has been
 *	      issued on the bus. Most modern controllers have internal timings
 *	      control logic, and in this case, the controller driver can ignore
 *	      this field.
 */
struct nand_op_instr {
	enum nand_op_instr_type type;
	union {
		struct nand_op_cmd_instr cmd;
		struct nand_op_addr_instr addr;
		struct nand_op_data_instr data;
		struct nand_op_waitrdy_instr waitrdy;
	} ctx;
	unsigned int delay_ns;
};

/*
 * Special handling must be done for the WAITRDY timeout parameter as it usually
 * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
 * tBERS (during an erase) which all of them are u64 values that cannot be
 * divided by usual kernel macros and must be handled with the special
 * DIV_ROUND_UP_ULL() macro.
 *
 * Cast to type of dividend is needed here to guarantee that the result won't
 * be an unsigned long long when the dividend is an unsigned long (or smaller),
 * which is what the compiler does when it sees ternary operator with 2
 * different return types (picks the largest type to make sure there's no
 * loss).
 */
#define __DIVIDE(dividend, divisor) ({						\
	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ?	\
			       DIV_ROUND_UP(dividend, divisor) :		\
			       DIV_ROUND_UP_ULL(dividend, divisor));		\
	})
/* Round a picosecond value up to nanoseconds/milliseconds */
#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
775
/* Initialize a command instruction issuing opcode @id */
#define NAND_OP_CMD(id, ns)						\
	{								\
		.type = NAND_OP_CMD_INSTR,				\
		.ctx.cmd.opcode = id,					\
		.delay_ns = ns,						\
	}

/* Initialize an address instruction issuing @ncycles cycles from @cycles */
#define NAND_OP_ADDR(ncycles, cycles, ns)				\
	{								\
		.type = NAND_OP_ADDR_INSTR,				\
		.ctx.addr = {						\
			.naddrs = ncycles,				\
			.addrs = cycles,				\
		},							\
		.delay_ns = ns,						\
	}

/* Initialize a data-in (read from NAND) instruction of @l bytes into @b */
#define NAND_OP_DATA_IN(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_IN_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.in = b,					\
			.force_8bit = false,				\
		},							\
		.delay_ns = ns,						\
	}

/* Initialize a data-out (write to NAND) instruction of @l bytes from @b */
#define NAND_OP_DATA_OUT(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_OUT_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.out = b,					\
			.force_8bit = false,				\
		},							\
		.delay_ns = ns,						\
	}

/* Same as NAND_OP_DATA_IN() but forcing 8-bit bus accesses */
#define NAND_OP_8BIT_DATA_IN(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_IN_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.in = b,					\
			.force_8bit = true,				\
		},							\
		.delay_ns = ns,						\
	}

/* Same as NAND_OP_DATA_OUT() but forcing 8-bit bus accesses */
#define NAND_OP_8BIT_DATA_OUT(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_OUT_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.out = b,					\
			.force_8bit = true,				\
		},							\
		.delay_ns = ns,						\
	}

/* Initialize a wait-ready instruction with a @tout_ms timeout */
#define NAND_OP_WAIT_RDY(tout_ms, ns)					\
	{								\
		.type = NAND_OP_WAITRDY_INSTR,				\
		.ctx.waitrdy.timeout_ms = tout_ms,			\
		.delay_ns = ns,						\
	}
843
/**
 * struct nand_subop - a sub operation
 * @cs: the CS line to select for this NAND sub-operation
 * @instrs: array of instructions
 * @ninstrs: length of the @instrs array
 * @first_instr_start_off: offset to start from for the first instruction
 *			   of the sub-operation
 * @last_instr_end_off: offset to end at (excluded) for the last instruction
 *			of the sub-operation
 *
 * Both @first_instr_start_off and @last_instr_end_off only apply to data or
 * address instructions.
 *
 * When an operation cannot be handled as is by the NAND controller, it will
 * be split by the parser into sub-operations which will be passed to the
 * controller driver.
 */
struct nand_subop {
	unsigned int cs;
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	unsigned int first_instr_start_off;
	unsigned int last_instr_end_off;
};

/*
 * Helpers for ->exec() hooks: retrieve the effective start offset/length of
 * the address or data instruction at @op_id, taking the sub-operation
 * boundaries (first_instr_start_off/last_instr_end_off) into account.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int op_id);
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int op_id);
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int op_id);
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int op_id);
877
/**
 * struct nand_op_parser_addr_constraints - Constraints for address instructions
 * @maxcycles: maximum number of address cycles the controller can issue in a
 *	       single step
 */
struct nand_op_parser_addr_constraints {
	unsigned int maxcycles;
};

/**
 * struct nand_op_parser_data_constraints - Constraints for data instructions
 * @maxlen: maximum data length that the controller can handle in a single step
 */
struct nand_op_parser_data_constraints {
	unsigned int maxlen;
};

/**
 * struct nand_op_parser_pattern_elem - One element of a pattern
 * @type: the instruction type
 * @optional: whether this element of the pattern is optional or mandatory
 * @ctx: address or data constraint
 * @ctx.addr: address constraint (number of cycles)
 * @ctx.data: data constraint (data length)
 */
struct nand_op_parser_pattern_elem {
	enum nand_op_instr_type type;
	bool optional;
	union {
		struct nand_op_parser_addr_constraints addr;
		struct nand_op_parser_data_constraints data;
	} ctx;
};

/* Pattern element matching a command instruction */
#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt)			\
	{							\
		.type = NAND_OP_CMD_INSTR,			\
		.optional = _opt,				\
	}

/* Pattern element matching an address instruction of up to @_maxcycles */
#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles)		\
	{							\
		.type = NAND_OP_ADDR_INSTR,			\
		.optional = _opt,				\
		.ctx.addr.maxcycles = _maxcycles,		\
	}

/* Pattern element matching a data-in instruction of up to @_maxlen bytes */
#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen)		\
	{							\
		.type = NAND_OP_DATA_IN_INSTR,			\
		.optional = _opt,				\
		.ctx.data.maxlen = _maxlen,			\
	}

/* Pattern element matching a data-out instruction of up to @_maxlen bytes */
#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen)		\
	{							\
		.type = NAND_OP_DATA_OUT_INSTR,			\
		.optional = _opt,				\
		.ctx.data.maxlen = _maxlen,			\
	}

/* Pattern element matching a wait-ready instruction */
#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt)			\
	{							\
		.type = NAND_OP_WAITRDY_INSTR,			\
		.optional = _opt,				\
	}
944
945 /**
946 * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
947 * @elems: array of pattern elements
948 * @nelems: number of pattern elements in @elems array
949 * @exec: the function that will issue a sub-operation
950 *
951 * A pattern is a list of elements, each element reprensenting one instruction
952 * with its constraints. The pattern itself is used by the core to match NAND
953 * chip operation with NAND controller operations.
954 * Once a match between a NAND controller operation pattern and a NAND chip
955 * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
956 * hook is called so that the controller driver can issue the operation on the
957 * bus.
958 *
959 * Controller drivers should declare as many patterns as they support and pass
960 * this list of patterns (created with the help of the following macro) to
961 * the nand_op_parser_exec_op() helper.
962 */
963 struct nand_op_parser_pattern {
964 const struct nand_op_parser_pattern_elem *elems;
965 unsigned int nelems;
966 int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
967 };
968
/*
 * Instantiate a struct nand_op_parser_pattern from a ->exec() hook and a
 * variadic list of pattern elements (built with the
 * NAND_OP_PARSER_PAT_*_ELEM() helpers above). @nelems is derived
 * automatically from the number of elements passed in.
 */
#define NAND_OP_PARSER_PATTERN(_exec, ...)							\
	{											\
		.exec = _exec,									\
		.elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ },		\
		.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) /	\
			  sizeof(struct nand_op_parser_pattern_elem),				\
	}
976
/**
 * struct nand_op_parser - NAND controller operation parser descriptor
 * @patterns: array of supported patterns
 * @npatterns: length of the @patterns array
 *
 * The parser descriptor is just an array of supported patterns which will be
 * iterated by nand_op_parser_exec_op() every time it tries to execute a
 * NAND operation (or tries to determine if a specific operation is supported).
 *
 * It is worth mentioning that patterns will be tested in their declaration
 * order, and the first match will be taken, so it's important to order patterns
 * appropriately so that simple/inefficient patterns are placed at the end of
 * the list. Usually, this is where you put single instruction patterns.
 */
struct nand_op_parser {
	const struct nand_op_parser_pattern *patterns;
	unsigned int npatterns;
};
995
/*
 * Instantiate a struct nand_op_parser from a variadic list of patterns (each
 * built with NAND_OP_PARSER_PATTERN()). @npatterns is derived automatically
 * from the number of patterns passed in.
 */
#define NAND_OP_PARSER(...)									\
	{											\
		.patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ },		\
		.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) /	\
			     sizeof(struct nand_op_parser_pattern),				\
	}
1002
/**
 * struct nand_operation - NAND operation descriptor
 * @cs: the CS line to select for this NAND operation
 * @instrs: array of instructions to execute
 * @ninstrs: length of the @instrs array
 *
 * The actual operation structure that will be passed to chip->exec_op().
 */
struct nand_operation {
	unsigned int cs;
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
};

/*
 * Instantiate a struct nand_operation targeting chip select @_cs. @ninstrs
 * is derived with ARRAY_SIZE(), so @_instrs must be an actual array, not a
 * pointer.
 */
#define NAND_OPERATION(_cs, _instrs)		\
	{					\
		.cs = _cs,			\
		.instrs = _instrs,		\
		.ninstrs = ARRAY_SIZE(_instrs),	\
	}
1023
1024 int nand_op_parser_exec_op(struct nand_chip *chip,
1025 const struct nand_op_parser *parser,
1026 const struct nand_operation *op, bool check_only);
1027
nand_op_trace(const char * prefix,const struct nand_op_instr * instr)1028 static inline void nand_op_trace(const char *prefix,
1029 const struct nand_op_instr *instr)
1030 {
1031 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
1032 switch (instr->type) {
1033 case NAND_OP_CMD_INSTR:
1034 pr_debug("%sCMD [0x%02x]\n", prefix,
1035 instr->ctx.cmd.opcode);
1036 break;
1037 case NAND_OP_ADDR_INSTR:
1038 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
1039 instr->ctx.addr.naddrs,
1040 instr->ctx.addr.naddrs < 64 ?
1041 instr->ctx.addr.naddrs : 64,
1042 instr->ctx.addr.addrs);
1043 break;
1044 case NAND_OP_DATA_IN_INSTR:
1045 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
1046 instr->ctx.data.len,
1047 instr->ctx.data.force_8bit ?
1048 ", force 8-bit" : "");
1049 break;
1050 case NAND_OP_DATA_OUT_INSTR:
1051 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
1052 instr->ctx.data.len,
1053 instr->ctx.data.force_8bit ?
1054 ", force 8-bit" : "");
1055 break;
1056 case NAND_OP_WAITRDY_INSTR:
1057 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
1058 instr->ctx.waitrdy.timeout_ms);
1059 break;
1060 }
1061 #endif
1062 }
1063
/**
 * struct nand_controller_ops - Controller operations
 *
 * @attach_chip: this method is called after the NAND detection phase after
 *		 flash ID and MTD fields such as erase size, page size and OOB
 *		 size have been set up. ECC requirements are available if
 *		 provided by the NAND chip or device tree. Typically used to
 *		 choose the appropriate ECC configuration and allocate
 *		 associated resources.
 *		 This hook is optional.
 * @detach_chip: free all resources allocated/claimed in
 *		 nand_controller_ops->attach_chip().
 *		 This hook is optional.
 * @exec_op: controller specific method to execute NAND operations.
 *	     This method replaces chip->legacy.cmdfunc(),
 *	     chip->legacy.{read,write}_{buf,byte,word}(),
 *	     chip->legacy.dev_ready() and chip->legacy.waitfunc().
 *	     When the check_only argument is true, the operation should only
 *	     be checked for support, not executed (mirrors the check_only
 *	     parameter of nand_op_parser_exec_op()).
 * @setup_interface: setup the data interface and timing. If chipnr is set to
 *		     %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
 *		     should not be applied but only checked.
 *		     This hook is optional.
 */
struct nand_controller_ops {
	int (*attach_chip)(struct nand_chip *chip);
	void (*detach_chip)(struct nand_chip *chip);
	int (*exec_op)(struct nand_chip *chip,
		       const struct nand_operation *op,
		       bool check_only);
	int (*setup_interface)(struct nand_chip *chip, int chipnr,
			       const struct nand_interface_config *conf);
};
1095
/**
 * struct nand_controller - Structure used to describe a NAND controller
 *
 * @lock: lock used to serialize accesses to the NAND controller
 * @ops: NAND controller operations.
 * @supported_op: NAND controller known-to-be-supported operations,
 *		  only writable by the core after initial checking.
 * @supported_op.data_only_read: The controller supports reading more data from
 *		  the bus without restarting an entire read operation nor
 *		  changing the column.
 * @supported_op.cont_read: The controller supports sequential cache reads.
 */
struct nand_controller {
	struct mutex lock;
	const struct nand_controller_ops *ops;
	struct {
		unsigned int data_only_read: 1;
		unsigned int cont_read: 1;
	} supported_op;
};

/**
 * nand_controller_init - Initialize a NAND controller structure
 * @nfc: the NAND controller to initialize
 *
 * Only the @lock mutex is initialized here; @ops and @supported_op are left
 * for the driver and the core to fill in.
 */
static inline void nand_controller_init(struct nand_controller *nfc)
{
	mutex_init(&nfc->lock);
}
1121
/**
 * struct nand_legacy - NAND chip legacy fields/hooks
 * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
 * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
 * @select_chip: select/deselect a specific target/die
 * @read_byte: read one byte from the chip
 * @write_byte: write a single byte to the chip on the low 8 I/O lines
 * @write_buf: write data from the buffer to the chip
 * @read_buf: read data from the chip into the buffer
 * @cmd_ctrl: hardware-specific function for controlling ALE/CLE/nCE. Also used
 *	      to write command and address
 * @cmdfunc: hardware-specific function for writing commands to the chip.
 * @dev_ready: hardware-specific function for accessing device ready/busy line.
 *	       If set to NULL no access to ready/busy is available and the
 *	       ready/busy information is read from the chip status register.
 * @waitfunc: hardware-specific function for wait on ready.
 * @block_bad: check if a block is bad, using OOB markers
 * @block_markbad: mark a block bad
 * @set_features: set the NAND chip features
 * @get_features: get the NAND chip features
 * @chip_delay: chip dependent delay for transferring data from array to read
 *		regs (tR).
 * @dummy_controller: dummy controller implementation for drivers that can
 *		      only control a single chip
 *
 * If you look at this structure you're already wrong. These fields/hooks are
 * all deprecated.
 */
struct nand_legacy {
	void __iomem *IO_ADDR_R;
	void __iomem *IO_ADDR_W;
	void (*select_chip)(struct nand_chip *chip, int cs);
	u8 (*read_byte)(struct nand_chip *chip);
	void (*write_byte)(struct nand_chip *chip, u8 byte);
	void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
	void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
	void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
	void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
			int page_addr);
	int (*dev_ready)(struct nand_chip *chip);
	int (*waitfunc)(struct nand_chip *chip);
	int (*block_bad)(struct nand_chip *chip, loff_t ofs);
	int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
	int (*set_features)(struct nand_chip *chip, int feature_addr,
			    u8 *subfeature_para);
	int (*get_features)(struct nand_chip *chip, int feature_addr,
			    u8 *subfeature_para);
	int chip_delay;
	struct nand_controller dummy_controller;
};
1172
/**
 * struct nand_chip_ops - NAND chip operations
 * @suspend: Suspend operation
 * @resume: Resume operation
 * @lock_area: Lock operation on the region defined by an offset and a length
 * @unlock_area: Unlock operation on the region defined by an offset and a
 *		 length
 * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
 * @choose_interface_config: Choose the best interface configuration
 */
struct nand_chip_ops {
	int (*suspend)(struct nand_chip *chip);
	void (*resume)(struct nand_chip *chip);
	int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
	int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
	int (*choose_interface_config)(struct nand_chip *chip,
				       struct nand_interface_config *iface);
};
1191
/**
 * struct nand_manufacturer - NAND manufacturer structure
 * @desc: The manufacturer description
 * @priv: Private information for the manufacturer driver
 *
 * Gathers the manufacturer-specific state attached to a NAND chip (see
 * nand_{get,set}_manufacturer_data()).
 */
struct nand_manufacturer {
	const struct nand_manufacturer_desc *desc;
	void *priv;
};

/**
 * struct nand_secure_region - NAND secure region structure
 * @offset: Offset of the start of the secure region
 * @size: Size of the secure region
 *
 * NOTE(review): units look like raw byte offsets/sizes (plain u64 fields) —
 * confirm against the code populating chip->secure_regions.
 */
struct nand_secure_region {
	u64 offset;
	u64 size;
};
1211
1212 /**
1213 * struct nand_chip - NAND Private Flash Chip Data
1214 * @base: Inherit from the generic NAND device
1215 * @id: Holds NAND ID
1216 * @parameters: Holds generic parameters under an easily readable form
1217 * @manufacturer: Manufacturer information
1218 * @ops: NAND chip operations
1219 * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
1220 * to use any of these fields/hooks, and if you're modifying an
1221 * existing driver that is using those fields/hooks, you should
1222 * consider reworking the driver and avoid using them.
1223 * @options: Various chip options. They can partly be set to inform nand_scan
1224 * about special functionality. See the defines for further
1225 * explanation.
1226 * @current_interface_config: The currently used NAND interface configuration
1227 * @best_interface_config: The best NAND interface configuration which fits both
1228 * the NAND chip and NAND controller constraints. If
1229 * unset, the default reset interface configuration must
1230 * be used.
1231 * @bbt_erase_shift: Number of address bits in a bbt entry
1232 * @bbt_options: Bad block table specific options. All options used here must
1233 * come from bbm.h. By default, these options will be copied to
1234 * the appropriate nand_bbt_descr's.
1235 * @badblockpos: Bad block marker position in the oob area
1236 * @badblockbits: Minimum number of set bits in a good block's bad block marker
1237 * position; i.e., BBM = 11110111b is good when badblockbits = 7
1238 * @bbt_td: Bad block table descriptor for flash lookup
1239 * @bbt_md: Bad block table mirror descriptor
1240 * @badblock_pattern: Bad block scan pattern used for initial bad block scan
1241 * @bbt: Bad block table pointer
1242 * @page_shift: Number of address bits in a page (column address bits)
1243 * @phys_erase_shift: Number of address bits in a physical eraseblock
1244 * @chip_shift: Number of address bits in one chip
1245 * @pagemask: Page number mask = number of (pages / chip) - 1
1246 * @subpagesize: Holds the subpagesize
1247 * @data_buf: Buffer for data, size is (page size + oobsize)
1248 * @oob_poi: pointer on the OOB area covered by data_buf
1249 * @pagecache: Structure containing page cache related fields
1250 * @pagecache.bitflips: Number of bitflips of the cached page
1251 * @pagecache.page: Page number currently in the cache. -1 means no page is
1252 * currently cached
1253 * @buf_align: Minimum buffer alignment required by a platform
1254 * @lock: Lock protecting the suspended field. Also used to serialize accesses
1255 * to the NAND device
1256 * @suspended: Set to 1 when the device is suspended, 0 when it's not
1257 * @resume_wq: wait queue to sleep if rawnand is in suspended state.
1258 * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
1259 * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
1260 * NAND Controller drivers should not modify this value, but they're
1261 * allowed to read it.
1262 * @read_retries: The number of read retry modes supported
1263 * @secure_regions: Structure containing the secure regions info
1264 * @nr_secure_regions: Number of secure regions
1265 * @cont_read: Sequential page read internals
1266 * @cont_read.ongoing: Whether a continuous read is ongoing or not
1267 * @cont_read.first_page: Start of the continuous read operation
1268 * @cont_read.last_page: End of the continuous read operation
1269 * @controller: The hardware controller structure which is shared among multiple
1270 * independent devices
1271 * @ecc: The ECC controller structure
1272 * @priv: Chip private data
1273 */
struct nand_chip {
	struct nand_device base;
	struct nand_id id;
	struct nand_parameters parameters;
	struct nand_manufacturer manufacturer;
	struct nand_chip_ops ops;
	struct nand_legacy legacy;
	unsigned int options;

	/* Data interface */
	const struct nand_interface_config *current_interface_config;
	struct nand_interface_config *best_interface_config;

	/* Bad block information */
	unsigned int bbt_erase_shift;
	unsigned int bbt_options;
	unsigned int badblockpos;
	unsigned int badblockbits;
	struct nand_bbt_descr *bbt_td;
	struct nand_bbt_descr *bbt_md;
	struct nand_bbt_descr *badblock_pattern;
	u8 *bbt;

	/* Device internal layout */
	unsigned int page_shift;
	unsigned int phys_erase_shift;
	unsigned int chip_shift;
	unsigned int pagemask;
	unsigned int subpagesize;

	/* Buffers */
	u8 *data_buf;
	u8 *oob_poi;
	struct {
		unsigned int bitflips;
		int page; /* -1 == no page currently cached */
	} pagecache;
	unsigned long buf_align;

	/* Internals */
	struct mutex lock;
	unsigned int suspended : 1;
	wait_queue_head_t resume_wq;
	int cur_cs; /* -1 == no target currently selected */
	int read_retries;
	struct nand_secure_region *secure_regions;
	u8 nr_secure_regions;
	struct {
		bool ongoing;
		unsigned int first_page;
		unsigned int last_page;
	} cont_read;

	/* Externals */
	struct nand_controller *controller;
	struct nand_ecc_ctrl ecc;
	void *priv;
};
1332
/* Get the nand_chip embedding @mtd (the mtd_info lives at chip->base.mtd). */
static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_chip, base.mtd);
}

/* Get the mtd_info embedded in @chip. */
static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
{
	return &chip->base.mtd;
}
1342
/* Get the controller-private data attached to @chip. */
static inline void *nand_get_controller_data(struct nand_chip *chip)
{
	return chip->priv;
}

/* Attach controller-private data to @chip (stored in chip->priv). */
static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
{
	chip->priv = priv;
}

/* Attach manufacturer-driver private data to @chip. */
static inline void nand_set_manufacturer_data(struct nand_chip *chip,
					      void *priv)
{
	chip->manufacturer.priv = priv;
}

/* Get the manufacturer-driver private data attached to @chip. */
static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
{
	return chip->manufacturer.priv;
}
1363
/* Associate a device-tree node with the chip's underlying MTD device. */
static inline void nand_set_flash_node(struct nand_chip *chip,
				       struct device_node *np)
{
	mtd_set_of_node(nand_to_mtd(chip), np);
}

/* Retrieve the device-tree node associated with the chip's MTD device. */
static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
{
	return mtd_get_of_node(nand_to_mtd(chip));
}
1374
/**
 * nand_get_interface_config - Retrieve the current interface configuration
 *			       of a NAND chip
 * @chip: The NAND chip
 *
 * Return: the interface configuration currently in use, as stored in
 *	   chip->current_interface_config.
 */
static inline const struct nand_interface_config *
nand_get_interface_config(struct nand_chip *chip)
{
	return chip->current_interface_config;
}
1385
/*
 * A helper for defining older NAND chips where the second ID byte fully
 * defined the chip, including the geometry (chip size, eraseblock size, page
 * size). All these chips have 512 bytes NAND page size.
 */
#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)	  \
	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }

/*
 * A helper for defining newer chips which report their page size and
 * eraseblock size via the extended ID bytes.
 *
 * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
 * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
 * device ID now only represented a particular total chip size (and voltage,
 * buswidth), and the page size, eraseblock size, and OOB size could vary while
 * using the same device ID.
 */
#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
	  .options = (opts) }

/*
 * Datasheet ECC requirement: @_strength correctable bit errors per @_step
 * bytes. NAND_ECC_STRENGTH()/NAND_ECC_STEP() read those fields back from an
 * object embedding the same .ecc sub-struct (e.g. struct nand_flash_dev).
 */
#define NAND_ECC_INFO(_strength, _step)	\
	{ .strength_ds = (_strength), .step_ds = (_step) }
#define NAND_ECC_STRENGTH(type)		((type)->ecc.strength_ds)
#define NAND_ECC_STEP(type)		((type)->ecc.step_ds)
1413
/**
 * struct nand_flash_dev - NAND Flash Device ID Structure
 * @name: a human-readable name of the NAND chip
 * @mfr_id: manufacturer ID part of the full chip ID array (refers the same
 *          memory address as ``id[0]``)
 * @dev_id: device ID part of the full chip ID array (refers the same memory
 *          address as ``id[1]``), i.e. the second byte of the full chip ID
 * @id: full device ID array
 * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
 *            well as the eraseblock size) is determined from the extended NAND
 *            chip ID array)
 * @chipsize: total chip size in MiB
 * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
 * @options: stores various chip bit options
 * @id_len: The valid length of the @id.
 * @oobsize: OOB size
 * @ecc: ECC correctability and step information from the datasheet.
 * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
 *                   @ecc_strength_ds in nand_chip{}.
 * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
 *               @ecc_step_ds in nand_chip{}, also from the datasheet.
 *               For example, the "4bit ECC for each 512Byte" can be set with
 *               NAND_ECC_INFO(4, 512).
 */
struct nand_flash_dev {
	char *name;
	union {
		struct {
			uint8_t mfr_id;
			uint8_t dev_id;
		};
		uint8_t id[NAND_MAX_ID_LEN];
	};
	unsigned int pagesize;
	unsigned int chipsize;
	unsigned int erasesize;
	unsigned int options;
	uint16_t id_len;
	uint16_t oobsize;
	struct {
		uint16_t strength_ds;
		uint16_t step_ds;
	} ecc;
};
1459
1460 int nand_create_bbt(struct nand_chip *chip);
1461
/*
 * Check if it is a SLC nand.
 * The !nand_is_slc() can be used to check the MLC/TLC nand chips.
 * We do not distinguish the MLC and TLC now.
 */
static inline bool nand_is_slc(struct nand_chip *chip)
{
	/* A value of 0 means bits_per_cell was never initialized */
	WARN(nanddev_bits_per_cell(&chip->base) == 0,
	     "chip->bits_per_cell is used uninitialized\n");
	return nanddev_bits_per_cell(&chip->base) == 1;
}
1473
1474 /**
1475 * nand_opcode_8bits - Check if the opcode's address should be sent only on the
1476 * lower 8 bits
1477 * @command: opcode to check
1478 */
nand_opcode_8bits(unsigned int command)1479 static inline int nand_opcode_8bits(unsigned int command)
1480 {
1481 switch (command) {
1482 case NAND_CMD_READID:
1483 case NAND_CMD_PARAM:
1484 case NAND_CMD_GET_FEATURES:
1485 case NAND_CMD_SET_FEATURES:
1486 return 1;
1487 default:
1488 break;
1489 }
1490 return 0;
1491 }
1492
1493 int rawnand_sw_hamming_init(struct nand_chip *chip);
1494 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
1495 const unsigned char *buf,
1496 unsigned char *code);
1497 int rawnand_sw_hamming_correct(struct nand_chip *chip,
1498 unsigned char *buf,
1499 unsigned char *read_ecc,
1500 unsigned char *calc_ecc);
1501 void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
1502 int rawnand_sw_bch_init(struct nand_chip *chip);
1503 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
1504 unsigned char *read_ecc, unsigned char *calc_ecc);
1505 void rawnand_sw_bch_cleanup(struct nand_chip *chip);
1506
1507 int nand_check_erased_ecc_chunk(void *data, int datalen,
1508 void *ecc, int ecclen,
1509 void *extraoob, int extraooblen,
1510 int threshold);
1511
1512 int nand_ecc_choose_conf(struct nand_chip *chip,
1513 const struct nand_ecc_caps *caps, int oobavail);
1514
1515 /* Default write_oob implementation */
1516 int nand_write_oob_std(struct nand_chip *chip, int page);
1517
1518 /* Default read_oob implementation */
1519 int nand_read_oob_std(struct nand_chip *chip, int page);
1520
1521 /* Stub used by drivers that do not support GET/SET FEATURES operations */
1522 int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
1523 u8 *subfeature_param);
1524
1525 /* read_page_raw implementations */
1526 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
1527 int page);
1528 int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1529 int oob_required, int page);
1530
1531 /* write_page_raw implementations */
1532 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1533 int oob_required, int page);
1534 int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1535 int oob_required, int page);
1536
1537 /* Reset and initialize a NAND device */
1538 int nand_reset(struct nand_chip *chip, int chipnr);
1539
1540 /* NAND operation helpers */
1541 int nand_reset_op(struct nand_chip *chip);
1542 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1543 unsigned int len);
1544 int nand_status_op(struct nand_chip *chip, u8 *status);
1545 int nand_exit_status_op(struct nand_chip *chip);
1546 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
1547 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1548 unsigned int offset_in_page, void *buf, unsigned int len);
1549 int nand_change_read_column_op(struct nand_chip *chip,
1550 unsigned int offset_in_page, void *buf,
1551 unsigned int len, bool force_8bit);
1552 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1553 unsigned int offset_in_page, void *buf, unsigned int len);
1554 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1555 unsigned int offset_in_page, const void *buf,
1556 unsigned int len);
1557 int nand_prog_page_end_op(struct nand_chip *chip);
1558 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1559 unsigned int offset_in_page, const void *buf,
1560 unsigned int len);
1561 int nand_change_write_column_op(struct nand_chip *chip,
1562 unsigned int offset_in_page, const void *buf,
1563 unsigned int len, bool force_8bit);
1564 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1565 bool force_8bit, bool check_only);
1566 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1567 unsigned int len, bool force_8bit);
1568 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
1569 int oob_required, int page);
1570
1571 /* Scan and identify a NAND device */
1572 int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
1573 struct nand_flash_dev *ids);
1574
/*
 * nand_scan - Probe for a NAND device and initialize it
 * @chip: NAND chip object
 * @max_chips: maximum number of chips (dies) to scan for
 *
 * Shorthand for nand_scan_with_ids() with no custom ID table (NULL
 * presumably selects the core's default table — see nand_scan_with_ids()).
 */
static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
{
	return nand_scan_with_ids(chip, max_chips, NULL);
}
1579
1580 /* Internal helper for board drivers which need to override command function */
1581 void nand_wait_ready(struct nand_chip *chip);
1582
/*
 * Free resources held by the NAND device, must be called on error after a
 * successful nand_scan().
 */
1587 void nand_cleanup(struct nand_chip *chip);
1588
1589 /*
1590 * External helper for controller drivers that have to implement the WAITRDY
1591 * instruction and have no physical pin to check it.
1592 */
1593 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
1594 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
1595 unsigned long timeout_ms);
1596
1597 /* Select/deselect a NAND target. */
1598 void nand_select_target(struct nand_chip *chip, unsigned int cs);
1599 void nand_deselect_target(struct nand_chip *chip);
1600
1601 /* Bitops */
1602 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
1603 unsigned int src_off, unsigned int nbits);
1604
/**
 * nand_get_data_buf() - Get the internal page buffer
 * @chip: NAND chip object
 *
 * Returns the pre-allocated page buffer after invalidating the cache. This
 * function should be used by drivers that do not want to allocate their own
 * bounce buffer and still need such a buffer for specific operations (most
 * commonly when reading OOB data only).
 *
 * Be careful to never call this function in the write/write_oob path, because
 * the core may have placed the data to be written out in this buffer.
 *
 * Return: pointer to the page cache buffer
 */
static inline void *nand_get_data_buf(struct nand_chip *chip)
{
	/* Invalidate the page cache: -1 means no page is currently cached */
	chip->pagecache.page = -1;

	return chip->data_buf;
}
1625
1626 /* Parse the gpio-cs property */
1627 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
1628 unsigned int *ncs_array);
1629
1630 #endif /* __LINUX_MTD_RAWNAND_H */
1631