1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  *  Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4  *                        Steven J. Hill <sjhill@realitydiluted.com>
5  *		          Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Info:
8  *	Contains standard defines and IDs for NAND flash devices
9  *
10  * Changelog:
11  *	See git changelog.
12  */
13 #ifndef __LINUX_MTD_RAWNAND_H
14 #define __LINUX_MTD_RAWNAND_H
15 
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/nand.h>
18 #include <linux/mtd/flashchip.h>
19 #include <linux/mtd/bbm.h>
20 #include <linux/mtd/jedec.h>
21 #include <linux/mtd/onfi.h>
22 #include <linux/mutex.h>
23 #include <linux/of.h>
24 #include <linux/types.h>
25 
26 struct nand_chip;
27 struct gpio_desc;
28 
29 /* The maximum number of NAND chips in an array */
30 #define NAND_MAX_CHIPS		8
31 
32 /*
33  * Constants for hardware specific CLE/ALE/NCE function
34  *
35  * These are bits which can be or'ed to set/clear multiple
36  * bits in one go.
37  */
38 /* Select the chip by setting nCE to low */
39 #define NAND_NCE		0x01
40 /* Select the command latch by setting CLE to high */
41 #define NAND_CLE		0x02
42 /* Select the address latch by setting ALE to high */
43 #define NAND_ALE		0x04
44 
45 #define NAND_CTRL_CLE		(NAND_NCE | NAND_CLE)
46 #define NAND_CTRL_ALE		(NAND_NCE | NAND_ALE)
47 #define NAND_CTRL_CHANGE	0x80
48 
49 /*
50  * Standard NAND flash commands
51  */
52 #define NAND_CMD_READ0		0
53 #define NAND_CMD_READ1		1
54 #define NAND_CMD_RNDOUT		5
55 #define NAND_CMD_PAGEPROG	0x10
56 #define NAND_CMD_READOOB	0x50
57 #define NAND_CMD_ERASE1		0x60
58 #define NAND_CMD_STATUS		0x70
59 #define NAND_CMD_SEQIN		0x80
60 #define NAND_CMD_RNDIN		0x85
61 #define NAND_CMD_READID		0x90
62 #define NAND_CMD_ERASE2		0xd0
63 #define NAND_CMD_PARAM		0xec
64 #define NAND_CMD_GET_FEATURES	0xee
65 #define NAND_CMD_SET_FEATURES	0xef
66 #define NAND_CMD_RESET		0xff
67 
68 /* Extended commands for large page devices */
69 #define NAND_CMD_READSTART	0x30
70 #define NAND_CMD_READCACHESEQ	0x31
71 #define NAND_CMD_READCACHEEND	0x3f
72 #define NAND_CMD_RNDOUTSTART	0xE0
73 #define NAND_CMD_CACHEDPROG	0x15
74 
75 #define NAND_CMD_NONE		-1
76 
77 /* Status bits */
78 #define NAND_STATUS_FAIL	0x01
79 #define NAND_STATUS_FAIL_N1	0x02
80 #define NAND_STATUS_TRUE_READY	0x20
81 #define NAND_STATUS_READY	0x40
82 #define NAND_STATUS_WP		0x80
83 
84 #define NAND_DATA_IFACE_CHECK_ONLY	-1
85 
86 /*
87  * Constants for Hardware ECC
88  */
89 /* Reset Hardware ECC for read */
90 #define NAND_ECC_READ		0
91 /* Reset Hardware ECC for write */
92 #define NAND_ECC_WRITE		1
93 /* Enable Hardware ECC before syndrome is read back from flash */
94 #define NAND_ECC_READSYN	2
95 
96 /*
97  * Enable generic NAND 'page erased' check. This check is only done when
98  * ecc.correct() returns -EBADMSG.
99  * Set this flag if your implementation does not fix bitflips in erased
100  * pages and you want to rely on the default implementation.
101  */
102 #define NAND_ECC_GENERIC_ERASED_CHECK	BIT(0)
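
/*
 * A driver relying on this check simply sets the flag from its
 * ->attach_chip() hook (illustrative sketch):
 *
 *	chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
 */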
103 
104 /*
105  * Option constants for bizarre dysfunctionality and real
106  * features.
107  */
108 
109 /* Buswidth is 16 bit */
110 #define NAND_BUSWIDTH_16	BIT(1)
111 
112 /*
113  * When using software implementation of Hamming, we can specify which byte
114  * ordering should be used.
115  */
116 #define NAND_ECC_SOFT_HAMMING_SM_ORDER	BIT(2)
117 
118 /* Chip has cache program function */
119 #define NAND_CACHEPRG		BIT(3)
120 /* Options valid for Samsung large page devices */
121 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
122 
123 /*
124  * Chip requires ready check on read (for auto-incremented sequential read).
125  * True only for small page devices; large page devices do not support
126  * autoincrement.
127  */
128 #define NAND_NEED_READRDY	BIT(8)
129 
130 /* Chip does not allow subpage writes */
131 #define NAND_NO_SUBPAGE_WRITE	BIT(9)
132 
133 /* Device is one of the 'new' xD cards that expose a fake NAND command set */
134 #define NAND_BROKEN_XD		BIT(10)
135 
136 /* Device behaves just like nand, but is readonly */
137 #define NAND_ROM		BIT(11)
138 
139 /* Device supports subpage reads */
140 #define NAND_SUBPAGE_READ	BIT(12)
141 /* Macros to identify the above */
142 #define NAND_HAS_SUBPAGE_READ(chip) ((chip)->options & NAND_SUBPAGE_READ)
143 
144 /*
145  * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
146  * patterns.
147  */
148 #define NAND_NEED_SCRAMBLING	BIT(13)
149 
150 /* Device needs 3rd row address cycle */
151 #define NAND_ROW_ADDR_3		BIT(14)
152 
153 /* Non chip related options */
154 /* This option skips the bbt scan during initialization. */
155 #define NAND_SKIP_BBTSCAN	BIT(16)
156 /* Chip may not exist, so silence any errors in scan */
157 #define NAND_SCAN_SILENT_NODEV	BIT(18)
158 
159 /*
160  * Autodetect the NAND bus width with READID/ONFI.
161  * This assumes the driver will configure the hardware in 8-bit mode
162  * when calling nand_scan_ident(), and update its configuration
163  * before calling nand_scan_tail().
164  */
165 #define NAND_BUSWIDTH_AUTO      BIT(19)
166 
167 /*
168  * This option can be set by controller drivers to protect against
169  * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers.
170  */
171 #define NAND_USES_DMA		BIT(20)
172 
173 /*
174  * If your controller implements ->legacy.cmd_ctrl() and relies on the default
175  * ->cmdfunc() implementation, you may want to let the core
176  * handle the tCCS delay which is required when a column change (RNDIN or
177  * RNDOUT) is requested.
178  * If your controller already takes care of this delay, you don't need to set
179  * this flag.
180  */
181 #define NAND_WAIT_TCCS		BIT(21)
182 
183 /*
184  * Whether the NAND chip is a boot medium. Drivers might use this information
185  * to select ECC algorithms supported by the boot ROM or similar restrictions.
186  */
187 #define NAND_IS_BOOT_MEDIUM	BIT(22)
188 
189 /*
190  * Do not try to tweak the timings at runtime. This is needed when the
191  * controller initializes the timings by itself or when it relies on
192  * configuration done by the bootloader.
193  */
194 #define NAND_KEEP_TIMINGS	BIT(23)
195 
196 /*
197  * There are different places where the manufacturer stores the factory bad
198  * block markers.
199  *
200  * Position within the block: Each of these pages needs to be checked for a
201  * bad block marking pattern.
202  */
203 #define NAND_BBM_FIRSTPAGE	BIT(24)
204 #define NAND_BBM_SECONDPAGE	BIT(25)
205 #define NAND_BBM_LASTPAGE	BIT(26)
206 
207 /*
208  * Some controllers with pipelined ECC engines override the BBM marker with
209  * data or ECC bytes, thus making bad block detection through bad block marker
210  * impossible. Let's flag those chips so the core knows it shouldn't check the
211  * BBM and consider all blocks good.
212  */
213 #define NAND_NO_BBM_QUIRK	BIT(27)
214 
215 /* Cell info constants */
216 #define NAND_CI_CHIPNR_MSK	0x03
217 #define NAND_CI_CELLTYPE_MSK	0x0C
218 #define NAND_CI_CELLTYPE_SHIFT	2
219 
220 /* Position within the OOB data of the page */
221 #define NAND_BBM_POS_SMALL		5
222 #define NAND_BBM_POS_LARGE		0
223 
224 /**
225  * struct nand_parameters - NAND generic parameters from the parameter page
226  * @model: Model name
227  * @supports_set_get_features: The NAND chip supports setting/getting features
228  * @supports_read_cache: The NAND chip supports read cache operations
229  * @set_feature_list: Bitmap of features that can be set
230  * @get_feature_list: Bitmap of features that can be retrieved
231  * @onfi: ONFI specific parameters
232  */
233 struct nand_parameters {
234 	/* Generic parameters */
235 	const char *model;
236 	bool supports_set_get_features;
237 	bool supports_read_cache;
238 	DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
239 	DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
240 
241 	/* ONFI parameters */
242 	struct onfi_params *onfi;
243 };
244 
245 /* The maximum expected count of bytes in the NAND ID sequence */
246 #define NAND_MAX_ID_LEN 8
247 
248 /**
249  * struct nand_id - NAND id structure
250  * @data: buffer containing the id bytes.
251  * @len: ID length.
252  */
253 struct nand_id {
254 	u8 data[NAND_MAX_ID_LEN];
255 	int len;
256 };
257 
258 /**
259  * struct nand_ecc_step_info - ECC step information of ECC engine
260  * @stepsize: data bytes per ECC step
261  * @strengths: array of supported strengths
262  * @nstrengths: number of supported strengths
263  */
264 struct nand_ecc_step_info {
265 	int stepsize;
266 	const int *strengths;
267 	int nstrengths;
268 };
269 
270 /**
271  * struct nand_ecc_caps - capability of ECC engine
272  * @stepinfos: array of ECC step information
273  * @nstepinfos: number of ECC step information
274  * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
275  */
276 struct nand_ecc_caps {
277 	const struct nand_ecc_step_info *stepinfos;
278 	int nstepinfos;
279 	int (*calc_ecc_bytes)(int step_size, int strength);
280 };
281 
282 /* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
283 #define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...)	\
284 static const int __name##_strengths[] = { __VA_ARGS__ };	\
285 static const struct nand_ecc_step_info __name##_stepinfo = {	\
286 	.stepsize = __step,					\
287 	.strengths = __name##_strengths,			\
288 	.nstrengths = ARRAY_SIZE(__name##_strengths),		\
289 };								\
290 static const struct nand_ecc_caps __name = {			\
291 	.stepinfos = &__name##_stepinfo,			\
292 	.nstepinfos = 1,					\
293 	.calc_ecc_bytes = __calc,				\
294 }
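
/*
 * Illustrative use of the macro above (my_calc_ecc_bytes() and the listed
 * strengths are hypothetical):
 *
 *	NAND_ECC_CAPS_SINGLE(my_ecc_caps, my_calc_ecc_bytes, 512, 4, 8, 16);
 *
 * The resulting &my_ecc_caps can then be passed to nand_ecc_choose_conf()
 * from the ->attach_chip() hook.
 */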
295 
296 /**
297  * struct nand_ecc_ctrl - Control structure for ECC
298  * @engine_type: ECC engine type
299  * @placement:	OOB bytes placement
300  * @algo:	ECC algorithm
301  * @steps:	number of ECC steps per page
302  * @size:	data bytes per ECC step
303  * @bytes:	ECC bytes per step
304  * @strength:	max number of correctable bits per ECC step
305  * @total:	total number of ECC bytes per page
306  * @prepad:	padding information for syndrome based ECC generators
307  * @postpad:	padding information for syndrome based ECC generators
308  * @options:	ECC specific options (see NAND_ECC_XXX flags defined above)
309  * @calc_buf:	buffer for calculated ECC, size is oobsize.
310  * @code_buf:	buffer for ECC read from flash, size is oobsize.
311  * @hwctl:	function to control the hardware ECC generator. Must only
312  *		be provided if a hardware ECC engine is available
313  * @calculate:	function for ECC calculation or readback from ECC hardware
314  * @correct:	function for ECC correction, matching the ECC generator (sw/hw).
315  *		Should return a positive number representing the number of
316  *		corrected bitflips, -EBADMSG if the number of bitflips exceeds
317  *		the ECC strength, or any other error code if the error is not
318  *		directly related to correction.
319  *		If -EBADMSG is returned the input buffers should be left
320  *		untouched.
321  * @read_page_raw:	function to read a raw page without ECC. This function
322  *			should hide the specific layout used by the ECC
323  *			controller and always return contiguous in-band and
324  *			out-of-band data even if they're not stored
325  *			contiguously on the NAND chip (e.g.
326  *			NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
327  *			out-of-band data).
328  * @write_page_raw:	function to write a raw page without ECC. This function
329  *			should hide the specific layout used by the ECC
330  *			controller and consider the passed data as contiguous
331  *			in-band and out-of-band data. ECC controller is
332  *			responsible for doing the appropriate transformations
333  *			to adapt to its specific layout (e.g.
334  *			NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
335  *			out-of-band data).
336  * @read_page:	function to read a page according to the ECC generator
337  *		requirements; returns maximum number of bitflips corrected in
338  *		any single ECC step, or -EIO on hardware error
339  * @read_subpage:	function to read parts of the page covered by ECC;
340  *			returns same as read_page()
341  * @write_subpage:	function to write parts of the page covered by ECC.
342  * @write_page:	function to write a page according to the ECC generator
343  *		requirements.
344  * @write_oob_raw:	function to write chip OOB data without ECC
345  * @read_oob_raw:	function to read chip OOB data without ECC
346  * @read_oob:	function to read chip OOB data
347  * @write_oob:	function to write chip OOB data
348  */
349 struct nand_ecc_ctrl {
350 	enum nand_ecc_engine_type engine_type;
351 	enum nand_ecc_placement placement;
352 	enum nand_ecc_algo algo;
353 	int steps;
354 	int size;
355 	int bytes;
356 	int total;
357 	int strength;
358 	int prepad;
359 	int postpad;
360 	unsigned int options;
361 	u8 *calc_buf;
362 	u8 *code_buf;
363 	void (*hwctl)(struct nand_chip *chip, int mode);
364 	int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
365 			 uint8_t *ecc_code);
366 	int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
367 		       uint8_t *calc_ecc);
368 	int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
369 			     int oob_required, int page);
370 	int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
371 			      int oob_required, int page);
372 	int (*read_page)(struct nand_chip *chip, uint8_t *buf,
373 			 int oob_required, int page);
374 	int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
375 			    uint32_t len, uint8_t *buf, int page);
376 	int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
377 			     uint32_t data_len, const uint8_t *data_buf,
378 			     int oob_required, int page);
379 	int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
380 			  int oob_required, int page);
381 	int (*write_oob_raw)(struct nand_chip *chip, int page);
382 	int (*read_oob_raw)(struct nand_chip *chip, int page);
383 	int (*read_oob)(struct nand_chip *chip, int page);
384 	int (*write_oob)(struct nand_chip *chip, int page);
385 };
386 
387 /**
388  * struct nand_sdr_timings - SDR NAND chip timings
389  *
390  * This struct defines the timing requirements of an SDR NAND chip.
391  * This information can be found in every NAND datasheet, and the meaning of
392  * each timing is described in the ONFI specification:
393  * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
394  * (chapter 4.15 Timing Parameters)
395  *
396  * All these timings are expressed in picoseconds.
397  *
398  * @tBERS_max: Block erase time
399  * @tCCS_min: Change column setup time
400  * @tPROG_max: Page program time
401  * @tR_max: Page read time
402  * @tALH_min: ALE hold time
403  * @tADL_min: ALE to data loading time
404  * @tALS_min: ALE setup time
405  * @tAR_min: ALE to RE# delay
406  * @tCEA_max: CE# access time
407  * @tCEH_min: CE# high hold time
408  * @tCH_min:  CE# hold time
409  * @tCHZ_max: CE# high to output hi-Z
410  * @tCLH_min: CLE hold time
411  * @tCLR_min: CLE to RE# delay
412  * @tCLS_min: CLE setup time
413  * @tCOH_min: CE# high to output hold
414  * @tCS_min: CE# setup time
415  * @tDH_min: Data hold time
416  * @tDS_min: Data setup time
417  * @tFEAT_max: Busy time for Set Features and Get Features
418  * @tIR_min: Output hi-Z to RE# low
419  * @tITC_max: Interface and Timing Mode Change time
420  * @tRC_min: RE# cycle time
421  * @tREA_max: RE# access time
422  * @tREH_min: RE# high hold time
423  * @tRHOH_min: RE# high to output hold
424  * @tRHW_min: RE# high to WE# low
425  * @tRHZ_max: RE# high to output hi-Z
426  * @tRLOH_min: RE# low to output hold
427  * @tRP_min: RE# pulse width
428  * @tRR_min: Ready to RE# low (data only)
429  * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
430  *	      rising edge of R/B#.
431  * @tWB_max: WE# high to SR[6] low
432  * @tWC_min: WE# cycle time
433  * @tWH_min: WE# high hold time
434  * @tWHR_min: WE# high to RE# low
435  * @tWP_min: WE# pulse width
436  * @tWW_min: WP# transition to WE# low
437  */
438 struct nand_sdr_timings {
439 	u64 tBERS_max;
440 	u32 tCCS_min;
441 	u64 tPROG_max;
442 	u64 tR_max;
443 	u32 tALH_min;
444 	u32 tADL_min;
445 	u32 tALS_min;
446 	u32 tAR_min;
447 	u32 tCEA_max;
448 	u32 tCEH_min;
449 	u32 tCH_min;
450 	u32 tCHZ_max;
451 	u32 tCLH_min;
452 	u32 tCLR_min;
453 	u32 tCLS_min;
454 	u32 tCOH_min;
455 	u32 tCS_min;
456 	u32 tDH_min;
457 	u32 tDS_min;
458 	u32 tFEAT_max;
459 	u32 tIR_min;
460 	u32 tITC_max;
461 	u32 tRC_min;
462 	u32 tREA_max;
463 	u32 tREH_min;
464 	u32 tRHOH_min;
465 	u32 tRHW_min;
466 	u32 tRHZ_max;
467 	u32 tRLOH_min;
468 	u32 tRP_min;
469 	u32 tRR_min;
470 	u64 tRST_max;
471 	u32 tWB_max;
472 	u32 tWC_min;
473 	u32 tWH_min;
474 	u32 tWHR_min;
475 	u32 tWP_min;
476 	u32 tWW_min;
477 };
478 
479 /**
480  * struct nand_nvddr_timings - NV-DDR NAND chip timings
481  *
482  * This struct defines the timing requirements of an NV-DDR NAND data interface.
483  * This information can be found in every NAND datasheet, and the meaning of
484  * each timing is described in the ONFI specification:
485  * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
486  * (chapter 4.18.2 NV-DDR)
487  *
488  * All these timings are expressed in picoseconds.
489  *
490  * @tBERS_max: Block erase time
491  * @tCCS_min: Change column setup time
492  * @tPROG_max: Page program time
493  * @tR_max: Page read time
494  * @tAC_min: Access window of DQ[7:0] from CLK
495  * @tAC_max: Access window of DQ[7:0] from CLK
496  * @tADL_min: ALE to data loading time
497  * @tCAD_min: Command, Address, Data delay
498  * @tCAH_min: Command/Address DQ hold time
499  * @tCALH_min: W/R_n, CLE and ALE hold time
500  * @tCALS_min: W/R_n, CLE and ALE setup time
501  * @tCAS_min: Command/address DQ setup time
502  * @tCEH_min: CE# high hold time
503  * @tCH_min:  CE# hold time
504  * @tCK_min: Average clock cycle time
505  * @tCS_min: CE# setup time
506  * @tDH_min: Data hold time
507  * @tDQSCK_min: Start of the access window of DQS from CLK
508  * @tDQSCK_max: End of the access window of DQS from CLK
509  * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
510  * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
511  * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
512  * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
513  * @tDS_min: Data setup time
514  * @tDSC_min: DQS cycle time
515  * @tFEAT_max: Busy time for Set Features and Get Features
516  * @tITC_max: Interface and Timing Mode Change time
517  * @tQHS_max: Data hold skew factor
518  * @tRHW_min: Data output cycle to command, address, or data input cycle
519  * @tRR_min: Ready to RE# low (data only)
520  * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
521  *	      rising edge of R/B#.
522  * @tWB_max: WE# high to SR[6] low
523  * @tWHR_min: WE# high to RE# low
524  * @tWRCK_min: W/R_n low to data output cycle
525  * @tWW_min: WP# transition to WE# low
526  */
527 struct nand_nvddr_timings {
528 	u64 tBERS_max;
529 	u32 tCCS_min;
530 	u64 tPROG_max;
531 	u64 tR_max;
532 	u32 tAC_min;
533 	u32 tAC_max;
534 	u32 tADL_min;
535 	u32 tCAD_min;
536 	u32 tCAH_min;
537 	u32 tCALH_min;
538 	u32 tCALS_min;
539 	u32 tCAS_min;
540 	u32 tCEH_min;
541 	u32 tCH_min;
542 	u32 tCK_min;
543 	u32 tCS_min;
544 	u32 tDH_min;
545 	u32 tDQSCK_min;
546 	u32 tDQSCK_max;
547 	u32 tDQSD_min;
548 	u32 tDQSD_max;
549 	u32 tDQSHZ_max;
550 	u32 tDQSQ_max;
551 	u32 tDS_min;
552 	u32 tDSC_min;
553 	u32 tFEAT_max;
554 	u32 tITC_max;
555 	u32 tQHS_max;
556 	u32 tRHW_min;
557 	u32 tRR_min;
558 	u32 tRST_max;
559 	u32 tWB_max;
560 	u32 tWHR_min;
561 	u32 tWRCK_min;
562 	u32 tWW_min;
563 };
564 
565 /*
566  * While timings related to the data interface itself are mostly different
567  * between SDR and NV-DDR, timings related to the internal chip behavior are
568  * common. IOW, the following entries which describe the internal delays have
569  * the same definition and are shared in both SDR and NV-DDR timing structures:
570  * - tADL_min
571  * - tBERS_max
572  * - tCCS_min
573  * - tFEAT_max
574  * - tPROG_max
575  * - tR_max
576  * - tRR_min
577  * - tRST_max
578  * - tWB_max
579  *
580  * The below macros return the value of a given timing, no matter the interface.
581  */
582 #define NAND_COMMON_TIMING_PS(conf, timing_name)		\
583 	nand_interface_is_sdr(conf) ?				\
584 		nand_get_sdr_timings(conf)->timing_name :	\
585 		nand_get_nvddr_timings(conf)->timing_name
586 
587 #define NAND_COMMON_TIMING_MS(conf, timing_name) \
588 	PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
589 
590 #define NAND_COMMON_TIMING_NS(conf, timing_name) \
591 	PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
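
/*
 * For instance, a driver needing a program-page timeout in milliseconds for
 * the currently applied configuration could use (illustrative sketch):
 *
 *	const struct nand_interface_config *conf =
 *		nand_get_interface_config(chip);
 *	unsigned int tPROG_ms = NAND_COMMON_TIMING_MS(conf, tPROG_max);
 */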
592 
593 /**
594  * enum nand_interface_type - NAND interface type
595  * @NAND_SDR_IFACE:	Single Data Rate interface
596  * @NAND_NVDDR_IFACE:	Double Data Rate interface
597  */
598 enum nand_interface_type {
599 	NAND_SDR_IFACE,
600 	NAND_NVDDR_IFACE,
601 };
602 
603 /**
604  * struct nand_interface_config - NAND interface timing
605  * @type:	 type of the timing
606  * @timings:	 The timing information
607  * @timings.mode: Timing mode as defined in the specification
608  * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
609  * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
610  */
611 struct nand_interface_config {
612 	enum nand_interface_type type;
613 	struct nand_timings {
614 		unsigned int mode;
615 		union {
616 			struct nand_sdr_timings sdr;
617 			struct nand_nvddr_timings nvddr;
618 		};
619 	} timings;
620 };
621 
622 /**
623  * nand_interface_is_sdr - get the interface type
624  * @conf:	The data interface
625  */
626 static bool nand_interface_is_sdr(const struct nand_interface_config *conf)
627 {
628 	return conf->type == NAND_SDR_IFACE;
629 }
630 
631 /**
632  * nand_interface_is_nvddr - get the interface type
633  * @conf:	The data interface
634  */
635 static bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
636 {
637 	return conf->type == NAND_NVDDR_IFACE;
638 }
639 
640 /**
641  * nand_get_sdr_timings - get SDR timing from data interface
642  * @conf:	The data interface
643  */
644 static inline const struct nand_sdr_timings *
645 nand_get_sdr_timings(const struct nand_interface_config *conf)
646 {
647 	if (!nand_interface_is_sdr(conf))
648 		return ERR_PTR(-EINVAL);
649 
650 	return &conf->timings.sdr;
651 }
652 
653 /**
654  * nand_get_nvddr_timings - get NV-DDR timing from data interface
655  * @conf:	The data interface
656  */
657 static inline const struct nand_nvddr_timings *
658 nand_get_nvddr_timings(const struct nand_interface_config *conf)
659 {
660 	if (!nand_interface_is_nvddr(conf))
661 		return ERR_PTR(-EINVAL);
662 
663 	return &conf->timings.nvddr;
664 }
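
/*
 * A ->setup_interface() implementation typically retrieves the timings it
 * knows how to handle and rejects anything else (illustrative sketch):
 *
 *	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
 *
 *	if (IS_ERR(sdr))
 *		return PTR_ERR(sdr);
 *
 *	... program the controller from sdr->tWP_min, sdr->tREA_max, etc.
 */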
665 
666 /**
667  * struct nand_op_cmd_instr - Definition of a command instruction
668  * @opcode: the command to issue in one cycle
669  */
670 struct nand_op_cmd_instr {
671 	u8 opcode;
672 };
673 
674 /**
675  * struct nand_op_addr_instr - Definition of an address instruction
676  * @naddrs: length of the @addrs array
677  * @addrs: array containing the address cycles to issue
678  */
679 struct nand_op_addr_instr {
680 	unsigned int naddrs;
681 	const u8 *addrs;
682 };
683 
684 /**
685  * struct nand_op_data_instr - Definition of a data instruction
686  * @len: number of data bytes to move
687  * @buf: buffer to fill
688  * @buf.in: buffer to fill when reading from the NAND chip
689  * @buf.out: buffer to read from when writing to the NAND chip
690  * @force_8bit: force 8-bit access
691  *
692  * Please note that "in" and "out" are inverted from the ONFI specification
693  * and are from the controller perspective, so an "in" is a read from the NAND
694  * chip while an "out" is a write to the NAND chip.
695  */
696 struct nand_op_data_instr {
697 	unsigned int len;
698 	union {
699 		void *in;
700 		const void *out;
701 	} buf;
702 	bool force_8bit;
703 };
704 
705 /**
706  * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
707  * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
708  */
709 struct nand_op_waitrdy_instr {
710 	unsigned int timeout_ms;
711 };
712 
713 /**
714  * enum nand_op_instr_type - Definition of all instruction types
715  * @NAND_OP_CMD_INSTR: command instruction
716  * @NAND_OP_ADDR_INSTR: address instruction
717  * @NAND_OP_DATA_IN_INSTR: data in instruction
718  * @NAND_OP_DATA_OUT_INSTR: data out instruction
719  * @NAND_OP_WAITRDY_INSTR: wait ready instruction
720  */
721 enum nand_op_instr_type {
722 	NAND_OP_CMD_INSTR,
723 	NAND_OP_ADDR_INSTR,
724 	NAND_OP_DATA_IN_INSTR,
725 	NAND_OP_DATA_OUT_INSTR,
726 	NAND_OP_WAITRDY_INSTR,
727 };
728 
729 /**
730  * struct nand_op_instr - Instruction object
731  * @type: the instruction type
732  * @ctx:  extra data associated with the instruction. You'll have to use the
733  *        appropriate element depending on @type
734  * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
735  * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
736  * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
737  *	      or %NAND_OP_DATA_OUT_INSTR
738  * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
739  * @delay_ns: delay the controller should apply after the instruction has been
740  *	      issued on the bus. Most modern controllers have internal timing
741  *	      control logic, and in this case, the controller driver can ignore
742  *	      this field.
743  */
744 struct nand_op_instr {
745 	enum nand_op_instr_type type;
746 	union {
747 		struct nand_op_cmd_instr cmd;
748 		struct nand_op_addr_instr addr;
749 		struct nand_op_data_instr data;
750 		struct nand_op_waitrdy_instr waitrdy;
751 	} ctx;
752 	unsigned int delay_ns;
753 };
754 
755 /*
756  * Special handling must be done for the WAITRDY timeout parameter as it usually
757  * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
758  * tBERS (during an erase), all of which are u64 values that cannot be
759  * divided by the usual kernel macros and must be handled with the special
760  * DIV_ROUND_UP_ULL() macro.
761  *
762  * A cast to the type of the dividend is needed here to guarantee that the
763  * result won't be an unsigned long long when the dividend is an unsigned long
764  * (or smaller), which is what the compiler would otherwise produce for a
765  * ternary operator with two different return types (it picks the largest type
766  * to make sure there's no loss).
767  */
768 #define __DIVIDE(dividend, divisor) ({						\
769 	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ?	\
770 			       DIV_ROUND_UP(dividend, divisor) :		\
771 			       DIV_ROUND_UP_ULL(dividend, divisor)); 		\
772 	})
773 #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
774 #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
775 
776 #define NAND_OP_CMD(id, ns)						\
777 	{								\
778 		.type = NAND_OP_CMD_INSTR,				\
779 		.ctx.cmd.opcode = id,					\
780 		.delay_ns = ns,						\
781 	}
782 
783 #define NAND_OP_ADDR(ncycles, cycles, ns)				\
784 	{								\
785 		.type = NAND_OP_ADDR_INSTR,				\
786 		.ctx.addr = {						\
787 			.naddrs = ncycles,				\
788 			.addrs = cycles,				\
789 		},							\
790 		.delay_ns = ns,						\
791 	}
792 
793 #define NAND_OP_DATA_IN(l, b, ns)					\
794 	{								\
795 		.type = NAND_OP_DATA_IN_INSTR,				\
796 		.ctx.data = {						\
797 			.len = l,					\
798 			.buf.in = b,					\
799 			.force_8bit = false,				\
800 		},							\
801 		.delay_ns = ns,						\
802 	}
803 
804 #define NAND_OP_DATA_OUT(l, b, ns)					\
805 	{								\
806 		.type = NAND_OP_DATA_OUT_INSTR,				\
807 		.ctx.data = {						\
808 			.len = l,					\
809 			.buf.out = b,					\
810 			.force_8bit = false,				\
811 		},							\
812 		.delay_ns = ns,						\
813 	}
814 
815 #define NAND_OP_8BIT_DATA_IN(l, b, ns)					\
816 	{								\
817 		.type = NAND_OP_DATA_IN_INSTR,				\
818 		.ctx.data = {						\
819 			.len = l,					\
820 			.buf.in = b,					\
821 			.force_8bit = true,				\
822 		},							\
823 		.delay_ns = ns,						\
824 	}
825 
826 #define NAND_OP_8BIT_DATA_OUT(l, b, ns)					\
827 	{								\
828 		.type = NAND_OP_DATA_OUT_INSTR,				\
829 		.ctx.data = {						\
830 			.len = l,					\
831 			.buf.out = b,					\
832 			.force_8bit = true,				\
833 		},							\
834 		.delay_ns = ns,						\
835 	}
836 
837 #define NAND_OP_WAIT_RDY(tout_ms, ns)					\
838 	{								\
839 		.type = NAND_OP_WAITRDY_INSTR,				\
840 		.ctx.waitrdy.timeout_ms = tout_ms,			\
841 		.delay_ns = ns,						\
842 	}
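
/*
 * Instruction arrays are built from the macros above, e.g. an illustrative
 * READ ID sequence (all delays left at 0):
 *
 *	u8 addr = 0;
 *	u8 id[NAND_MAX_ID_LEN];
 *	struct nand_op_instr instrs[] = {
 *		NAND_OP_CMD(NAND_CMD_READID, 0),
 *		NAND_OP_ADDR(1, &addr, 0),
 *		NAND_OP_8BIT_DATA_IN(sizeof(id), id, 0),
 *	};
 *
 * See the nand_operation wrappers further below for how such an array is
 * turned into a full operation.
 */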
843 
844 /**
845  * struct nand_subop - a sub operation
846  * @cs: the CS line to select for this NAND sub-operation
847  * @instrs: array of instructions
848  * @ninstrs: length of the @instrs array
849  * @first_instr_start_off: offset to start from for the first instruction
850  *			   of the sub-operation
851  * @last_instr_end_off: offset to end at (excluded) for the last instruction
852  *			of the sub-operation
853  *
854  * Both @first_instr_start_off and @last_instr_end_off only apply to data or
855  * address instructions.
856  *
857  * When an operation cannot be handled as is by the NAND controller, it will
858  * be split by the parser into sub-operations which will be passed to the
859  * controller driver.
860  */
861 struct nand_subop {
862 	unsigned int cs;
863 	const struct nand_op_instr *instrs;
864 	unsigned int ninstrs;
865 	unsigned int first_instr_start_off;
866 	unsigned int last_instr_end_off;
867 };
868 
869 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
870 					   unsigned int op_id);
871 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
872 					 unsigned int op_id);
873 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
874 					   unsigned int op_id);
875 unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
876 				     unsigned int op_id);
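
/*
 * These helpers are meant to be used from a pattern's ->exec() hook to
 * translate an instruction index into buffer offsets/lengths, e.g.
 * (illustrative sketch, my_read_buf() is hypothetical):
 *
 *	const struct nand_op_instr *instr = &subop->instrs[op_id];
 *	unsigned int len = nand_subop_get_data_len(subop, op_id);
 *	unsigned int off = nand_subop_get_data_start_off(subop, op_id);
 *
 *	my_read_buf(chip, instr->ctx.data.buf.in + off, len);
 */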
877 
878 /**
879  * struct nand_op_parser_addr_constraints - Constraints for address instructions
880  * @maxcycles: maximum number of address cycles the controller can issue in a
881  *	       single step
882  */
883 struct nand_op_parser_addr_constraints {
884 	unsigned int maxcycles;
885 };
886 
887 /**
888  * struct nand_op_parser_data_constraints - Constraints for data instructions
889  * @maxlen: maximum data length that the controller can handle in a single step
890  */
891 struct nand_op_parser_data_constraints {
892 	unsigned int maxlen;
893 };
894 
895 /**
896  * struct nand_op_parser_pattern_elem - One element of a pattern
897  * @type: the instruction type
898  * @optional: whether this element of the pattern is optional or mandatory
899  * @ctx: address or data constraint
900  * @ctx.addr: address constraint (number of cycles)
901  * @ctx.data: data constraint (data length)
902  */
903 struct nand_op_parser_pattern_elem {
904 	enum nand_op_instr_type type;
905 	bool optional;
906 	union {
907 		struct nand_op_parser_addr_constraints addr;
908 		struct nand_op_parser_data_constraints data;
909 	} ctx;
910 };
911 
912 #define NAND_OP_PARSER_PAT_CMD_ELEM(_opt)			\
913 	{							\
914 		.type = NAND_OP_CMD_INSTR,			\
915 		.optional = _opt,				\
916 	}
917 
918 #define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles)		\
919 	{							\
920 		.type = NAND_OP_ADDR_INSTR,			\
921 		.optional = _opt,				\
922 		.ctx.addr.maxcycles = _maxcycles,		\
923 	}
924 
925 #define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen)		\
926 	{							\
927 		.type = NAND_OP_DATA_IN_INSTR,			\
928 		.optional = _opt,				\
929 		.ctx.data.maxlen = _maxlen,			\
930 	}
931 
932 #define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen)		\
933 	{							\
934 		.type = NAND_OP_DATA_OUT_INSTR,			\
935 		.optional = _opt,				\
936 		.ctx.data.maxlen = _maxlen,			\
937 	}
938 
939 #define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt)			\
940 	{							\
941 		.type = NAND_OP_WAITRDY_INSTR,			\
942 		.optional = _opt,				\
943 	}
944 
945 /**
946  * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
947  * @elems: array of pattern elements
948  * @nelems: number of pattern elements in @elems array
949  * @exec: the function that will issue a sub-operation
950  *
951  * A pattern is a list of elements, each element representing one instruction
952  * with its constraints. The pattern itself is used by the core to match NAND
953  * chip operations with NAND controller operations.
954  * Once a match between a NAND controller operation pattern and a NAND chip
955  * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
956  * hook is called so that the controller driver can issue the operation on the
957  * bus.
958  *
959  * Controller drivers should declare as many patterns as they support and pass
960  * this list of patterns (created with the help of the following macro) to
961  * the nand_op_parser_exec_op() helper.
962  */
963 struct nand_op_parser_pattern {
964 	const struct nand_op_parser_pattern_elem *elems;
965 	unsigned int nelems;
966 	int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
967 };
968 
969 #define NAND_OP_PARSER_PATTERN(_exec, ...)							\
970 	{											\
971 		.exec = _exec,									\
972 		.elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ },		\
973 		.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) /	\
974 			  sizeof(struct nand_op_parser_pattern_elem),				\
975 	}
976 
977 /**
978  * struct nand_op_parser - NAND controller operation parser descriptor
979  * @patterns: array of supported patterns
980  * @npatterns: length of the @patterns array
981  *
982  * The parser descriptor is just an array of supported patterns which will be
983  * iterated over by nand_op_parser_exec_op() every time it tries to execute a
984  * NAND operation (or tries to determine if a specific operation is supported).
985  *
986  * It is worth mentioning that patterns will be tested in their declaration
987  * order, and the first match will be taken, so it's important to order patterns
988  * appropriately so that simple/inefficient patterns are placed at the end of
989  * the list. Usually, this is where you put single instruction patterns.
990  */
991 struct nand_op_parser {
992 	const struct nand_op_parser_pattern *patterns;
993 	unsigned int npatterns;
994 };
995 
996 #define NAND_OP_PARSER(...)									\
997 	{											\
998 		.patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ },		\
999 		.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) /	\
1000 			     sizeof(struct nand_op_parser_pattern),				\
1001 	}
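
/*
 * Illustrative parser declaration (the my_exec_* callbacks, the number of
 * address cycles and the maximum data length are hypothetical):
 *
 *	static const struct nand_op_parser my_op_parser = NAND_OP_PARSER(
 *		NAND_OP_PARSER_PATTERN(
 *			my_exec_rw,
 *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
 *			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
 *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
 *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
 *			NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
 *		NAND_OP_PARSER_PATTERN(
 *			my_exec_waitrdy,
 *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)));
 */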
1002 
1003 /**
1004  * struct nand_operation - NAND operation descriptor
1005  * @cs: the CS line to select for this NAND operation
1006  * @deassert_wp: set to true when the operation requires the WP pin to be
1007  *		 de-asserted (ERASE, PROG, ...)
1008  * @instrs: array of instructions to execute
1009  * @ninstrs: length of the @instrs array
1010  *
1011  * The actual operation structure that will be passed to chip->exec_op().
1012  */
1013 struct nand_operation {
1014 	unsigned int cs;
1015 	bool deassert_wp;
1016 	const struct nand_op_instr *instrs;
1017 	unsigned int ninstrs;
1018 };
1019 
1020 #define NAND_OPERATION(_cs, _instrs)				\
1021 	{							\
1022 		.cs = _cs,					\
1023 		.instrs = _instrs,				\
1024 		.ninstrs = ARRAY_SIZE(_instrs),			\
1025 	}
1026 
1027 #define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs)		\
1028 	{							\
1029 		.cs = _cs,					\
1030 		.deassert_wp = true,				\
1031 		.instrs = _instrs,				\
1032 		.ninstrs = ARRAY_SIZE(_instrs),			\
1033 	}
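
/*
 * For instance, the READ ID instruction array sketched earlier could be
 * wrapped as:
 *
 *	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
 *
 * Operations that modify the flash array content (erase, program, ...) should
 * be built with NAND_DESTRUCTIVE_OPERATION() instead, so the core knows the
 * WP pin must be de-asserted.
 */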
1034 
1035 int nand_op_parser_exec_op(struct nand_chip *chip,
1036 			   const struct nand_op_parser *parser,
1037 			   const struct nand_operation *op, bool check_only);
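
/*
 * A controller driver's ->exec_op() hook is then often a thin wrapper around
 * this helper (sketch, my_op_parser and my_select_target() are hypothetical):
 *
 *	static int my_exec_op(struct nand_chip *chip,
 *			      const struct nand_operation *op, bool check_only)
 *	{
 *		if (!check_only)
 *			my_select_target(chip, op->cs);
 *
 *		return nand_op_parser_exec_op(chip, &my_op_parser, op,
 *					      check_only);
 *	}
 */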
1038 
1039 static inline void nand_op_trace(const char *prefix,
1040 				 const struct nand_op_instr *instr)
1041 {
1042 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
1043 	switch (instr->type) {
1044 	case NAND_OP_CMD_INSTR:
1045 		pr_debug("%sCMD      [0x%02x]\n", prefix,
1046 			 instr->ctx.cmd.opcode);
1047 		break;
1048 	case NAND_OP_ADDR_INSTR:
1049 		pr_debug("%sADDR     [%d cyc: %*ph]\n", prefix,
1050 			 instr->ctx.addr.naddrs,
1051 			 instr->ctx.addr.naddrs < 64 ?
1052 			 instr->ctx.addr.naddrs : 64,
1053 			 instr->ctx.addr.addrs);
1054 		break;
1055 	case NAND_OP_DATA_IN_INSTR:
1056 		pr_debug("%sDATA_IN  [%d B%s]\n", prefix,
1057 			 instr->ctx.data.len,
1058 			 instr->ctx.data.force_8bit ?
1059 			 ", force 8-bit" : "");
1060 		break;
1061 	case NAND_OP_DATA_OUT_INSTR:
1062 		pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
1063 			 instr->ctx.data.len,
1064 			 instr->ctx.data.force_8bit ?
1065 			 ", force 8-bit" : "");
1066 		break;
1067 	case NAND_OP_WAITRDY_INSTR:
1068 		pr_debug("%sWAITRDY  [max %d ms]\n", prefix,
1069 			 instr->ctx.waitrdy.timeout_ms);
1070 		break;
1071 	}
1072 #endif
1073 }
1074 
1075 /**
1076  * struct nand_controller_ops - Controller operations
1077  *
1078  * @attach_chip: this method is called after the NAND detection phase, once
1079  *		 flash ID and MTD fields such as erase size, page size and OOB
1080  *		 size have been set up. ECC requirements are available if
1081  *		 provided by the NAND chip or device tree. Typically used to
1082  *		 choose the appropriate ECC configuration and allocate
1083  *		 associated resources.
1084  *		 This hook is optional.
1085  * @detach_chip: free all resources allocated/claimed in
1086  *		 nand_controller_ops->attach_chip().
1087  *		 This hook is optional.
1088  * @exec_op:	 controller specific method to execute NAND operations.
1089  *		 This method replaces chip->legacy.cmdfunc(),
1090  *		 chip->legacy.{read,write}_{buf,byte,word}(),
1091  *		 chip->legacy.dev_ready() and chip->legacy.waitfunc().
1092  * @setup_interface: setup the data interface and timing. If chipnr is set to
1093  *		     %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
1094  *		     should not be applied but only checked.
1095  *		     This hook is optional.
1096  */
1097 struct nand_controller_ops {
1098 	int (*attach_chip)(struct nand_chip *chip);
1099 	void (*detach_chip)(struct nand_chip *chip);
1100 	int (*exec_op)(struct nand_chip *chip,
1101 		       const struct nand_operation *op,
1102 		       bool check_only);
1103 	int (*setup_interface)(struct nand_chip *chip, int chipnr,
1104 			       const struct nand_interface_config *conf);
1105 };
1106 
1107 /**
1108  * struct nand_controller - Structure used to describe a NAND controller
1109  *
1110  * @lock:		lock used to serialize accesses to the NAND controller
1111  * @ops:		NAND controller operations.
1112  * @supported_op:	NAND controller known-to-be-supported operations,
1113  *			only writable by the core after initial checking.
1114  * @supported_op.data_only_read: The controller supports reading more data from
1115  *			the bus without restarting an entire read operation or
1116  *			changing the column.
1117  * @supported_op.cont_read: The controller supports sequential cache reads.
1118  * @controller_wp:	the controller is in charge of handling the WP pin.
1119  */
1120 struct nand_controller {
1121 	struct mutex lock;
1122 	const struct nand_controller_ops *ops;
1123 	struct {
1124 		unsigned int data_only_read: 1;
1125 		unsigned int cont_read: 1;
1126 	} supported_op;
1127 	bool controller_wp;
1128 };
1129 
1130 static inline void nand_controller_init(struct nand_controller *nfc)
1131 {
1132 	mutex_init(&nfc->lock);
1133 }
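
/*
 * Controller drivers typically embed this structure in their own private
 * state and wire everything up at probe time (sketch, my_nfc and
 * my_controller_ops are hypothetical):
 *
 *	struct my_nfc {
 *		struct nand_controller controller;
 *		struct nand_chip chip;
 *	};
 *
 *	nand_controller_init(&nfc->controller);
 *	nfc->controller.ops = &my_controller_ops;
 *	nfc->chip.controller = &nfc->controller;
 */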
1134 
1135 /**
1136  * struct nand_legacy - NAND chip legacy fields/hooks
1137  * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
1138  * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
1139  * @select_chip: select/deselect a specific target/die
1140  * @read_byte: read one byte from the chip
1141  * @write_byte: write a single byte to the chip on the low 8 I/O lines
1142  * @write_buf: write data from the buffer to the chip
1143  * @read_buf: read data from the chip into the buffer
1144  * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
1145  *	      to write command and address
1146  * @cmdfunc: hardware specific function for writing commands to the chip.
1147  * @dev_ready: hardware specific function for accessing device ready/busy line.
1148  *	       If set to NULL no access to ready/busy is available and the
1149  *	       ready/busy information is read from the chip status register.
1150  * @waitfunc: hardware specific function for wait on ready.
1151  * @block_bad: check if a block is bad, using OOB markers
1152  * @block_markbad: mark a block bad
1153  * @set_features: set the NAND chip features
1154  * @get_features: get the NAND chip features
1155  * @chip_delay: chip dependent delay for transferring data from array to read
1156  *		regs (tR).
1157  * @dummy_controller: dummy controller implementation for drivers that can
1158  *		      only control a single chip
1159  *
1160  * If you look at this structure you're already wrong. These fields/hooks are
1161  * all deprecated.
1162  */
1163 struct nand_legacy {
1164 	void __iomem *IO_ADDR_R;
1165 	void __iomem *IO_ADDR_W;
1166 	void (*select_chip)(struct nand_chip *chip, int cs);
1167 	u8 (*read_byte)(struct nand_chip *chip);
1168 	void (*write_byte)(struct nand_chip *chip, u8 byte);
1169 	void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
1170 	void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
1171 	void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
1172 	void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
1173 			int page_addr);
1174 	int (*dev_ready)(struct nand_chip *chip);
1175 	int (*waitfunc)(struct nand_chip *chip);
1176 	int (*block_bad)(struct nand_chip *chip, loff_t ofs);
1177 	int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
1178 	int (*set_features)(struct nand_chip *chip, int feature_addr,
1179 			    u8 *subfeature_para);
1180 	int (*get_features)(struct nand_chip *chip, int feature_addr,
1181 			    u8 *subfeature_para);
1182 	int chip_delay;
1183 	struct nand_controller dummy_controller;
1184 };
1185 
1186 /**
1187  * struct nand_chip_ops - NAND chip operations
1188  * @suspend: Suspend operation
1189  * @resume: Resume operation
1190  * @lock_area: Lock operation
1191  * @unlock_area: Unlock operation
1192  * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
1193  * @choose_interface_config: Choose the best interface configuration
1194  */
1195 struct nand_chip_ops {
1196 	int (*suspend)(struct nand_chip *chip);
1197 	void (*resume)(struct nand_chip *chip);
1198 	int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
1199 	int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
1200 	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
1201 	int (*choose_interface_config)(struct nand_chip *chip,
1202 				       struct nand_interface_config *iface);
1203 };
1204 
1205 /**
1206  * struct nand_manufacturer - NAND manufacturer structure
1207  * @desc: The manufacturer description
1208  * @priv: Private information for the manufacturer driver
1209  */
1210 struct nand_manufacturer {
1211 	const struct nand_manufacturer_desc *desc;
1212 	void *priv;
1213 };
1214 
1215 /**
1216  * struct nand_secure_region - NAND secure region structure
1217  * @offset: Offset of the start of the secure region
1218  * @size: Size of the secure region
1219  */
1220 struct nand_secure_region {
1221 	u64 offset;
1222 	u64 size;
1223 };
1224 
1225 /**
1226  * struct nand_chip - NAND Private Flash Chip Data
1227  * @base: Inherit from the generic NAND device
1228  * @id: Holds NAND ID
1229  * @parameters: Holds generic parameters under an easily readable form
1230  * @manufacturer: Manufacturer information
1231  * @ops: NAND chip operations
1232  * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
1233  *          to use any of these fields/hooks, and if you're modifying an
1234  *          existing driver that is using those fields/hooks, you should
1235  *          consider reworking the driver and avoid using them.
1236  * @options: Various chip options. They can partly be set to inform nand_scan
1237  *           about special functionality. See the defines for further
1238  *           explanation.
1239  * @current_interface_config: The currently used NAND interface configuration
1240  * @best_interface_config: The best NAND interface configuration which fits both
1241  *                         the NAND chip and NAND controller constraints. If
1242  *                         unset, the default reset interface configuration must
1243  *                         be used.
1244  * @bbt_erase_shift: Number of address bits in a bbt entry
1245  * @bbt_options: Bad block table specific options. All options used here must
1246  *               come from bbm.h. By default, these options will be copied to
1247  *               the appropriate nand_bbt_descr's.
1248  * @badblockpos: Bad block marker position in the oob area
1249  * @badblockbits: Minimum number of set bits in a good block's bad block marker
1250  *                position; i.e., BBM = 11110111b is good when badblockbits = 7
1251  * @bbt_td: Bad block table descriptor for flash lookup
1252  * @bbt_md: Bad block table mirror descriptor
1253  * @badblock_pattern: Bad block scan pattern used for initial bad block scan
1254  * @bbt: Bad block table pointer
1255  * @page_shift: Number of address bits in a page (column address bits)
1256  * @phys_erase_shift: Number of address bits in a physical eraseblock
1257  * @chip_shift: Number of address bits in one chip
1258  * @pagemask: Page number mask = number of (pages / chip) - 1
1259  * @subpagesize: Holds the subpagesize
1260  * @data_buf: Buffer for data, size is (page size + oobsize)
1261  * @oob_poi: pointer to the OOB area covered by data_buf
1262  * @pagecache: Structure containing page cache related fields
1263  * @pagecache.bitflips: Number of bitflips of the cached page
1264  * @pagecache.page: Page number currently in the cache. -1 means no page is
1265  *                  currently cached
1266  * @buf_align: Minimum buffer alignment required by a platform
1267  * @lock: Lock protecting the suspended field. Also used to serialize accesses
1268  *        to the NAND device
1269  * @suspended: Set to 1 when the device is suspended, 0 when it's not
1270  * @resume_wq: wait queue to sleep if rawnand is in suspended state.
1271  * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
1272  *          should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
1273  *          NAND Controller drivers should not modify this value, but they're
1274  *          allowed to read it.
1275  * @read_retries: The number of read retry modes supported
1276  * @secure_regions: Structure containing the secure regions info
1277  * @nr_secure_regions: Number of secure regions
1278  * @cont_read: Sequential page read internals
1279  * @cont_read.ongoing: Whether a continuous read is ongoing or not
1280  * @cont_read.first_page: Start of the continuous read operation
1281  * @cont_read.pause_page: End of the current sequential cache read operation
1282  * @cont_read.last_page: End of the continuous read operation
1283  * @controller: The hardware controller structure which is shared among multiple
1284  *              independent devices
1285  * @ecc: The ECC controller structure
1286  * @priv: Chip private data
1287  */
1288 struct nand_chip {
1289 	struct nand_device base;
1290 	struct nand_id id;
1291 	struct nand_parameters parameters;
1292 	struct nand_manufacturer manufacturer;
1293 	struct nand_chip_ops ops;
1294 	struct nand_legacy legacy;
1295 	unsigned int options;
1296 
1297 	/* Data interface */
1298 	const struct nand_interface_config *current_interface_config;
1299 	struct nand_interface_config *best_interface_config;
1300 
1301 	/* Bad block information */
1302 	unsigned int bbt_erase_shift;
1303 	unsigned int bbt_options;
1304 	unsigned int badblockpos;
1305 	unsigned int badblockbits;
1306 	struct nand_bbt_descr *bbt_td;
1307 	struct nand_bbt_descr *bbt_md;
1308 	struct nand_bbt_descr *badblock_pattern;
1309 	u8 *bbt;
1310 
1311 	/* Device internal layout */
1312 	unsigned int page_shift;
1313 	unsigned int phys_erase_shift;
1314 	unsigned int chip_shift;
1315 	unsigned int pagemask;
1316 	unsigned int subpagesize;
1317 
1318 	/* Buffers */
1319 	u8 *data_buf;
1320 	u8 *oob_poi;
1321 	struct {
1322 		unsigned int bitflips;
1323 		int page;
1324 	} pagecache;
1325 	unsigned long buf_align;
1326 
1327 	/* Internals */
1328 	struct mutex lock;
1329 	unsigned int suspended : 1;
1330 	wait_queue_head_t resume_wq;
1331 	int cur_cs;
1332 	int read_retries;
1333 	struct nand_secure_region *secure_regions;
1334 	u8 nr_secure_regions;
1335 	struct {
1336 		bool ongoing;
1337 		unsigned int first_page;
1338 		unsigned int pause_page;
1339 		unsigned int last_page;
1340 	} cont_read;
1341 
1342 	/* Externals */
1343 	struct nand_controller *controller;
1344 	struct nand_ecc_ctrl ecc;
1345 	void *priv;
1346 };
1347 
1348 static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
1349 {
1350 	return container_of(mtd, struct nand_chip, base.mtd);
1351 }
1352 
1353 static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
1354 {
1355 	return &chip->base.mtd;
1356 }
1357 
1358 static inline void *nand_get_controller_data(struct nand_chip *chip)
1359 {
1360 	return chip->priv;
1361 }
1362 
1363 static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
1364 {
1365 	chip->priv = priv;
1366 }
1367 
1368 static inline void nand_set_manufacturer_data(struct nand_chip *chip,
1369 					      void *priv)
1370 {
1371 	chip->manufacturer.priv = priv;
1372 }
1373 
1374 static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
1375 {
1376 	return chip->manufacturer.priv;
1377 }
1378 
1379 static inline void nand_set_flash_node(struct nand_chip *chip,
1380 				       struct device_node *np)
1381 {
1382 	mtd_set_of_node(nand_to_mtd(chip), np);
1383 }
1384 
1385 static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
1386 {
1387 	return mtd_get_of_node(nand_to_mtd(chip));
1388 }
1389 
1390 /**
1391  * nand_get_interface_config - Retrieve the current interface configuration
1392  *                             of a NAND chip
1393  * @chip: The NAND chip
1394  */
1395 static inline const struct nand_interface_config *
1396 nand_get_interface_config(struct nand_chip *chip)
1397 {
1398 	return chip->current_interface_config;
1399 }
1400 
1401 /*
1402  * A helper for defining older NAND chips where the second ID byte fully
1403  * defined the chip, including the geometry (chip size, eraseblock size, page
1404  * size). All these chips have a 512-byte NAND page size.
1405  */
1406 #define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)          \
1407 	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
1408 	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
1409 
1410 /*
1411  * A helper for defining newer chips which report their page size and
1412  * eraseblock size via the extended ID bytes.
1413  *
1414  * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
1415  * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
1416  * device ID now only represented a particular total chip size (and voltage,
1417  * buswidth), and the page size, eraseblock size, and OOB size could vary while
1418  * using the same device ID.
1419  */
1420 #define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                      \
1421 	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
1422 	  .options = (opts) }
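
/*
 * Flash ID tables (such as the one optionally passed to nand_scan_with_ids())
 * are built from these helpers, e.g. (illustrative entry, options left empty):
 *
 *	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xf1, 128, 0),
 */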
1423 
1424 #define NAND_ECC_INFO(_strength, _step)	\
1425 			{ .strength_ds = (_strength), .step_ds = (_step) }
1426 #define NAND_ECC_STRENGTH(type)		((type)->ecc.strength_ds)
1427 #define NAND_ECC_STEP(type)		((type)->ecc.step_ds)
1428 
1429 /**
1430  * struct nand_flash_dev - NAND Flash Device ID Structure
1431  * @name: a human-readable name of the NAND chip
1432  * @mfr_id: manufacturer ID part of the full chip ID array (refers to the
1433  *          same memory address as ``id[0]``)
1434  * @dev_id: device ID part of the full chip ID array, i.e. the second byte
1435  *          of the full chip ID (refers to the same memory address as
1436  *          ``id[1]``)
1437  * @id: full device ID array
1438  * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
1439  *            well as the eraseblock size) is determined from the extended NAND
1440  *            chip ID array
1441  * @chipsize: total chip size in MiB
1442  * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
1443  * @options: stores various chip bit options
1444  * @id_len: The valid length of the @id.
1445  * @oobsize: OOB size
1446  * @ecc: ECC correctability and step information from the datasheet.
1447  * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
1448  *                   @ecc_strength_ds in nand_chip{}.
1449  * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
1450  *               @ecc_step_ds in nand_chip{}, also from the datasheet.
1451  *               For example, the "4bit ECC for each 512Byte" can be set with
1452  *               NAND_ECC_INFO(4, 512).
1453  */
1454 struct nand_flash_dev {
1455 	char *name;
1456 	union {
1457 		struct {
1458 			uint8_t mfr_id;
1459 			uint8_t dev_id;
1460 		};
1461 		uint8_t id[NAND_MAX_ID_LEN];
1462 	};
1463 	unsigned int pagesize;
1464 	unsigned int chipsize;
1465 	unsigned int erasesize;
1466 	unsigned int options;
1467 	uint16_t id_len;
1468 	uint16_t oobsize;
1469 	struct {
1470 		uint16_t strength_ds;
1471 		uint16_t step_ds;
1472 	} ecc;
1473 };
1474 
1475 int nand_create_bbt(struct nand_chip *chip);
1476 
1477 /*
1478  * Check if it is a SLC nand.
1479  * Check if it is an SLC NAND.
1480  * !nand_is_slc() can be used to check for MLC/TLC NAND chips.
1481  * We do not distinguish between MLC and TLC for now.
1482 static inline bool nand_is_slc(struct nand_chip *chip)
1483 {
1484 	WARN(nanddev_bits_per_cell(&chip->base) == 0,
1485 	     "chip->bits_per_cell is used uninitialized\n");
1486 	return nanddev_bits_per_cell(&chip->base) == 1;
1487 }
1488 
1489 /**
1490  * nand_opcode_8bits - Check if the opcode's address should be sent only on the
1491  *	lower 8 bits
1492  * @command: opcode to check
1493  */
1494 static inline int nand_opcode_8bits(unsigned int command)
1495 {
1496 	switch (command) {
1497 	case NAND_CMD_READID:
1498 	case NAND_CMD_PARAM:
1499 	case NAND_CMD_GET_FEATURES:
1500 	case NAND_CMD_SET_FEATURES:
1501 		return 1;
1502 	default:
1503 		break;
1504 	}
1505 	return 0;
1506 }
1507 
1508 int rawnand_sw_hamming_init(struct nand_chip *chip);
1509 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
1510 				 const unsigned char *buf,
1511 				 unsigned char *code);
1512 int rawnand_sw_hamming_correct(struct nand_chip *chip,
1513 			       unsigned char *buf,
1514 			       unsigned char *read_ecc,
1515 			       unsigned char *calc_ecc);
1516 void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
1517 int rawnand_sw_bch_init(struct nand_chip *chip);
1518 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
1519 			   unsigned char *read_ecc, unsigned char *calc_ecc);
1520 void rawnand_sw_bch_cleanup(struct nand_chip *chip);
1521 
1522 int nand_check_erased_ecc_chunk(void *data, int datalen,
1523 				void *ecc, int ecclen,
1524 				void *extraoob, int extraooblen,
1525 				int threshold);
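
/*
 * This helper is typically called from a driver's read path when the ECC
 * engine reports an uncorrectable error, to check whether the chunk is in
 * fact an erased page containing a few bitflips (illustrative sketch):
 *
 *	ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
 *					  ecc, chip->ecc.bytes,
 *					  NULL, 0, chip->ecc.strength);
 *	if (ret < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, ret);
 */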
1526 
1527 int nand_ecc_choose_conf(struct nand_chip *chip,
1528 			 const struct nand_ecc_caps *caps, int oobavail);
1529 
1530 /* Default write_oob implementation */
1531 int nand_write_oob_std(struct nand_chip *chip, int page);
1532 
1533 /* Default read_oob implementation */
1534 int nand_read_oob_std(struct nand_chip *chip, int page);
1535 
1536 /* Stub used by drivers that do not support GET/SET FEATURES operations */
1537 int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
1538 				  u8 *subfeature_param);
1539 
1540 /* read_page_raw implementations */
1541 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
1542 		       int page);
1543 int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1544 				  int oob_required, int page);
1545 
1546 /* write_page_raw implementations */
1547 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1548 			int oob_required, int page);
1549 int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1550 				   int oob_required, int page);
1551 
1552 /* Reset and initialize a NAND device */
1553 int nand_reset(struct nand_chip *chip, int chipnr);
1554 
1555 /* NAND operation helpers */
1556 int nand_reset_op(struct nand_chip *chip);
1557 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1558 		   unsigned int len);
1559 int nand_status_op(struct nand_chip *chip, u8 *status);
1560 int nand_exit_status_op(struct nand_chip *chip);
1561 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
1562 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1563 		      unsigned int offset_in_page, void *buf, unsigned int len);
1564 int nand_change_read_column_op(struct nand_chip *chip,
1565 			       unsigned int offset_in_page, void *buf,
1566 			       unsigned int len, bool force_8bit);
1567 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1568 		     unsigned int offset_in_page, void *buf, unsigned int len);
1569 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1570 			    unsigned int offset_in_page, const void *buf,
1571 			    unsigned int len);
1572 int nand_prog_page_end_op(struct nand_chip *chip);
1573 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1574 		      unsigned int offset_in_page, const void *buf,
1575 		      unsigned int len);
1576 int nand_change_write_column_op(struct nand_chip *chip,
1577 				unsigned int offset_in_page, const void *buf,
1578 				unsigned int len, bool force_8bit);
1579 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1580 		      bool force_8bit, bool check_only);
1581 int nand_write_data_op(struct nand_chip *chip, const void *buf,
1582 		       unsigned int len, bool force_8bit);
1583 int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
1584 				   int oob_required, int page);
1585 
1586 /* Scan and identify a NAND device */
1587 int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
1588 		       struct nand_flash_dev *ids);
1589 
1590 static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
1591 {
1592 	return nand_scan_with_ids(chip, max_chips, NULL);
1593 }
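
/*
 * Typical probe-time sequence in a controller driver (illustrative sketch):
 *
 *	chip->controller = &nfc->controller;
 *	nand_set_flash_node(chip, pdev->dev.of_node);
 *	ret = nand_scan(chip, 1);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *	if (ret)
 *		nand_cleanup(chip);
 *
 *	return ret;
 */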
1594 
1595 /* Internal helper for board drivers which need to override command function */
1596 void nand_wait_ready(struct nand_chip *chip);
1597 
1598 /*
1599  * Free resources held by the NAND device, must be called on error after a
1600  * successful nand_scan().
1601  */
1602 void nand_cleanup(struct nand_chip *chip);
1603 
1604 /*
1605  * External helper for controller drivers that have to implement the WAITRDY
1606  * instruction and have no physical pin to check it.
1607  */
1608 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
1609 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
1610 		      unsigned long timeout_ms);
1611 
1612 /* Select/deselect a NAND target. */
1613 void nand_select_target(struct nand_chip *chip, unsigned int cs);
1614 void nand_deselect_target(struct nand_chip *chip);
1615 
1616 /* Bitops */
1617 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
1618 		       unsigned int src_off, unsigned int nbits);
1619 
1620 /**
1621  * nand_get_data_buf() - Get the internal page buffer
1622  * @chip: NAND chip object
1623  *
1624  * Returns the pre-allocated page buffer after invalidating the cache. This
1625  * function should be used by drivers that do not want to allocate their own
1626  * bounce buffer and still need such a buffer for specific operations (most
1627  * commonly when reading OOB data only).
1628  *
1629  * Be careful to never call this function in the write/write_oob path, because
1630  * the core may have placed the data to be written out in this buffer.
1631  *
1632  * Return: pointer to the page cache buffer
1633  */
1634 static inline void *nand_get_data_buf(struct nand_chip *chip)
1635 {
1636 	chip->pagecache.page = -1;
1637 
1638 	return chip->data_buf;
1639 }
1640 
1641 /* Parse the gpio-cs property */
1642 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
1643 			     unsigned int *ncs_array);
1644 
1645 #endif /* __LINUX_MTD_RAWNAND_H */
1646