// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define	MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define	MXS_NAND_METADATA_SIZE			10
#define	MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define	MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define	MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define	MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

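/*
 * The driver reads the per-chunk ECC status bytes at this offset in the
 * auxiliary buffer: the metadata size rounded up to a 32-bit boundary, i.e.
 * (10 + 3) & ~3 = 12 bytes here. See the status loop in
 * mxs_nand_ecc_read_page().
 */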
static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal that of chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O). */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For the block mark bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
					   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The default length of the Galois field. */
	geo->gf_len = 13;

	/* The default chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);
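	/*
	 * Worked example (a common 2048+64 page, used only for illustration):
	 * gf_len = 13 and four 512-byte chunks give ecc_strength =
	 * ((64 - 10) * 8) / (13 * 4) = 432 / 52 = 8, which is already even,
	 * so round_down() keeps it at 8.
	 */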

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
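	/*
	 * Continuing the example above: 2048 * 8 - (8 * 13 * 3 + 10 * 8) =
	 * 16384 - 392 = 15992, so the bad block mark falls at byte 1999,
	 * bit 0 of the data area as the BCH engine lays it out.
	 */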

	return 0;
}

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
					   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND chip requires. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
				nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that puts the bad block mark
	 * in a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
				geo->gf_len * geo->ecc_strength
				* geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the bad block mark is located. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			geo->gf_len * geo->ecc_strength) /
			(geo->gf_len * geo->ecc_strength +
					geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the bad block mark. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Wait for the BCH complete IRQ and clear the IRQ.
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
		BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends the command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
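	/*
	 * For example (illustrative numbers): with bit_offset == 2, src takes
	 * the top six bits of data_buf[buf_offset] and the bottom two bits of
	 * data_buf[buf_offset + 1], reassembling the block mark byte that
	 * straddles the two positions.
	 */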
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

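	/*
	 * The bitflip threshold is min(gf_len / 2, ecc_strength); with the
	 * GF13/strength-8 example used earlier that is min(6, 8) = 6 flipped
	 * bits per chunk before the page stops counting as erased.
	 */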
	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n",
	       page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
					uint8_t *buf, int oob_required,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t	*status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *nand, const uint8_t *buf,
				int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number that needs to be randomized
		 * to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details
		 * check section 9.6.6.4 of the i.MX7D Applications
		 * Processor Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
					struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
		dev_warn(mtd->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
				chip->ecc_strength_ds, chip->ecc_step_ds);
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;
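	/*
	 * With the 2048+64 example used earlier, FLASH0LAYOUT0 packs
	 * NBLOCKS = 3, META_SIZE = 10, ECC0 = 4 (strength 8 / 2) and
	 * DATA0_SIZE = 512 >> shift, while FLASH0LAYOUT1 packs the
	 * 2112-byte total page size with matching ECCN/DATAN fields.
	 */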

	/*
	 * Set the erase threshold to the ECC strength on i.MX6QP, i.MX6UL,
	 * i.MX7 and i.MX8/8M.
	 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl		= mxs_nand_cmd_ctrl;
	nand->dev_ready		= mxs_nand_device_ready;
	nand->select_chip	= mxs_nand_select_chip;

	nand->read_byte		= mxs_nand_read_byte;
	nand->read_buf		= mxs_nand_read_buf;

	nand->ecc.read_page	= mxs_nand_ecc_read_page;

	nand->ecc.mode		= NAND_ECC_HW;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl		= mxs_nand_cmd_ctrl;

	nand->dev_ready		= mxs_nand_device_ready;
	nand->select_chip	= mxs_nand_select_chip;
	nand->block_bad		= mxs_nand_block_bad;

	nand->read_byte		= mxs_nand_read_byte;

	nand->read_buf		= mxs_nand_read_buf;
	nand->write_buf		= mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page	= mxs_nand_ecc_read_page;
	nand->ecc.write_page	= mxs_nand_ecc_write_page;
	nand->ecc.read_oob	= mxs_nand_ecc_read_oob;
	nand->ecc.write_oob	= mxs_nand_ecc_write_oob;

	nand->ecc.layout	= &fake_ecc_layout;
	nand->ecc.mode		= NAND_ECC_HW;
	nand->ecc.size		= nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength	= nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

#ifndef CONFIG_NAND_MXS_DT
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

/*
 * Read the NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
			BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
			BCH_FLASHLAYOUT1_ECCN_OFFSET;
	l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
		     BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
}

/*
 * Set BCH to the specific 62-bit ECC layout used by the ROM bootloader to
 * read the FCB.
 */
void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* use ECC level 62 (the field holds strength / 2 = 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block size is 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 for data + 838 for OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* use ECC level 62 (the field holds strength / 2 = 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan block size is 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Set BCH to the specific 40-bit ECC layout used by the ROM bootloader to
 * read the FCB.
 */
void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/* No randomizer in this setting. */
	nand_info->en_randomizer = 0;

	mtd->writesize = 1024;
	mtd->oobsize = 1576 - 1024;

	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* use ECC level 40 (the field holds strength / 2 = 0x14) */
	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block size is 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 for data + 552 for OOB */
	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* use ECC level 40 (the field holds strength / 2 = 0x14) */
	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan block size is 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Restore BCH to normal settings.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}
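
/*
 * A typical (illustrative) caller pairs the FCB mode switch with a restore,
 * e.g.:
 *
 *	mxs_nand_mode_fcb_62bit(mtd);
 *	... access the FCB page ...
 *	mxs_nand_mode_normal(mtd);
 */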

uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}