xref: /linux/drivers/mtd/nand/raw/qcom_nandc.c (revision b61bb5bc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
4  */
5 #include <linux/bitops.h>
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/dma/qcom_adm.h>
11 #include <linux/dma/qcom_bam_dma.h>
12 #include <linux/module.h>
13 #include <linux/mtd/partitions.h>
14 #include <linux/mtd/rawnand.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 
19 /* NANDc reg offsets */
20 #define	NAND_FLASH_CMD			0x00
21 #define	NAND_ADDR0			0x04
22 #define	NAND_ADDR1			0x08
23 #define	NAND_FLASH_CHIP_SELECT		0x0c
24 #define	NAND_EXEC_CMD			0x10
25 #define	NAND_FLASH_STATUS		0x14
26 #define	NAND_BUFFER_STATUS		0x18
27 #define	NAND_DEV0_CFG0			0x20
28 #define	NAND_DEV0_CFG1			0x24
29 #define	NAND_DEV0_ECC_CFG		0x28
30 #define	NAND_AUTO_STATUS_EN		0x2c
31 #define	NAND_DEV1_CFG0			0x30
32 #define	NAND_DEV1_CFG1			0x34
33 #define	NAND_READ_ID			0x40
34 #define	NAND_READ_STATUS		0x44
35 #define	NAND_DEV_CMD0			0xa0
36 #define	NAND_DEV_CMD1			0xa4
37 #define	NAND_DEV_CMD2			0xa8
38 #define	NAND_DEV_CMD_VLD		0xac
39 #define	SFLASHC_BURST_CFG		0xe0
40 #define	NAND_ERASED_CW_DETECT_CFG	0xe8
41 #define	NAND_ERASED_CW_DETECT_STATUS	0xec
42 #define	NAND_EBI2_ECC_BUF_CFG		0xf0
43 #define	FLASH_BUF_ACC			0x100
44 
45 #define	NAND_CTRL			0xf00
46 #define	NAND_VERSION			0xf08
47 #define	NAND_READ_LOCATION_0		0xf20
48 #define	NAND_READ_LOCATION_1		0xf24
49 #define	NAND_READ_LOCATION_2		0xf28
50 #define	NAND_READ_LOCATION_3		0xf2c
51 #define	NAND_READ_LOCATION_LAST_CW_0	0xf40
52 #define	NAND_READ_LOCATION_LAST_CW_1	0xf44
53 #define	NAND_READ_LOCATION_LAST_CW_2	0xf48
54 #define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
55 
56 /* dummy register offsets, used by write_reg_dma */
57 #define	NAND_DEV_CMD1_RESTORE		0xdead
58 #define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
59 
60 /* NAND_FLASH_CMD bits */
61 #define	PAGE_ACC			BIT(4)
62 #define	LAST_PAGE			BIT(5)
63 
64 /* NAND_FLASH_CHIP_SELECT bits */
65 #define	NAND_DEV_SEL			0
66 #define	DM_EN				BIT(2)
67 
68 /* NAND_FLASH_STATUS bits */
69 #define	FS_OP_ERR			BIT(4)
70 #define	FS_READY_BSY_N			BIT(5)
71 #define	FS_MPU_ERR			BIT(8)
72 #define	FS_DEVICE_STS_ERR		BIT(16)
73 #define	FS_DEVICE_WP			BIT(23)
74 
75 /* NAND_BUFFER_STATUS bits */
76 #define	BS_UNCORRECTABLE_BIT		BIT(8)
77 #define	BS_CORRECTABLE_ERR_MSK		0x1f
78 
79 /* NAND_DEVn_CFG0 bits */
80 #define	DISABLE_STATUS_AFTER_WRITE	4
81 #define	CW_PER_PAGE			6
82 #define	UD_SIZE_BYTES			9
83 #define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
84 #define	ECC_PARITY_SIZE_BYTES_RS	19
85 #define	SPARE_SIZE_BYTES		23
86 #define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
87 #define	NUM_ADDR_CYCLES			27
88 #define	STATUS_BFR_READ			30
89 #define	SET_RD_MODE_AFTER_STATUS	31
90 
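/*
 * Illustrative sketch (not part of the original driver): the CFG0 defines
 * above are bit *positions*, so values are packed by shifting. This mirrors
 * how update_rw_regs() below encodes the codewords-per-page count; the
 * helper name is hypothetical.
 */
static inline u32 example_cfg0_set_cw_per_page(u32 cfg0, unsigned int num_cw)
{
	/* CW_PER_PAGE is a 3-bit field holding (codewords - 1) */
	return (cfg0 & ~(7U << CW_PER_PAGE)) | ((num_cw - 1) << CW_PER_PAGE);
}
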
91 /* NAND_DEVn_CFG1 bits */
92 #define	DEV0_CFG1_ECC_DISABLE		0
93 #define	WIDE_FLASH			1
94 #define	NAND_RECOVERY_CYCLES		2
95 #define	CS_ACTIVE_BSY			5
96 #define	BAD_BLOCK_BYTE_NUM		6
97 #define	BAD_BLOCK_IN_SPARE_AREA		16
98 #define	WR_RD_BSY_GAP			17
99 #define	ENABLE_BCH_ECC			27
100 
101 /* NAND_DEV0_ECC_CFG bits */
102 #define	ECC_CFG_ECC_DISABLE		0
103 #define	ECC_SW_RESET			1
104 #define	ECC_MODE			4
105 #define	ECC_PARITY_SIZE_BYTES_BCH	8
106 #define	ECC_NUM_DATA_BYTES		16
107 #define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
108 #define	ECC_FORCE_CLK_OPEN		30
109 
110 /* NAND_DEV_CMD1 bits */
111 #define	READ_ADDR			0
112 
113 /* NAND_DEV_CMD_VLD bits */
114 #define	READ_START_VLD			BIT(0)
115 #define	READ_STOP_VLD			BIT(1)
116 #define	WRITE_START_VLD			BIT(2)
117 #define	ERASE_START_VLD			BIT(3)
118 #define	SEQ_READ_START_VLD		BIT(4)
119 
120 /* NAND_EBI2_ECC_BUF_CFG bits */
121 #define	NUM_STEPS			0
122 
123 /* NAND_ERASED_CW_DETECT_CFG bits */
124 #define	ERASED_CW_ECC_MASK		1
125 #define	AUTO_DETECT_RES			0
126 #define	MASK_ECC			BIT(ERASED_CW_ECC_MASK)
127 #define	RESET_ERASED_DET		BIT(AUTO_DETECT_RES)
128 #define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
129 #define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
130 #define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
131 
132 /* NAND_ERASED_CW_DETECT_STATUS bits */
133 #define	PAGE_ALL_ERASED			BIT(7)
134 #define	CODEWORD_ALL_ERASED		BIT(6)
135 #define	PAGE_ERASED			BIT(5)
136 #define	CODEWORD_ERASED			BIT(4)
137 #define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
138 #define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
139 
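/*
 * Illustrative sketch (not part of the original driver): how the composite
 * ERASED_CW mask above is typically tested — both status bits must be set,
 * exactly as parse_read_errors() does further down. The helper name is
 * hypothetical.
 */
static inline bool example_cw_is_erased(u32 erased_cw_status)
{
	return (erased_cw_status & ERASED_CW) == ERASED_CW;
}
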
140 /* NAND_READ_LOCATION_n bits */
141 #define READ_LOCATION_OFFSET		0
142 #define READ_LOCATION_SIZE		16
143 #define READ_LOCATION_LAST		31
144 
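/*
 * Illustrative sketch (not part of the original driver): packing a read
 * location descriptor from the bit positions above, equivalent to what the
 * nandc_set_read_loc_first/last macros below expand to. The helper name is
 * hypothetical.
 */
static inline u32 example_pack_read_location(u32 cw_offset, u32 read_size,
					     bool is_last)
{
	return (cw_offset << READ_LOCATION_OFFSET) |
	       (read_size << READ_LOCATION_SIZE) |
	       ((u32)is_last << READ_LOCATION_LAST);
}
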
145 /* Version Mask */
146 #define	NAND_VERSION_MAJOR_MASK		0xf0000000
147 #define	NAND_VERSION_MAJOR_SHIFT	28
148 #define	NAND_VERSION_MINOR_MASK		0x0fff0000
149 #define	NAND_VERSION_MINOR_SHIFT	16
150 
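/*
 * Illustrative sketch (not part of the original driver): decoding the
 * controller version from a raw NAND_VERSION value with the masks above.
 * The helper name is hypothetical.
 */
static inline void example_decode_nand_version(u32 ver, u32 *major, u32 *minor)
{
	*major = (ver & NAND_VERSION_MAJOR_MASK) >> NAND_VERSION_MAJOR_SHIFT;
	*minor = (ver & NAND_VERSION_MINOR_MASK) >> NAND_VERSION_MINOR_SHIFT;
}
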
151 /* NAND OP_CMDs */
152 #define	OP_PAGE_READ			0x2
153 #define	OP_PAGE_READ_WITH_ECC		0x3
154 #define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
155 #define	OP_PAGE_READ_ONFI_READ		0x5
156 #define	OP_PROGRAM_PAGE			0x6
157 #define	OP_PAGE_PROGRAM_WITH_ECC	0x7
158 #define	OP_PROGRAM_PAGE_SPARE		0x9
159 #define	OP_BLOCK_ERASE			0xa
160 #define	OP_CHECK_STATUS			0xc
161 #define	OP_FETCH_ID			0xb
162 #define	OP_RESET_DEVICE			0xd
163 
164 /* Default Value for NAND_DEV_CMD_VLD */
165 #define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
166 					 ERASE_START_VLD | SEQ_READ_START_VLD)
167 
168 /* NAND_CTRL bits */
169 #define	BAM_MODE_EN			BIT(0)
170 
171 /*
172  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
173  * the driver calls the chunks 'step' or 'codeword' interchangeably
174  */
175 #define	NANDC_STEP_SIZE			512
176 
177 /*
178  * the largest page size we support is 8K, this will have 16 steps/codewords
179  * of 512 bytes each
180  */
181 #define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
182 
183 /* we read at most 3 registers per codeword scan */
184 #define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
185 
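/*
 * Worked example (illustration only): an 8K page yields
 * MAX_NUM_STEPS = SZ_8K / NANDC_STEP_SIZE = 8192 / 512 = 16 codewords,
 * so a full-page scan may read back up to MAX_REG_RD = 3 * 16 = 48
 * registers into reg_read_buf.
 */
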
186 /* ECC modes supported by the controller */
187 #define	ECC_NONE	BIT(0)
188 #define	ECC_RS_4BIT	BIT(1)
189 #define	ECC_BCH_4BIT	BIT(2)
190 #define	ECC_BCH_8BIT	BIT(3)
191 
192 #define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)	\
193 nandc_set_reg(chip, reg,			\
194 	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
195 	      ((read_size) << READ_LOCATION_SIZE) |			\
196 	      ((is_last_read_loc) << READ_LOCATION_LAST))
197 
198 #define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)	\
199 nandc_set_reg(chip, reg,			\
200 	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
201 	      ((read_size) << READ_LOCATION_SIZE) |			\
202 	      ((is_last_read_loc) << READ_LOCATION_LAST))
203 /*
204  * Returns the actual register address for all NAND_DEV_ registers
205  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
206  */
207 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
208 
209 /* Returns the NAND register physical address */
210 #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
211 
212 /* Returns the dma address for reg read buffer */
213 #define reg_buf_dma_addr(chip, vaddr) \
214 	((chip)->reg_read_dma + \
215 	((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
216 
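/*
 * Illustration (not part of the original driver): reg_buf_dma_addr() maps a
 * CPU pointer inside reg_read_buf to its DMA address by preserving the byte
 * offset. E.g. for the third register, vaddr = nandc->reg_read_buf + 2, so
 * the result is nandc->reg_read_dma + 2 * sizeof(__le32).
 */
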
217 #define QPIC_PER_CW_CMD_ELEMENTS	32
218 #define QPIC_PER_CW_CMD_SGL		32
219 #define QPIC_PER_CW_DATA_SGL		8
220 
221 #define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
222 
223 /*
224  * Flags used in DMA descriptor preparation helper functions
225  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
226  */
227 /* Don't set the EOT in current tx BAM sgl */
228 #define NAND_BAM_NO_EOT			BIT(0)
229 /* Set the NWD flag in current BAM sgl */
230 #define NAND_BAM_NWD			BIT(1)
231 /* Finish writing in the current BAM sgl and start writing in another BAM sgl */
232 #define NAND_BAM_NEXT_SGL		BIT(2)
233 /*
234  * The erased codeword detect config register is written twice in a single
235  * transfer, so this flag selects whether the set or the clear value is used
236  */
237 #define NAND_ERASED_CW_SET		BIT(4)
238 
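/*
 * Usage illustration (not part of the original driver): the flags above are
 * passed to the descriptor-preparation helpers, e.g.
 *
 *	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
 *
 * asks write_reg_dma() to close the current BAM sgl after queueing the
 * register write, as done in config_nand_cw_read() further down.
 */
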
239 #define MAX_ADDRESS_CYCLE		5
240 
241 /*
242  * This data type corresponds to the BAM transaction which will be used for all
243  * NAND transfers.
244  * @bam_ce - the array of BAM command elements
245  * @cmd_sgl - sgl for NAND BAM command pipe
246  * @data_sgl - sgl for NAND BAM consumer/producer pipe
247  * @last_data_desc - last DMA desc in data channel (tx/rx).
248  * @last_cmd_desc - last DMA desc in command channel.
249  * @txn_done - completion for NAND transfer.
250  * @bam_ce_pos - the index in bam_ce which is available for next sgl
251  * @bam_ce_start - the index in bam_ce which marks the first command element
252  *		   of the current sgl. It will be used for size calculation
253  *		   of the current sgl
254  * @cmd_sgl_pos - current index in command sgl.
255  * @cmd_sgl_start - start index in command sgl.
256  * @tx_sgl_pos - current index in data sgl for tx.
257  * @tx_sgl_start - start index in data sgl for tx.
258  * @rx_sgl_pos - current index in data sgl for rx.
259  * @rx_sgl_start - start index in data sgl for rx.
260  * @wait_second_completion - wait for second DMA desc completion before making
261  *			     the NAND transfer completion.
262  */
263 struct bam_transaction {
264 	struct bam_cmd_element *bam_ce;
265 	struct scatterlist *cmd_sgl;
266 	struct scatterlist *data_sgl;
267 	struct dma_async_tx_descriptor *last_data_desc;
268 	struct dma_async_tx_descriptor *last_cmd_desc;
269 	struct completion txn_done;
270 	u32 bam_ce_pos;
271 	u32 bam_ce_start;
272 	u32 cmd_sgl_pos;
273 	u32 cmd_sgl_start;
274 	u32 tx_sgl_pos;
275 	u32 tx_sgl_start;
276 	u32 rx_sgl_pos;
277 	u32 rx_sgl_start;
278 	bool wait_second_completion;
279 };
280 
281 /*
282  * This data type corresponds to the nand dma descriptor
283  * @dma_desc - low level DMA engine descriptor
284  * @list - list for desc_info
285  *
286  * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
287  *	      ADM
288  * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
289  * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
290  * @dir - DMA transfer direction
291  */
292 struct desc_info {
293 	struct dma_async_tx_descriptor *dma_desc;
294 	struct list_head node;
295 
296 	union {
297 		struct scatterlist adm_sgl;
298 		struct {
299 			struct scatterlist *bam_sgl;
300 			int sgl_cnt;
301 		};
302 	};
303 	enum dma_data_direction dir;
304 };
305 
306 /*
307  * holds the current register values that we want to write. acts as a contiguous
308  * chunk of memory which we use to write the controller registers through DMA.
309  */
310 struct nandc_regs {
311 	__le32 cmd;
312 	__le32 addr0;
313 	__le32 addr1;
314 	__le32 chip_sel;
315 	__le32 exec;
316 
317 	__le32 cfg0;
318 	__le32 cfg1;
319 	__le32 ecc_bch_cfg;
320 
321 	__le32 clrflashstatus;
322 	__le32 clrreadstatus;
323 
324 	__le32 cmd1;
325 	__le32 vld;
326 
327 	__le32 orig_cmd1;
328 	__le32 orig_vld;
329 
330 	__le32 ecc_buf_cfg;
331 	__le32 read_location0;
332 	__le32 read_location1;
333 	__le32 read_location2;
334 	__le32 read_location3;
335 	__le32 read_location_last0;
336 	__le32 read_location_last1;
337 	__le32 read_location_last2;
338 	__le32 read_location_last3;
339 
340 	__le32 erased_cw_detect_cfg_clr;
341 	__le32 erased_cw_detect_cfg_set;
342 };
343 
344 /*
345  * NAND controller data struct
346  *
347  * @dev:			parent device
348  *
349  * @base:			MMIO base
350  *
351  * @core_clk:			controller clock
352  * @aon_clk:			another controller clock
353  *
354  * @regs:			a contiguous chunk of memory for DMA register
355  *				writes. contains the register values to be
356  *				written to controller
357  *
358  * @props:			properties of current NAND controller,
359  *				initialized via DT match data
360  *
361  * @controller:			base controller structure
362  * @host_list:			list containing all the chips attached to the
363  *				controller
364  *
365  * @chan:			dma channel
366  * @cmd_crci:			ADM DMA CRCI for command flow control
367  * @data_crci:			ADM DMA CRCI for data flow control
368  *
369  * @desc_list:			DMA descriptor list (list of desc_infos)
370  *
371  * @data_buffer:		our local DMA buffer for page read/writes,
372  *				used when we can't use the buffer provided
373  *				by upper layers directly
374  * @reg_read_buf:		local buffer for reading back registers via DMA
375  *
376  * @base_phys:			physical base address of controller registers
377  * @base_dma:			dma base address of controller registers
378  * @reg_read_dma:		contains dma address for register read buffer
379  *
380  * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
381  *				functions
382  * @max_cwperpage:		maximum QPIC codewords required. calculated
383  *				from the pagesize of all connected NAND devices
384  *
385  * @reg_read_pos:		marker for data read in reg_read_buf
386  *
387  * @cmd1/vld:			some fixed controller register values
388  *
389  * @exec_opwrite:		flag to select the correct number of codewords
390  *				while reading status
391  */
392 struct qcom_nand_controller {
393 	struct device *dev;
394 
395 	void __iomem *base;
396 
397 	struct clk *core_clk;
398 	struct clk *aon_clk;
399 
400 	struct nandc_regs *regs;
401 	struct bam_transaction *bam_txn;
402 
403 	const struct qcom_nandc_props *props;
404 
405 	struct nand_controller controller;
406 	struct list_head host_list;
407 
408 	union {
409 		/* will be used only by QPIC for BAM DMA */
410 		struct {
411 			struct dma_chan *tx_chan;
412 			struct dma_chan *rx_chan;
413 			struct dma_chan *cmd_chan;
414 		};
415 
416 		/* will be used only by EBI2 for ADM DMA */
417 		struct {
418 			struct dma_chan *chan;
419 			unsigned int cmd_crci;
420 			unsigned int data_crci;
421 		};
422 	};
423 
424 	struct list_head desc_list;
425 
426 	u8		*data_buffer;
427 	__le32		*reg_read_buf;
428 
429 	phys_addr_t base_phys;
430 	dma_addr_t base_dma;
431 	dma_addr_t reg_read_dma;
432 
433 	int		buf_size;
434 	int		buf_count;
435 	int		buf_start;
436 	unsigned int	max_cwperpage;
437 
438 	int reg_read_pos;
439 
440 	u32 cmd1, vld;
441 	bool exec_opwrite;
442 };
443 
444 /*
445  * NAND special boot partitions
446  *
447  * @page_offset:		offset of the partition where spare data is not protected
448  *				by ECC (value in pages)
449  * @page_size:			size of the partition where spare data is not protected
450  *				by ECC (value in pages)
451  */
452 struct qcom_nand_boot_partition {
453 	u32 page_offset;
454 	u32 page_size;
455 };
456 
457 /*
458  * Qcom op for each exec_op transfer
459  *
460  * @data_instr:			data instruction pointer
461  * @data_instr_idx:		data instruction index
462  * @rdy_timeout_ms:		wait ready timeout in ms
463  * @rdy_delay_ns:		Additional delay in ns
464  * @addr1_reg:			Address1 register value
465  * @addr2_reg:			Address2 register value
466  * @cmd_reg:			CMD register value
467  * @flag:			flag for misc instruction
468  */
469 struct qcom_op {
470 	const struct nand_op_instr *data_instr;
471 	unsigned int data_instr_idx;
472 	unsigned int rdy_timeout_ms;
473 	unsigned int rdy_delay_ns;
474 	u32 addr1_reg;
475 	u32 addr2_reg;
476 	u32 cmd_reg;
477 	u8 flag;
478 };
479 
480 /*
481  * NAND chip structure
482  *
483  * @boot_partitions:		array of boot partitions where offset and size of the
484  *				boot partitions are stored
485  *
486  * @chip:			base NAND chip structure
487  * @node:			list node to add itself to host_list in
488  *				qcom_nand_controller
489  *
490  * @nr_boot_partitions:		count of the boot partitions where spare data is not
491  *				protected by ECC
492  *
493  * @cs:				chip select value for this chip
494  * @cw_size:			the number of bytes in a single step/codeword
495  *				of a page, consisting of all data, ecc, spare
496  *				and reserved bytes
497  * @cw_data:			the number of bytes within a codeword protected
498  *				by ECC
499  * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
500  *				chip
501  *
502  * @last_command:		keeps track of last command on this chip. used
503  *				for reading correct status
504  *
505  * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
506  *				ecc/non-ecc mode for the current nand flash
507  *				device
508  *
509  * @status:			value to be returned if NAND_CMD_STATUS command
510  *				is executed
511  * @codeword_fixup:		keep track of the current layout used by
512  *				the driver for read/write operation.
513  * @use_ecc:			request the controller to use ECC for the
514  *				upcoming read/write
515  * @bch_enabled:		flag to tell whether BCH ECC mode is used
516  */
517 struct qcom_nand_host {
518 	struct qcom_nand_boot_partition *boot_partitions;
519 
520 	struct nand_chip chip;
521 	struct list_head node;
522 
523 	int nr_boot_partitions;
524 
525 	int cs;
526 	int cw_size;
527 	int cw_data;
528 	int ecc_bytes_hw;
529 	int spare_bytes;
530 	int bbm_size;
531 
532 	int last_command;
533 
534 	u32 cfg0, cfg1;
535 	u32 cfg0_raw, cfg1_raw;
536 	u32 ecc_buf_cfg;
537 	u32 ecc_bch_cfg;
538 	u32 clrflashstatus;
539 	u32 clrreadstatus;
540 
541 	u8 status;
542 	bool codeword_fixup;
543 	bool use_ecc;
544 	bool bch_enabled;
545 };
546 
547 /*
548  * This data type corresponds to the NAND controller properties which vary
549  * among different NAND controllers.
550  * @ecc_modes - ecc mode for NAND
551  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
552  * @is_bam - whether NAND controller is using BAM
553  * @is_qpic - whether NAND CTRL is part of qpic IP
554  * @qpic_v2 - flag to indicate QPIC IP version 2
555  * @use_codeword_fixup - whether NAND has different layout for boot partitions
556  */
557 struct qcom_nandc_props {
558 	u32 ecc_modes;
559 	u32 dev_cmd_reg_start;
560 	bool is_bam;
561 	bool is_qpic;
562 	bool qpic_v2;
563 	bool use_codeword_fixup;
564 };
565 
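/*
 * Illustrative sketch (an assumption based on typical EBI2 configurations,
 * not taken from this revision): a match-data instance for an ADM-based
 * EBI2 controller might look like this, with BAM disabled and the
 * NAND_DEV_CMD_* block at its default offset. The variable name is
 * hypothetical.
 */
static const struct qcom_nandc_props example_ebi2_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};
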
566 /* Frees the BAM transaction memory */
567 static void free_bam_transaction(struct qcom_nand_controller *nandc)
568 {
569 	struct bam_transaction *bam_txn = nandc->bam_txn;
570 
571 	devm_kfree(nandc->dev, bam_txn);
572 }
573 
574 /* Allocates and Initializes the BAM transaction */
575 static struct bam_transaction *
576 alloc_bam_transaction(struct qcom_nand_controller *nandc)
577 {
578 	struct bam_transaction *bam_txn;
579 	size_t bam_txn_size;
580 	unsigned int num_cw = nandc->max_cwperpage;
581 	void *bam_txn_buf;
582 
583 	bam_txn_size =
584 		sizeof(*bam_txn) + num_cw *
585 		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
586 		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
587 		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
588 
589 	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
590 	if (!bam_txn_buf)
591 		return NULL;
592 
593 	bam_txn = bam_txn_buf;
594 	bam_txn_buf += sizeof(*bam_txn);
595 
596 	bam_txn->bam_ce = bam_txn_buf;
597 	bam_txn_buf +=
598 		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
599 
600 	bam_txn->cmd_sgl = bam_txn_buf;
601 	bam_txn_buf +=
602 		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
603 
604 	bam_txn->data_sgl = bam_txn_buf;
605 
606 	init_completion(&bam_txn->txn_done);
607 
608 	return bam_txn;
609 }
610 
611 /* Clears the BAM transaction indexes */
612 static void clear_bam_transaction(struct qcom_nand_controller *nandc)
613 {
614 	struct bam_transaction *bam_txn = nandc->bam_txn;
615 
616 	if (!nandc->props->is_bam)
617 		return;
618 
619 	bam_txn->bam_ce_pos = 0;
620 	bam_txn->bam_ce_start = 0;
621 	bam_txn->cmd_sgl_pos = 0;
622 	bam_txn->cmd_sgl_start = 0;
623 	bam_txn->tx_sgl_pos = 0;
624 	bam_txn->tx_sgl_start = 0;
625 	bam_txn->rx_sgl_pos = 0;
626 	bam_txn->rx_sgl_start = 0;
627 	bam_txn->last_data_desc = NULL;
628 	bam_txn->wait_second_completion = false;
629 
630 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
631 		      QPIC_PER_CW_CMD_SGL);
632 	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
633 		      QPIC_PER_CW_DATA_SGL);
634 
635 	reinit_completion(&bam_txn->txn_done);
636 }
637 
638 /* Callback for DMA descriptor completion */
639 static void qpic_bam_dma_done(void *data)
640 {
641 	struct bam_transaction *bam_txn = data;
642 
643 	/*
644 	 * In case of data transfer with NAND, 2 callbacks will be generated.
645 	 * One for command channel and another one for data channel.
646 	 * If current transaction has data descriptors
647 	 * (i.e. wait_second_completion is true), then set this to false
648 	 * and wait for second DMA descriptor completion.
649 	 */
650 	if (bam_txn->wait_second_completion)
651 		bam_txn->wait_second_completion = false;
652 	else
653 		complete(&bam_txn->txn_done);
654 }
655 
656 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
657 {
658 	return container_of(chip, struct qcom_nand_host, chip);
659 }
660 
661 static inline struct qcom_nand_controller *
662 get_qcom_nand_controller(struct nand_chip *chip)
663 {
664 	return container_of(chip->controller, struct qcom_nand_controller,
665 			    controller);
666 }
667 
668 static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
669 {
670 	return ioread32(nandc->base + offset);
671 }
672 
673 static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
674 			       u32 val)
675 {
676 	iowrite32(val, nandc->base + offset);
677 }
678 
679 static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
680 					  bool is_cpu)
681 {
682 	if (!nandc->props->is_bam)
683 		return;
684 
685 	if (is_cpu)
686 		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
687 					MAX_REG_RD *
688 					sizeof(*nandc->reg_read_buf),
689 					DMA_FROM_DEVICE);
690 	else
691 		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
692 					   MAX_REG_RD *
693 					   sizeof(*nandc->reg_read_buf),
694 					   DMA_FROM_DEVICE);
695 }
696 
697 static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
698 {
699 	switch (offset) {
700 	case NAND_FLASH_CMD:
701 		return &regs->cmd;
702 	case NAND_ADDR0:
703 		return &regs->addr0;
704 	case NAND_ADDR1:
705 		return &regs->addr1;
706 	case NAND_FLASH_CHIP_SELECT:
707 		return &regs->chip_sel;
708 	case NAND_EXEC_CMD:
709 		return &regs->exec;
710 	case NAND_FLASH_STATUS:
711 		return &regs->clrflashstatus;
712 	case NAND_DEV0_CFG0:
713 		return &regs->cfg0;
714 	case NAND_DEV0_CFG1:
715 		return &regs->cfg1;
716 	case NAND_DEV0_ECC_CFG:
717 		return &regs->ecc_bch_cfg;
718 	case NAND_READ_STATUS:
719 		return &regs->clrreadstatus;
720 	case NAND_DEV_CMD1:
721 		return &regs->cmd1;
722 	case NAND_DEV_CMD1_RESTORE:
723 		return &regs->orig_cmd1;
724 	case NAND_DEV_CMD_VLD:
725 		return &regs->vld;
726 	case NAND_DEV_CMD_VLD_RESTORE:
727 		return &regs->orig_vld;
728 	case NAND_EBI2_ECC_BUF_CFG:
729 		return &regs->ecc_buf_cfg;
730 	case NAND_READ_LOCATION_0:
731 		return &regs->read_location0;
732 	case NAND_READ_LOCATION_1:
733 		return &regs->read_location1;
734 	case NAND_READ_LOCATION_2:
735 		return &regs->read_location2;
736 	case NAND_READ_LOCATION_3:
737 		return &regs->read_location3;
738 	case NAND_READ_LOCATION_LAST_CW_0:
739 		return &regs->read_location_last0;
740 	case NAND_READ_LOCATION_LAST_CW_1:
741 		return &regs->read_location_last1;
742 	case NAND_READ_LOCATION_LAST_CW_2:
743 		return &regs->read_location_last2;
744 	case NAND_READ_LOCATION_LAST_CW_3:
745 		return &regs->read_location_last3;
746 	default:
747 		return NULL;
748 	}
749 }
750 
751 static void nandc_set_reg(struct nand_chip *chip, int offset,
752 			  u32 val)
753 {
754 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
755 	struct nandc_regs *regs = nandc->regs;
756 	__le32 *reg;
757 
758 	reg = offset_to_nandc_reg(regs, offset);
759 
760 	if (reg)
761 		*reg = cpu_to_le32(val);
762 }
763 
764 /* Helper to check whether the given codeword is the last one of the page */
765 static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
766 {
767 	return cw == (ecc->steps - 1);
768 }
769 
770 /* helper to configure location register values */
771 static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
772 			       int cw_offset, int read_size, int is_last_read_loc)
773 {
774 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
775 	struct nand_ecc_ctrl *ecc = &chip->ecc;
776 	int reg_base = NAND_READ_LOCATION_0;
777 
778 	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
779 		reg_base = NAND_READ_LOCATION_LAST_CW_0;
780 
781 	reg_base += reg * 4;
782 
783 	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
784 		return nandc_set_read_loc_last(chip, reg_base, cw_offset,
785 				read_size, is_last_read_loc);
786 	else
787 		return nandc_set_read_loc_first(chip, reg_base, cw_offset,
788 				read_size, is_last_read_loc);
789 }
790 
791 /* helper to configure address register values */
792 static void set_address(struct qcom_nand_host *host, u16 column, int page)
793 {
794 	struct nand_chip *chip = &host->chip;
795 
796 	if (chip->options & NAND_BUSWIDTH_16)
797 		column >>= 1;
798 
799 	nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
800 	nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
801 }
802 
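/*
 * Worked example (illustration only): for page 0x10203 and column 0 on an
 * 8-bit bus, set_address() above programs
 * ADDR0 = (0x10203 << 16) | 0 = 0x02030000 (as a 32-bit value) and
 * ADDR1 = (0x10203 >> 16) & 0xff = 0x01.
 */
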
803 /*
804  * update_rw_regs:	set up read/write register values, these will be
805  *			written to the NAND controller registers via DMA
806  *
807  * @num_cw:		number of steps for the read/write operation
808  * @read:		read or write operation
809  * @cw:			which code word
810  */
811 static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
812 {
813 	struct nand_chip *chip = &host->chip;
814 	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
815 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
816 
817 	if (read) {
818 		if (host->use_ecc)
819 			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
820 		else
821 			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
822 	} else {
823 		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
824 	}
825 
826 	if (host->use_ecc) {
827 		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
828 				(num_cw - 1) << CW_PER_PAGE;
829 
830 		cfg1 = host->cfg1;
831 		ecc_bch_cfg = host->ecc_bch_cfg;
832 	} else {
833 		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
834 				(num_cw - 1) << CW_PER_PAGE;
835 
836 		cfg1 = host->cfg1_raw;
837 		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
838 	}
839 
840 	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
841 	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
842 	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
843 	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
844 	if (!nandc->props->qpic_v2)
845 		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
846 	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
847 	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
848 	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
849 
850 	if (read)
851 		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
852 				   host->cw_data : host->cw_size, 1);
853 }
854 
855 /*
856  * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
857  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
858  * which will be submitted to DMA engine.
859  */
860 static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
861 				  struct dma_chan *chan,
862 				  unsigned long flags)
863 {
864 	struct desc_info *desc;
865 	struct scatterlist *sgl;
866 	unsigned int sgl_cnt;
867 	int ret;
868 	struct bam_transaction *bam_txn = nandc->bam_txn;
869 	enum dma_transfer_direction dir_eng;
870 	struct dma_async_tx_descriptor *dma_desc;
871 
872 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
873 	if (!desc)
874 		return -ENOMEM;
875 
876 	if (chan == nandc->cmd_chan) {
877 		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
878 		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
879 		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
880 		dir_eng = DMA_MEM_TO_DEV;
881 		desc->dir = DMA_TO_DEVICE;
882 	} else if (chan == nandc->tx_chan) {
883 		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
884 		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
885 		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
886 		dir_eng = DMA_MEM_TO_DEV;
887 		desc->dir = DMA_TO_DEVICE;
888 	} else {
889 		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
890 		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
891 		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
892 		dir_eng = DMA_DEV_TO_MEM;
893 		desc->dir = DMA_FROM_DEVICE;
894 	}
895 
896 	sg_mark_end(sgl + sgl_cnt - 1);
897 	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
898 	if (ret == 0) {
899 		dev_err(nandc->dev, "failure in mapping desc\n");
900 		kfree(desc);
901 		return -ENOMEM;
902 	}
903 
904 	desc->sgl_cnt = sgl_cnt;
905 	desc->bam_sgl = sgl;
906 
907 	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
908 					   flags);
909 
910 	if (!dma_desc) {
911 		dev_err(nandc->dev, "failure in prep desc\n");
912 		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
913 		kfree(desc);
914 		return -EINVAL;
915 	}
916 
917 	desc->dma_desc = dma_desc;
918 
919 	/* update last data/command descriptor */
920 	if (chan == nandc->cmd_chan)
921 		bam_txn->last_cmd_desc = dma_desc;
922 	else
923 		bam_txn->last_data_desc = dma_desc;
924 
925 	list_add_tail(&desc->node, &nandc->desc_list);
926 
927 	return 0;
928 }
929 
930 /*
931  * Prepares the command descriptor for BAM DMA which will be used for NAND
932  * register reads and writes. The command descriptor requires the command
933  * to be formed as command elements, so this function takes command elements
934  * from the bam transaction's ce array and fills them with the required
935  * data. A single SGL can contain multiple command elements, so
936  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
937  * after the current command element.
938  */
939 static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
940 				 int reg_off, const void *vaddr,
941 				 int size, unsigned int flags)
942 {
943 	int bam_ce_size;
944 	int i, ret;
945 	struct bam_cmd_element *bam_ce_buffer;
946 	struct bam_transaction *bam_txn = nandc->bam_txn;
947 
948 	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
949 
950 	/* fill the command desc */
951 	for (i = 0; i < size; i++) {
952 		if (read)
953 			bam_prep_ce(&bam_ce_buffer[i],
954 				    nandc_reg_phys(nandc, reg_off + 4 * i),
955 				    BAM_READ_COMMAND,
956 				    reg_buf_dma_addr(nandc,
957 						     (__le32 *)vaddr + i));
958 		else
959 			bam_prep_ce_le32(&bam_ce_buffer[i],
960 					 nandc_reg_phys(nandc, reg_off + 4 * i),
961 					 BAM_WRITE_COMMAND,
962 					 *((__le32 *)vaddr + i));
963 	}
964 
965 	bam_txn->bam_ce_pos += size;
966 
967 	/* use the separate sgl after this command */
968 	if (flags & NAND_BAM_NEXT_SGL) {
969 		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
970 		bam_ce_size = (bam_txn->bam_ce_pos -
971 				bam_txn->bam_ce_start) *
972 				sizeof(struct bam_cmd_element);
973 		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
974 			   bam_ce_buffer, bam_ce_size);
975 		bam_txn->cmd_sgl_pos++;
976 		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
977 
978 		if (flags & NAND_BAM_NWD) {
979 			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
980 						     DMA_PREP_FENCE |
981 						     DMA_PREP_CMD);
982 			if (ret)
983 				return ret;
984 		}
985 	}
986 
987 	return 0;
988 }
989 
990 /*
991  * Prepares the data descriptor for BAM DMA which will be used for NAND
992  * data reads and writes.
993  */
994 static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
995 				  const void *vaddr,
996 				  int size, unsigned int flags)
997 {
998 	int ret;
999 	struct bam_transaction *bam_txn = nandc->bam_txn;
1000 
1001 	if (read) {
1002 		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
1003 			   vaddr, size);
1004 		bam_txn->rx_sgl_pos++;
1005 	} else {
1006 		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
1007 			   vaddr, size);
1008 		bam_txn->tx_sgl_pos++;
1009 
1010 		/*
1011 		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
1012 		 * is not set, form the DMA descriptor
1013 		 */
1014 		if (!(flags & NAND_BAM_NO_EOT)) {
1015 			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
1016 						     DMA_PREP_INTERRUPT);
1017 			if (ret)
1018 				return ret;
1019 		}
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
1026 			     int reg_off, const void *vaddr, int size,
1027 			     bool flow_control)
1028 {
1029 	struct desc_info *desc;
1030 	struct dma_async_tx_descriptor *dma_desc;
1031 	struct scatterlist *sgl;
1032 	struct dma_slave_config slave_conf;
1033 	struct qcom_adm_peripheral_config periph_conf = {};
1034 	enum dma_transfer_direction dir_eng;
1035 	int ret;
1036 
1037 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
1038 	if (!desc)
1039 		return -ENOMEM;
1040 
1041 	sgl = &desc->adm_sgl;
1042 
1043 	sg_init_one(sgl, vaddr, size);
1044 
1045 	if (read) {
1046 		dir_eng = DMA_DEV_TO_MEM;
1047 		desc->dir = DMA_FROM_DEVICE;
1048 	} else {
1049 		dir_eng = DMA_MEM_TO_DEV;
1050 		desc->dir = DMA_TO_DEVICE;
1051 	}
1052 
1053 	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
1054 	if (ret == 0) {
1055 		ret = -ENOMEM;
1056 		goto err;
1057 	}
1058 
1059 	memset(&slave_conf, 0x00, sizeof(slave_conf));
1060 
1061 	slave_conf.device_fc = flow_control;
1062 	if (read) {
1063 		slave_conf.src_maxburst = 16;
1064 		slave_conf.src_addr = nandc->base_dma + reg_off;
1065 		if (nandc->data_crci) {
1066 			periph_conf.crci = nandc->data_crci;
1067 			slave_conf.peripheral_config = &periph_conf;
1068 			slave_conf.peripheral_size = sizeof(periph_conf);
1069 		}
1070 	} else {
1071 		slave_conf.dst_maxburst = 16;
1072 		slave_conf.dst_addr = nandc->base_dma + reg_off;
1073 		if (nandc->cmd_crci) {
1074 			periph_conf.crci = nandc->cmd_crci;
1075 			slave_conf.peripheral_config = &periph_conf;
1076 			slave_conf.peripheral_size = sizeof(periph_conf);
1077 		}
1078 	}
1079 
1080 	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
1081 	if (ret) {
1082 		dev_err(nandc->dev, "failed to configure dma channel\n");
1083 		goto err;
1084 	}
1085 
1086 	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
1087 	if (!dma_desc) {
1088 		dev_err(nandc->dev, "failed to prepare desc\n");
1089 		ret = -EINVAL;
1090 		goto err;
1091 	}
1092 
1093 	desc->dma_desc = dma_desc;
1094 
1095 	list_add_tail(&desc->node, &nandc->desc_list);
1096 
1097 	return 0;
1098 err:
1099 	kfree(desc);
1100 
1101 	return ret;
1102 }
1103 
1104 /*
1105  * read_reg_dma:	prepares a descriptor to read a given number of
1106  *			contiguous registers to the reg_read_buf pointer
1107  *
1108  * @first:		offset of the first register in the contiguous block
1109  * @num_regs:		number of registers to read
1110  * @flags:		flags to control DMA descriptor preparation
1111  */
1112 static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
1113 			int num_regs, unsigned int flags)
1114 {
1115 	bool flow_control = false;
1116 	void *vaddr;
1117 
1118 	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
1119 	nandc->reg_read_pos += num_regs;
1120 
1121 	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
1122 		first = dev_cmd_reg_addr(nandc, first);
1123 
1124 	if (nandc->props->is_bam)
1125 		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
1126 					     num_regs, flags);
1127 
1128 	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
1129 		flow_control = true;
1130 
1131 	return prep_adm_dma_desc(nandc, true, first, vaddr,
1132 				 num_regs * sizeof(u32), flow_control);
1133 }
1134 
1135 /*
1136  * write_reg_dma:	prepares a descriptor to write a given number of
1137  *			contiguous registers
1138  *
1139  * @first:		offset of the first register in the contiguous block
1140  * @num_regs:		number of registers to write
1141  * @flags:		flags to control DMA descriptor preparation
1142  */
1143 static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
1144 			 int num_regs, unsigned int flags)
1145 {
1146 	bool flow_control = false;
1147 	struct nandc_regs *regs = nandc->regs;
1148 	void *vaddr;
1149 
1150 	vaddr = offset_to_nandc_reg(regs, first);
1151 
1152 	if (first == NAND_ERASED_CW_DETECT_CFG) {
1153 		if (flags & NAND_ERASED_CW_SET)
1154 			vaddr = &regs->erased_cw_detect_cfg_set;
1155 		else
1156 			vaddr = &regs->erased_cw_detect_cfg_clr;
1157 	}
1158 
1159 	if (first == NAND_EXEC_CMD)
1160 		flags |= NAND_BAM_NWD;
1161 
1162 	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
1163 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
1164 
1165 	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
1166 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
1167 
1168 	if (nandc->props->is_bam)
1169 		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
1170 					     num_regs, flags);
1171 
1172 	if (first == NAND_FLASH_CMD)
1173 		flow_control = true;
1174 
1175 	return prep_adm_dma_desc(nandc, false, first, vaddr,
1176 				 num_regs * sizeof(u32), flow_control);
1177 }
1178 
1179 /*
1180  * read_data_dma:	prepares a DMA descriptor to transfer data from the
1181  *			controller's internal buffer to the buffer 'vaddr'
1182  *
1183  * @reg_off:		offset within the controller's data buffer
1184  * @vaddr:		virtual address of the buffer we want to write to
1185  * @size:		DMA transaction size in bytes
1186  * @flags:		flags to control DMA descriptor preparation
1187  */
1188 static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1189 			 const u8 *vaddr, int size, unsigned int flags)
1190 {
1191 	if (nandc->props->is_bam)
1192 		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
1193 
1194 	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
1195 }
1196 
1197 /*
1198  * write_data_dma:	prepares a DMA descriptor to transfer data from
1199  *			'vaddr' to the controller's internal buffer
1200  *
1201  * @reg_off:		offset within the controller's data buffer
1202  * @vaddr:		virtual address of the buffer we want to read from
1203  * @size:		DMA transaction size in bytes
1204  * @flags:		flags to control DMA descriptor preparation
1205  */
1206 static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1207 			  const u8 *vaddr, int size, unsigned int flags)
1208 {
1209 	if (nandc->props->is_bam)
1210 		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
1211 
1212 	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
1213 }
1214 
1215 /*
1216  * Helper to prepare DMA descriptors for configuring registers
1217  * before reading a NAND page.
1218  */
1219 static void config_nand_page_read(struct nand_chip *chip)
1220 {
1221 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1222 
1223 	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
1224 	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
1225 	if (!nandc->props->qpic_v2)
1226 		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
1227 	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
1228 	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
1229 		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
1230 }
1231 
1232 /*
1233  * Helper to prepare DMA descriptors for configuring registers
1234  * before reading each codeword in NAND page.
1235  */
1236 static void
1237 config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
1238 {
1239 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1240 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1241 
1242 	int reg = NAND_READ_LOCATION_0;
1243 
1244 	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
1245 		reg = NAND_READ_LOCATION_LAST_CW_0;
1246 
1247 	if (nandc->props->is_bam)
1248 		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
1249 
1250 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
1251 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1252 
1253 	if (use_ecc) {
1254 		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
1255 		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
1256 			     NAND_BAM_NEXT_SGL);
1257 	} else {
1258 		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1259 	}
1260 }
1261 
1262 /*
1263  * Helper to prepare dma descriptors to configure registers needed for reading a
1264  * single codeword in page
1265  */
1266 static void
1267 config_nand_single_cw_page_read(struct nand_chip *chip,
1268 				bool use_ecc, int cw)
1269 {
1270 	config_nand_page_read(chip);
1271 	config_nand_cw_read(chip, use_ecc, cw);
1272 }
1273 
1274 /*
1275  * Helper to prepare DMA descriptors used to configure registers needed
1276  * before writing a NAND page.
1277  */
1278 static void config_nand_page_write(struct nand_chip *chip)
1279 {
1280 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1281 
1282 	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
1283 	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
1284 	if (!nandc->props->qpic_v2)
1285 		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
1286 			      NAND_BAM_NEXT_SGL);
1287 }
1288 
1289 /*
1290  * Helper to prepare DMA descriptors for configuring registers
1291  * before writing each codeword in NAND page.
1292  */
1293 static void config_nand_cw_write(struct nand_chip *chip)
1294 {
1295 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1296 
1297 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
1298 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1299 
1300 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1301 
1302 	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
1303 	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
1304 }
1305 
1306 /* helpers to submit/free our list of dma descriptors */
1307 static int submit_descs(struct qcom_nand_controller *nandc)
1308 {
1309 	struct desc_info *desc, *n;
1310 	dma_cookie_t cookie = 0;
1311 	struct bam_transaction *bam_txn = nandc->bam_txn;
1312 	int ret = 0;
1313 
1314 	if (nandc->props->is_bam) {
1315 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
1316 			ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
1317 			if (ret)
1318 				goto err_unmap_free_desc;
1319 		}
1320 
1321 		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
1322 			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
1323 						   DMA_PREP_INTERRUPT);
1324 			if (ret)
1325 				goto err_unmap_free_desc;
1326 		}
1327 
1328 		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
1329 			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
1330 						   DMA_PREP_CMD);
1331 			if (ret)
1332 				goto err_unmap_free_desc;
1333 		}
1334 	}
1335 
1336 	list_for_each_entry(desc, &nandc->desc_list, node)
1337 		cookie = dmaengine_submit(desc->dma_desc);
1338 
1339 	if (nandc->props->is_bam) {
1340 		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
1341 		bam_txn->last_cmd_desc->callback_param = bam_txn;
1342 		if (bam_txn->last_data_desc) {
1343 			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
1344 			bam_txn->last_data_desc->callback_param = bam_txn;
1345 			bam_txn->wait_second_completion = true;
1346 		}
1347 
1348 		dma_async_issue_pending(nandc->tx_chan);
1349 		dma_async_issue_pending(nandc->rx_chan);
1350 		dma_async_issue_pending(nandc->cmd_chan);
1351 
1352 		if (!wait_for_completion_timeout(&bam_txn->txn_done,
1353 						 QPIC_NAND_COMPLETION_TIMEOUT))
1354 			ret = -ETIMEDOUT;
1355 	} else {
1356 		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
1357 			ret = -ETIMEDOUT;
1358 	}
1359 
1360 err_unmap_free_desc:
1361 	/*
1362 	 * Unmap the dma sg_list and free the desc allocated by both
1363 	 * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
1364 	 */
1365 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
1366 		list_del(&desc->node);
1367 
1368 		if (nandc->props->is_bam)
1369 			dma_unmap_sg(nandc->dev, desc->bam_sgl,
1370 				     desc->sgl_cnt, desc->dir);
1371 		else
1372 			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
1373 				     desc->dir);
1374 
1375 		kfree(desc);
1376 	}
1377 
1378 	return ret;
1379 }
1380 
1381 /* reset the register read buffer for next NAND operation */
1382 static void clear_read_regs(struct qcom_nand_controller *nandc)
1383 {
1384 	nandc->reg_read_pos = 0;
1385 	nandc_read_buffer_sync(nandc, false);
1386 }
1387 
1388 /*
1389  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it reads
1390  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1391  *
1392  * when using RS ECC, the HW reports the same errors when reading an erased CW,
1393  * but it notifies that it is an erased CW by placing special characters at
1394  * certain offsets in the buffer.
1395  *
1396  * verify if the page is erased or not, and fix up the page for RS ECC by
1397  * replacing the special characters with 0xff.
1398  */
1399 static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1400 {
1401 	u8 empty1, empty2;
1402 
1403 	/*
1404 	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1405 	 * is erased by looking for 0x54s at offsets 3 and 175 from the
1406 	 * beginning of each codeword
1407 	 */
1408 
1409 	empty1 = data_buf[3];
1410 	empty2 = data_buf[175];
1411 
1412 	/*
1413 	 * if the erased codeword markers exist, override them with
1414 	 * 0xffs
1415 	 */
1416 	if ((empty1 == 0x54 && empty2 == 0xff) ||
1417 	    (empty1 == 0xff && empty2 == 0x54)) {
1418 		data_buf[3] = 0xff;
1419 		data_buf[175] = 0xff;
1420 	}
1421 
1422 	/*
1423 	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
1424 	 * restore the original values at the special offsets
1425 	 */
1426 	if (memchr_inv(data_buf, 0xff, data_len)) {
1427 		data_buf[3] = empty1;
1428 		data_buf[175] = empty2;
1429 
1430 		return false;
1431 	}
1432 
1433 	return true;
1434 }
1435 
1436 struct read_stats {
1437 	__le32 flash;
1438 	__le32 buffer;
1439 	__le32 erased_cw;
1440 };
1441 
1442 /* reads back FLASH_STATUS register set by the controller */
1443 static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
1444 {
1445 	struct nand_chip *chip = &host->chip;
1446 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1447 	int i;
1448 
1449 	nandc_read_buffer_sync(nandc, true);
1450 
1451 	for (i = 0; i < cw_cnt; i++) {
1452 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
1453 
1454 		if (flash & (FS_OP_ERR | FS_MPU_ERR))
1455 			return -EIO;
1456 	}
1457 
1458 	return 0;
1459 }
1460 
1461 /* performs raw read for one codeword */
1462 static int
1463 qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
1464 		       u8 *data_buf, u8 *oob_buf, int page, int cw)
1465 {
1466 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1467 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1468 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1469 	int data_size1, data_size2, oob_size1, oob_size2;
1470 	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
1471 	int raw_cw = cw;
1472 
1473 	nand_read_page_op(chip, page, 0, NULL, 0);
1474 	nandc->buf_count = 0;
1475 	nandc->buf_start = 0;
1476 	clear_read_regs(nandc);
1477 	host->use_ecc = false;
1478 
1479 	if (nandc->props->qpic_v2)
1480 		raw_cw = ecc->steps - 1;
1481 
1482 	clear_bam_transaction(nandc);
1483 	set_address(host, host->cw_size * cw, page);
1484 	update_rw_regs(host, 1, true, raw_cw);
1485 	config_nand_page_read(chip);
1486 
1487 	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1488 	oob_size1 = host->bbm_size;
1489 
1490 	if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
1491 		data_size2 = ecc->size - data_size1 -
1492 			     ((ecc->steps - 1) * 4);
1493 		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
1494 			    host->spare_bytes;
1495 	} else {
1496 		data_size2 = host->cw_data - data_size1;
1497 		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1498 	}
1499 
1500 	if (nandc->props->is_bam) {
1501 		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
1502 		read_loc += data_size1;
1503 
1504 		nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
1505 		read_loc += oob_size1;
1506 
1507 		nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
1508 		read_loc += data_size2;
1509 
1510 		nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
1511 	}
1512 
1513 	config_nand_cw_read(chip, false, raw_cw);
1514 
1515 	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1516 	reg_off += data_size1;
1517 
1518 	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1519 	reg_off += oob_size1;
1520 
1521 	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
1522 	reg_off += data_size2;
1523 
1524 	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
1525 
1526 	ret = submit_descs(nandc);
1527 	if (ret) {
1528 		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
1529 		return ret;
1530 	}
1531 
1532 	return check_flash_errors(host, 1);
1533 }
1534 
1535 /*
1536  * Bitflips can happen in erased codewords as well, so this function counts the
1537  * number of 0 bits in each CW for which the ECC engine returns an uncorrectable
1538  * error. The page is assumed to be erased if this count is less than or
1539  * equal to ecc->strength for each CW.
1540  *
1541  * 1. Both DATA and OOB need to be checked for number of 0. The
1542  *    top-level API can be called with only data buf or OOB buf so use
1543  *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
1544  *    is null for copying the raw bytes.
1545  * 2. Perform a raw read for all the CWs which have uncorrectable errors.
1546  * 3. For each CW, check the number of 0 bits in cw_data and the usable
1547  *    OOB bytes. Bitflips in the BBM and spare bytes won't affect the ECC,
1548  *    so don't count the bitflips in this area.
1549  */
1550 static int
1551 check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
1552 		      u8 *oob_buf, unsigned long uncorrectable_cws,
1553 		      int page, unsigned int max_bitflips)
1554 {
1555 	struct nand_chip *chip = &host->chip;
1556 	struct mtd_info *mtd = nand_to_mtd(chip);
1557 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1558 	u8 *cw_data_buf, *cw_oob_buf;
1559 	int cw, data_size, oob_size, ret;
1560 
1561 	if (!data_buf)
1562 		data_buf = nand_get_data_buf(chip);
1563 
1564 	if (!oob_buf) {
1565 		nand_get_data_buf(chip);
1566 		oob_buf = chip->oob_poi;
1567 	}
1568 
1569 	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
1570 		if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
1571 			data_size = ecc->size - ((ecc->steps - 1) * 4);
1572 			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
1573 		} else {
1574 			data_size = host->cw_data;
1575 			oob_size = host->ecc_bytes_hw;
1576 		}
1577 
1578 		/* determine starting buffer address for current CW */
1579 		cw_data_buf = data_buf + (cw * host->cw_data);
1580 		cw_oob_buf = oob_buf + (cw * ecc->bytes);
1581 
1582 		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
1583 					     cw_oob_buf, page, cw);
1584 		if (ret)
1585 			return ret;
1586 
1587 		/*
1588 		 * make sure it isn't an erased page reported
1589 		 * as not-erased by HW because of a few bitflips
1590 		 */
1591 		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
1592 						  cw_oob_buf + host->bbm_size,
1593 						  oob_size, NULL,
1594 						  0, ecc->strength);
1595 		if (ret < 0) {
1596 			mtd->ecc_stats.failed++;
1597 		} else {
1598 			mtd->ecc_stats.corrected += ret;
1599 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
1600 		}
1601 	}
1602 
1603 	return max_bitflips;
1604 }
1605 
1606 /*
1607  * reads back status registers set by the controller to notify page read
1608  * errors. this is equivalent to what 'ecc->correct()' would do.
1609  */
1610 static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1611 			     u8 *oob_buf, int page)
1612 {
1613 	struct nand_chip *chip = &host->chip;
1614 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1615 	struct mtd_info *mtd = nand_to_mtd(chip);
1616 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1617 	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
1618 	struct read_stats *buf;
1619 	bool flash_op_err = false, erased;
1620 	int i;
1621 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1622 
1623 	buf = (struct read_stats *)nandc->reg_read_buf;
1624 	nandc_read_buffer_sync(nandc, true);
1625 
1626 	for (i = 0; i < ecc->steps; i++, buf++) {
1627 		u32 flash, buffer, erased_cw;
1628 		int data_len, oob_len;
1629 
1630 		if (qcom_nandc_is_last_cw(ecc, i)) {
1631 			data_len = ecc->size - ((ecc->steps - 1) << 2);
1632 			oob_len = ecc->steps << 2;
1633 		} else {
1634 			data_len = host->cw_data;
1635 			oob_len = 0;
1636 		}
1637 
1638 		flash = le32_to_cpu(buf->flash);
1639 		buffer = le32_to_cpu(buf->buffer);
1640 		erased_cw = le32_to_cpu(buf->erased_cw);
1641 
1642 		/*
1643 		 * Check ECC failure for each codeword. ECC failure can
1644 		 * happen in either of the following conditions
1645 		 * 1. If number of bitflips are greater than ECC engine
1646 		 *    capability.
1647 		 * 2. If this codeword contains all 0xff for which erased
1648 		 *    codeword detection check will be done.
1649 		 */
1650 		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1651 			/*
1652 			 * For BCH ECC, ignore erased codeword errors, if
1653 			 * ERASED_CW bits are set.
1654 			 */
1655 			if (host->bch_enabled) {
1656 				erased = (erased_cw & ERASED_CW) == ERASED_CW;
1657 			/*
1658 			 * For RS ECC, HW reports the erased CW by placing
1659 			 * special characters at certain offsets in the buffer.
1660 			 * These special characters will be valid only if
1661 			 * complete page is read i.e. data_buf is not NULL.
1662 			 */
1663 			} else if (data_buf) {
1664 				erased = erased_chunk_check_and_fixup(data_buf,
1665 								      data_len);
1666 			} else {
1667 				erased = false;
1668 			}
1669 
1670 			if (!erased)
1671 				uncorrectable_cws |= BIT(i);
1672 		/*
1673 		 * Check if MPU or any other operational error (timeout,
1674 		 * device failure, etc.) happened for this codeword and
1675 		 * make flash_op_err true. If flash_op_err is set, then
1676 		 * EIO will be returned for page read.
1677 		 */
1678 		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1679 			flash_op_err = true;
1680 		/*
1681 		 * No ECC or operational errors happened. Check the number of
1682 		 * bits corrected and update the ecc_stats.corrected.
1683 		 */
1684 		} else {
1685 			unsigned int stat;
1686 
1687 			stat = buffer & BS_CORRECTABLE_ERR_MSK;
1688 			mtd->ecc_stats.corrected += stat;
1689 			max_bitflips = max(max_bitflips, stat);
1690 		}
1691 
1692 		if (data_buf)
1693 			data_buf += data_len;
1694 		if (oob_buf)
1695 			oob_buf += oob_len + ecc->bytes;
1696 	}
1697 
1698 	if (flash_op_err)
1699 		return -EIO;
1700 
1701 	if (!uncorrectable_cws)
1702 		return max_bitflips;
1703 
1704 	return check_for_erased_page(host, data_buf_start, oob_buf_start,
1705 				     uncorrectable_cws, page,
1706 				     max_bitflips);
1707 }
1708 
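/*
 * Editorial note (illustrative, not part of the original source): a
 * worked example of the status decoding in parse_read_errors() above.
 * For a codeword where the controller reports flash = 0x20
 * (FS_READY_BSY_N set, FS_OP_ERR clear) and buffer = 0x3, the codeword
 * is good and (buffer & BS_CORRECTABLE_ERR_MSK) = 3 corrected bitflips
 * are added to ecc_stats. If instead FS_OP_ERR and BS_UNCORRECTABLE_BIT
 * are both set, the codeword only counts as an ECC failure after the
 * erased-codeword checks (ERASED_CW bits for BCH, special characters in
 * the data for RS) have ruled out an erased page.
 */
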
1709 /*
1710  * helper to perform the actual page read operation, used by ecc->read_page(),
1711  * ecc->read_oob()
1712  */
1713 static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1714 			 u8 *oob_buf, int page)
1715 {
1716 	struct nand_chip *chip = &host->chip;
1717 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1718 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1719 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
1720 	int i, ret;
1721 
1722 	config_nand_page_read(chip);
1723 
1724 	/* queue cmd descs for each codeword */
1725 	for (i = 0; i < ecc->steps; i++) {
1726 		int data_size, oob_size;
1727 
1728 		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
1729 			data_size = ecc->size - ((ecc->steps - 1) << 2);
1730 			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1731 				   host->spare_bytes;
1732 		} else {
1733 			data_size = host->cw_data;
1734 			oob_size = host->ecc_bytes_hw + host->spare_bytes;
1735 		}
1736 
1737 		if (nandc->props->is_bam) {
1738 			if (data_buf && oob_buf) {
1739 				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
1740 				nandc_set_read_loc(chip, i, 1, data_size,
1741 						   oob_size, 1);
1742 			} else if (data_buf) {
1743 				nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
1744 			} else {
1745 				nandc_set_read_loc(chip, i, 0, data_size,
1746 						   oob_size, 1);
1747 			}
1748 		}
1749 
1750 		config_nand_cw_read(chip, true, i);
1751 
1752 		if (data_buf)
1753 			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1754 				      data_size, 0);
1755 
1756 		/*
1757 		 * when ecc is enabled, the controller doesn't read the real
1758 		 * or dummy bad block markers in each chunk. To maintain a
1759 		 * consistent layout across RAW and ECC reads, we just
1760 		 * leave the real/dummy BBM offsets empty (i.e, filled with
1761 		 * leave the real/dummy BBM offsets empty (i.e., filled with
1762 		 */
1763 		if (oob_buf) {
1764 			int j;
1765 
1766 			for (j = 0; j < host->bbm_size; j++)
1767 				*oob_buf++ = 0xff;
1768 
1769 			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1770 				      oob_buf, oob_size, 0);
1771 		}
1772 
1773 		if (data_buf)
1774 			data_buf += data_size;
1775 		if (oob_buf)
1776 			oob_buf += oob_size;
1777 	}
1778 
1779 	ret = submit_descs(nandc);
1780 	if (ret) {
1781 		dev_err(nandc->dev, "failure to read page/oob\n");
1782 		return ret;
1783 	}
1784 
1785 	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
1786 }
1787 
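/*
 * Editorial note (illustrative, not part of the original source): on BAM
 * controllers, for a non-last codeword read with both data_buf and
 * oob_buf supplied, and assuming 4 bit BCH on an x8 bus (cw_data = 516,
 * ecc_bytes_hw = 7, spare_bytes = 4), read_page_ecc() programs two read
 * locations:
 *
 *   location 0: offset 0,   size 516 (user data),   last = 0
 *   location 1: offset 516, size 11  (ECC + spare), last = 1
 *
 * so a single codeword transfer is split between the data and oob
 * destinations without an extra copy.
 */
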
1788 /*
1789  * a helper that copies the last step/codeword of a page (containing free oob)
1790  * into our local buffer
1791  */
1792 static int copy_last_cw(struct qcom_nand_host *host, int page)
1793 {
1794 	struct nand_chip *chip = &host->chip;
1795 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1796 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1797 	int size;
1798 	int ret;
1799 
1800 	clear_read_regs(nandc);
1801 
1802 	size = host->use_ecc ? host->cw_data : host->cw_size;
1803 
1804 	/* prepare a clean read buffer */
1805 	memset(nandc->data_buffer, 0xff, size);
1806 
1807 	set_address(host, host->cw_size * (ecc->steps - 1), page);
1808 	update_rw_regs(host, 1, true, ecc->steps - 1);
1809 
1810 	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
1811 
1812 	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
1813 
1814 	ret = submit_descs(nandc);
1815 	if (ret)
1816 		dev_err(nandc->dev, "failed to copy last codeword\n");
1817 
1818 	return ret;
1819 }
1820 
1821 static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
1822 {
1823 	struct qcom_nand_boot_partition *boot_partition;
1824 	u32 start, end;
1825 	int i;
1826 
1827 	/*
1828 	 * Since the frequent accesses will be to non-boot partitions like rootfs,
1829 	 * optimize the page check by:
1830 	 *
1831 	 * 1. Checking if the page lies after the end of the last boot partition.
1832 	 * 2. Checking the boot partitions in reverse order, starting from the last.
1833 	 */
1834 
1835 	/* First check the last boot partition */
1836 	boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
1837 	start = boot_partition->page_offset;
1838 	end = start + boot_partition->page_size;
1839 
1840 	/* Page is after the last boot partition end. This is NOT a boot partition */
1841 	if (page > end)
1842 		return false;
1843 
1844 	/* Actually check if it's a boot partition */
1845 	if (page < end && page >= start)
1846 		return true;
1847 
1848 	/* Check the other boot partitions starting from the second-last partition */
1849 	for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
1850 		boot_partition = &host->boot_partitions[i];
1851 		start = boot_partition->page_offset;
1852 		end = start + boot_partition->page_size;
1853 
1854 		if (page < end && page >= start)
1855 			return true;
1856 	}
1857 
1858 	return false;
1859 }
1860 
1861 static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
1862 {
1863 	bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
1864 
1865 	/* Skip conf write if we are already in the correct mode */
1866 	if (codeword_fixup == host->codeword_fixup)
1867 		return;
1868 
1869 	host->codeword_fixup = codeword_fixup;
1870 
1871 	host->cw_data = codeword_fixup ? 512 : 516;
1872 	host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
1873 			    host->bbm_size - host->cw_data;
1874 
1875 	host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
1876 	host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
1877 		      host->cw_data << UD_SIZE_BYTES;
1878 
1879 	host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
1880 	host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
1881 	host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
1882 }
1883 
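/*
 * Editorial note (illustrative, not part of the original source): with
 * 4 bit BCH on an x8 bus (cw_size = 528, ecc_bytes_hw = 7,
 * bbm_size = 1), the fixup above switches between:
 *
 *   normal pages: cw_data = 516, spare_bytes = 528 - 7 - 1 - 516 = 4
 *   boot pages:   cw_data = 512, spare_bytes = 528 - 7 - 1 - 512 = 8
 *
 * i.e. pages inside a boot partition leave the 4 per-CW spare bytes out
 * of the ECC-protected user data, which is the layout the boot code on
 * the affected SoCs expects.
 */
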
1884 /* implements ecc->read_page() */
1885 static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
1886 				int oob_required, int page)
1887 {
1888 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1889 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1890 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1891 	u8 *data_buf, *oob_buf = NULL;
1892 
1893 	if (host->nr_boot_partitions)
1894 		qcom_nandc_codeword_fixup(host, page);
1895 
1896 	nand_read_page_op(chip, page, 0, NULL, 0);
1897 	nandc->buf_count = 0;
1898 	nandc->buf_start = 0;
1899 	host->use_ecc = true;
1900 	clear_read_regs(nandc);
1901 	set_address(host, 0, page);
1902 	update_rw_regs(host, ecc->steps, true, 0);
1903 
1904 	data_buf = buf;
1905 	oob_buf = oob_required ? chip->oob_poi : NULL;
1906 
1907 	clear_bam_transaction(nandc);
1908 
1909 	return read_page_ecc(host, data_buf, oob_buf, page);
1910 }
1911 
1912 /* implements ecc->read_page_raw() */
1913 static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
1914 				    int oob_required, int page)
1915 {
1916 	struct mtd_info *mtd = nand_to_mtd(chip);
1917 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1918 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1919 	int cw, ret;
1920 	u8 *data_buf = buf, *oob_buf = chip->oob_poi;
1921 
1922 	if (host->nr_boot_partitions)
1923 		qcom_nandc_codeword_fixup(host, page);
1924 
1925 	for (cw = 0; cw < ecc->steps; cw++) {
1926 		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
1927 					     page, cw);
1928 		if (ret)
1929 			return ret;
1930 
1931 		data_buf += host->cw_data;
1932 		oob_buf += ecc->bytes;
1933 	}
1934 
1935 	return 0;
1936 }
1937 
1938 /* implements ecc->read_oob() */
1939 static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
1940 {
1941 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1942 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1943 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1944 
1945 	if (host->nr_boot_partitions)
1946 		qcom_nandc_codeword_fixup(host, page);
1947 
1948 	clear_read_regs(nandc);
1949 	clear_bam_transaction(nandc);
1950 
1951 	host->use_ecc = true;
1952 	set_address(host, 0, page);
1953 	update_rw_regs(host, ecc->steps, true, 0);
1954 
1955 	return read_page_ecc(host, NULL, chip->oob_poi, page);
1956 }
1957 
1958 /* implements ecc->write_page() */
1959 static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
1960 				 int oob_required, int page)
1961 {
1962 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1963 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1964 	struct nand_ecc_ctrl *ecc = &chip->ecc;
1965 	u8 *data_buf, *oob_buf;
1966 	int i, ret;
1967 
1968 	if (host->nr_boot_partitions)
1969 		qcom_nandc_codeword_fixup(host, page);
1970 
1971 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1972 
1973 	set_address(host, 0, page);
1974 	nandc->buf_count = 0;
1975 	nandc->buf_start = 0;
1976 	clear_read_regs(nandc);
1977 	clear_bam_transaction(nandc);
1978 
1979 	data_buf = (u8 *)buf;
1980 	oob_buf = chip->oob_poi;
1981 
1982 	host->use_ecc = true;
1983 	update_rw_regs(host, ecc->steps, false, 0);
1984 	config_nand_page_write(chip);
1985 
1986 	for (i = 0; i < ecc->steps; i++) {
1987 		int data_size, oob_size;
1988 
1989 		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
1990 			data_size = ecc->size - ((ecc->steps - 1) << 2);
1991 			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1992 				   host->spare_bytes;
1993 		} else {
1994 			data_size = host->cw_data;
1995 			oob_size = ecc->bytes;
1996 		}
1997 
1998 		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
1999 			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
2000 
2001 		/*
2002 		 * when ECC is enabled, we don't really need to write anything
2003 		 * to oob for the first n - 1 codewords since these oob regions
2004 		 * just contain ECC bytes that are written by the controller
2005 		 * itself. For the last codeword, we skip the bbm positions and
2006 		 * write to the free oob area.
2007 		 */
2008 		if (qcom_nandc_is_last_cw(ecc, i)) {
2009 			oob_buf += host->bbm_size;
2010 
2011 			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
2012 				       oob_buf, oob_size, 0);
2013 		}
2014 
2015 		config_nand_cw_write(chip);
2016 
2017 		data_buf += data_size;
2018 		oob_buf += oob_size;
2019 	}
2020 
2021 	ret = submit_descs(nandc);
2022 	if (ret) {
2023 		dev_err(nandc->dev, "failure to write page\n");
2024 		return ret;
2025 	}
2026 
2027 	return nand_prog_page_end_op(chip);
2028 }
2029 
2030 /* implements ecc->write_page_raw() */
2031 static int qcom_nandc_write_page_raw(struct nand_chip *chip,
2032 				     const u8 *buf, int oob_required,
2033 				     int page)
2034 {
2035 	struct mtd_info *mtd = nand_to_mtd(chip);
2036 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2037 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2038 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2039 	u8 *data_buf, *oob_buf;
2040 	int i, ret;
2041 
2042 	if (host->nr_boot_partitions)
2043 		qcom_nandc_codeword_fixup(host, page);
2044 
2045 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2046 	clear_read_regs(nandc);
2047 	clear_bam_transaction(nandc);
2048 
2049 	data_buf = (u8 *)buf;
2050 	oob_buf = chip->oob_poi;
2051 
2052 	host->use_ecc = false;
2053 	update_rw_regs(host, ecc->steps, false, 0);
2054 	config_nand_page_write(chip);
2055 
2056 	for (i = 0; i < ecc->steps; i++) {
2057 		int data_size1, data_size2, oob_size1, oob_size2;
2058 		int reg_off = FLASH_BUF_ACC;
2059 
2060 		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
2061 		oob_size1 = host->bbm_size;
2062 
2063 		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
2064 			data_size2 = ecc->size - data_size1 -
2065 				     ((ecc->steps - 1) << 2);
2066 			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
2067 				    host->spare_bytes;
2068 		} else {
2069 			data_size2 = host->cw_data - data_size1;
2070 			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
2071 		}
2072 
2073 		write_data_dma(nandc, reg_off, data_buf, data_size1,
2074 			       NAND_BAM_NO_EOT);
2075 		reg_off += data_size1;
2076 		data_buf += data_size1;
2077 
2078 		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
2079 			       NAND_BAM_NO_EOT);
2080 		reg_off += oob_size1;
2081 		oob_buf += oob_size1;
2082 
2083 		write_data_dma(nandc, reg_off, data_buf, data_size2,
2084 			       NAND_BAM_NO_EOT);
2085 		reg_off += data_size2;
2086 		data_buf += data_size2;
2087 
2088 		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
2089 		oob_buf += oob_size2;
2090 
2091 		config_nand_cw_write(chip);
2092 	}
2093 
2094 	ret = submit_descs(nandc);
2095 	if (ret) {
2096 		dev_err(nandc->dev, "failure to write raw page\n");
2097 		return ret;
2098 	}
2099 
2100 	return nand_prog_page_end_op(chip);
2101 }
2102 
2103 /*
2104  * implements ecc->write_oob()
2105  *
2106  * the NAND controller cannot write only data or only OOB within a codeword
2107  * since ECC is calculated for the combined codeword. So update the OOB from
2108  * chip->oob_poi, and pad the data area with 0xFF before writing.
2109  */
2110 static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
2111 {
2112 	struct mtd_info *mtd = nand_to_mtd(chip);
2113 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2114 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2115 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2116 	u8 *oob = chip->oob_poi;
2117 	int data_size, oob_size;
2118 	int ret;
2119 
2120 	if (host->nr_boot_partitions)
2121 		qcom_nandc_codeword_fixup(host, page);
2122 
2123 	host->use_ecc = true;
2124 	clear_bam_transaction(nandc);
2125 
2126 	/* calculate the data and oob size for the last codeword/step */
2127 	data_size = ecc->size - ((ecc->steps - 1) << 2);
2128 	oob_size = mtd->oobavail;
2129 
2130 	memset(nandc->data_buffer, 0xff, host->cw_data);
2131 	/* copy the new oob content into the last codeword */
2132 	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2133 				    0, mtd->oobavail);
2134 
2135 	set_address(host, host->cw_size * (ecc->steps - 1), page);
2136 	update_rw_regs(host, 1, false, 0);
2137 
2138 	config_nand_page_write(chip);
2139 	write_data_dma(nandc, FLASH_BUF_ACC,
2140 		       nandc->data_buffer, data_size + oob_size, 0);
2141 	config_nand_cw_write(chip);
2142 
2143 	ret = submit_descs(nandc);
2144 	if (ret) {
2145 		dev_err(nandc->dev, "failure to write oob\n");
2146 		return ret;
2147 	}
2148 
2149 	return nand_prog_page_end_op(chip);
2150 }
2151 
2152 static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
2153 {
2154 	struct mtd_info *mtd = nand_to_mtd(chip);
2155 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2156 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2157 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2158 	int page, ret, bbpos, bad = 0;
2159 
2160 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2161 
2162 	/*
2163 	 * configure registers for a raw sub page read. the address is set to
2164 	 * the beginning of the last codeword; we don't care about reading the
2165 	 * ecc portion of oob. we just want the first few bytes from this
2166 	 * codeword that contain the BBM
2167 	 */
2168 	host->use_ecc = false;
2169 
2170 	clear_bam_transaction(nandc);
2171 	ret = copy_last_cw(host, page);
2172 	if (ret)
2173 		goto err;
2174 
2175 	if (check_flash_errors(host, 1)) {
2176 		dev_warn(nandc->dev, "error when trying to read BBM\n");
2177 		goto err;
2178 	}
2179 
2180 	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2181 
2182 	bad = nandc->data_buffer[bbpos] != 0xff;
2183 
2184 	if (chip->options & NAND_BUSWIDTH_16)
2185 		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2186 err:
2187 	return bad;
2188 }
2189 
2190 static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
2191 {
2192 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2193 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2194 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2195 	int page, ret;
2196 
2197 	clear_read_regs(nandc);
2198 	clear_bam_transaction(nandc);
2199 
2200 	/*
2201 	 * to mark the block as bad, we flash the entire last codeword (which
2202 	 * contains the BBM) with 0s. we don't care about the rest of the
2203 	 * content in the codeword since we aren't going to use this block again
2204 	 */
2205 	memset(nandc->data_buffer, 0x00, host->cw_size);
2206 
2207 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2208 
2209 	/* prepare write */
2210 	host->use_ecc = false;
2211 	set_address(host, host->cw_size * (ecc->steps - 1), page);
2212 	update_rw_regs(host, 1, false, ecc->steps - 1);
2213 
2214 	config_nand_page_write(chip);
2215 	write_data_dma(nandc, FLASH_BUF_ACC,
2216 		       nandc->data_buffer, host->cw_size, 0);
2217 	config_nand_cw_write(chip);
2218 
2219 	ret = submit_descs(nandc);
2220 	if (ret) {
2221 		dev_err(nandc->dev, "failure to update BBM\n");
2222 		return ret;
2223 	}
2224 
2225 	return nand_prog_page_end_op(chip);
2226 }
2227 
2228 /*
2229  * NAND controller page layout info
2230  *
2231  * Layout with ECC enabled:
2232  *
2233  * |----------------------|  |---------------------------------|
2234  * |           xx.......yy|  |             *********xx.......yy|
2235  * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
2236  * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
2237  * |           xx.......yy|  |             *********xx.......yy|
2238  * |----------------------|  |---------------------------------|
2239  *     codeword 1,2..n-1                  codeword n
2240  *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
2241  *
2242  * n = Number of codewords in the page
2243  * . = ECC bytes
2244  * * = Spare/free bytes
2245  * x = Unused byte(s)
2246  * y = Reserved byte(s)
2247  *
2248  * 2K page: n = 4, spare = 16 bytes
2249  * 4K page: n = 8, spare = 32 bytes
2250  * 8K page: n = 16, spare = 64 bytes
2251  *
2252  * the qcom nand controller operates at a sub page/codeword level. each
2253  * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2254  * the number of ECC bytes varies based on the ECC strength and the bus width.
2255  *
2256  * the first n - 1 codewords contain 516 bytes of user data, the remaining
2257  * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2258  * both user data and spare (oobavail) bytes that sum up to 516 bytes.
2259  *
2260  * When we access a page with ECC enabled, the reserved byte(s) are not
2261  * accessible at all. When reading, we fill up these unreadable positions
2262  * with 0xffs. When writing, the controller skips writing the inaccessible
2263  * bytes.
2264  *
2265  * Layout with ECC disabled:
2266  *
2267  * |------------------------------|  |---------------------------------------|
2268  * |         yy          xx.......|  |         bb          *********xx.......|
2269  * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
2270  * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
2271  * |         yy          xx.......|  |         bb          *********xx.......|
2272  * |------------------------------|  |---------------------------------------|
2273  *         codeword 1,2..n-1                        codeword n
2274  *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
2275  *
2276  * n = Number of codewords in the page
2277  * . = ECC bytes
2278  * * = Spare/free bytes
2279  * x = Unused byte(s)
2280  * y = Dummy Bad Block byte(s)
2281  * b = Real Bad Block byte(s)
2282  * size1/size2 = function of codeword size and 'n'
2283  *
2284  * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2285  * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2286  * Block Markers. In the last codeword, this position contains the real BBM
2287  *
2288  * In order to have a consistent layout between RAW and ECC modes, we assume
2289  * the following OOB layout arrangement:
2290  *
2291  * |-----------|  |--------------------|
2292  * |yyxx.......|  |bb*********xx.......|
2293  * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
2294  * |yyxx.......|  |bb*********xx.......|
2295  * |yyxx.......|  |bb*********xx.......|
2296  * |-----------|  |--------------------|
2297  *  first n - 1       nth OOB region
2298  *  OOB regions
2299  *
2300  * n = Number of codewords in the page
2301  * . = ECC bytes
2302  * * = FREE OOB bytes
2303  * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2304  * x = Unused byte(s)
2305  * b = Real bad block byte(s) (inaccessible when ECC enabled)
2306  *
2307  * This layout is read as is when ECC is disabled. When ECC is enabled, the
2308  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2309  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
2310  * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2311  * the sum of the three).
2312  */
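
/*
 * Editorial note (illustrative, not part of the original source): a
 * worked example of the layout above for a 2K page with 64 byte OOB,
 * 4 bit RS ECC on an x8 bus (ecc_bytes_hw = 10, spare_bytes = 1,
 * bbm_size = 1, so ecc->bytes = 12, as set up in
 * qcom_nand_attach_chip() below):
 *
 *   n = 2048 / 512 = 4 codewords
 *   last CW user data = 516 - (4 - 1) * 4 = 504 bytes
 *   OOB ECC section 0 = 12 * (4 - 1) + 1 = 37 bytes at offset 0
 *   OOB free section  = 4 * 4 = 16 bytes at offset 37
 *   OOB ECC section 1 = 10 + 1 = 11 bytes at offset 64 - 11 = 53
 *
 * which accounts for the full OOB area: 37 + 16 + 11 = 64 bytes.
 */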
2313 static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2314 				   struct mtd_oob_region *oobregion)
2315 {
2316 	struct nand_chip *chip = mtd_to_nand(mtd);
2317 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2318 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2319 
2320 	if (section > 1)
2321 		return -ERANGE;
2322 
2323 	if (!section) {
2324 		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2325 				    host->bbm_size;
2326 		oobregion->offset = 0;
2327 	} else {
2328 		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2329 		oobregion->offset = mtd->oobsize - oobregion->length;
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2336 				    struct mtd_oob_region *oobregion)
2337 {
2338 	struct nand_chip *chip = mtd_to_nand(mtd);
2339 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2340 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2341 
2342 	if (section)
2343 		return -ERANGE;
2344 
2345 	oobregion->length = ecc->steps * 4;
2346 	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2347 
2348 	return 0;
2349 }
2350 
2351 static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2352 	.ecc = qcom_nand_ooblayout_ecc,
2353 	.free = qcom_nand_ooblayout_free,
2354 };
2355 
2356 static int
2357 qcom_nandc_calc_ecc_bytes(int step_size, int strength)
2358 {
2359 	return strength == 4 ? 12 : 16;
2360 }
2361 
2362 NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
2363 		     NANDC_STEP_SIZE, 4, 8);
2364 
2365 static int qcom_nand_attach_chip(struct nand_chip *chip)
2366 {
2367 	struct mtd_info *mtd = nand_to_mtd(chip);
2368 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2369 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2370 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2371 	int cwperpage, bad_block_byte, ret;
2372 	bool wide_bus;
2373 	int ecc_mode = 1;
2374 
2375 	/* controller only supports 512 bytes data steps */
2376 	ecc->size = NANDC_STEP_SIZE;
2377 	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2378 	cwperpage = mtd->writesize / NANDC_STEP_SIZE;
2379 
2380 	/*
2381 	 * Each CW has 4 user-available OOB bytes which will be protected with
2382 	 * ECC, so the remaining OOB bytes can be used for ECC parity.
2383 	 */
2384 	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
2385 				   mtd->oobsize - (cwperpage * 4));
2386 	if (ret) {
2387 		dev_err(nandc->dev, "No valid ECC settings possible\n");
2388 		return ret;
2389 	}
2390 
2391 	if (ecc->strength >= 8) {
2392 		/* 8 bit ECC defaults to BCH ECC on all platforms */
2393 		host->bch_enabled = true;
2394 		ecc_mode = 1;
2395 
2396 		if (wide_bus) {
2397 			host->ecc_bytes_hw = 14;
2398 			host->spare_bytes = 0;
2399 			host->bbm_size = 2;
2400 		} else {
2401 			host->ecc_bytes_hw = 13;
2402 			host->spare_bytes = 2;
2403 			host->bbm_size = 1;
2404 		}
2405 	} else {
2406 		/*
2407 		 * if the controller supports BCH for 4 bit ECC, the controller
2408 		 * uses fewer bytes for ECC. If RS is used, the ECC parity is
2409 		 * always 10 bytes
2410 		 */
2411 		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
2412 			/* BCH */
2413 			host->bch_enabled = true;
2414 			ecc_mode = 0;
2415 
2416 			if (wide_bus) {
2417 				host->ecc_bytes_hw = 8;
2418 				host->spare_bytes = 2;
2419 				host->bbm_size = 2;
2420 			} else {
2421 				host->ecc_bytes_hw = 7;
2422 				host->spare_bytes = 4;
2423 				host->bbm_size = 1;
2424 			}
2425 		} else {
2426 			/* RS */
2427 			host->ecc_bytes_hw = 10;
2428 
2429 			if (wide_bus) {
2430 				host->spare_bytes = 0;
2431 				host->bbm_size = 2;
2432 			} else {
2433 				host->spare_bytes = 1;
2434 				host->bbm_size = 1;
2435 			}
2436 		}
2437 	}
2438 
2439 	/*
2440 	 * we consider ecc->bytes as the sum of all the non-data content in a
2441 	 * step. It gives us a clean representation of the oob area (even if
2442 	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
2443 	 * ECC and 12 bytes for 4 bit ECC
2444 	 */
2445 	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2446 
2447 	ecc->read_page		= qcom_nandc_read_page;
2448 	ecc->read_page_raw	= qcom_nandc_read_page_raw;
2449 	ecc->read_oob		= qcom_nandc_read_oob;
2450 	ecc->write_page		= qcom_nandc_write_page;
2451 	ecc->write_page_raw	= qcom_nandc_write_page_raw;
2452 	ecc->write_oob		= qcom_nandc_write_oob;
2453 
2454 	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2455 
2456 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
2457 	/* Free the initially allocated BAM transaction for reading the ONFI params */
2458 	if (nandc->props->is_bam)
2459 		free_bam_transaction(nandc);
2460 
2461 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2462 				     cwperpage);
2463 
2464 	/* Now allocate the BAM transaction based on updated max_cwperpage */
2465 	if (nandc->props->is_bam) {
2466 		nandc->bam_txn = alloc_bam_transaction(nandc);
2467 		if (!nandc->bam_txn) {
2468 			dev_err(nandc->dev,
2469 				"failed to allocate bam transaction\n");
2470 			return -ENOMEM;
2471 		}
2472 	}
2473 
2474 	/*
2475 	 * DATA_UD_BYTES varies based on whether the read/write command protects
2476 	 * spare data with ECC too. We protect spare data by default, so we set
2477 	 * it to main + spare data, which are 512 and 4 bytes respectively.
2478 	 */
2479 	host->cw_data = 516;
2480 
2481 	/*
2482 	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2483 	 * for 8 bit ECC
2484 	 */
2485 	host->cw_size = host->cw_data + ecc->bytes;
2486 	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2487 
2488 	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2489 				| host->cw_data << UD_SIZE_BYTES
2490 				| 0 << DISABLE_STATUS_AFTER_WRITE
2491 				| 5 << NUM_ADDR_CYCLES
2492 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2493 				| 0 << STATUS_BFR_READ
2494 				| 1 << SET_RD_MODE_AFTER_STATUS
2495 				| host->spare_bytes << SPARE_SIZE_BYTES;
2496 
2497 	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2498 				| 0 <<  CS_ACTIVE_BSY
2499 				| bad_block_byte << BAD_BLOCK_BYTE_NUM
2500 				| 0 << BAD_BLOCK_IN_SPARE_AREA
2501 				| 2 << WR_RD_BSY_GAP
2502 				| wide_bus << WIDE_FLASH
2503 				| host->bch_enabled << ENABLE_BCH_ECC;
2504 
2505 	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2506 				| host->cw_size << UD_SIZE_BYTES
2507 				| 5 << NUM_ADDR_CYCLES
2508 				| 0 << SPARE_SIZE_BYTES;
2509 
2510 	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2511 				| 0 << CS_ACTIVE_BSY
2512 				| 17 << BAD_BLOCK_BYTE_NUM
2513 				| 1 << BAD_BLOCK_IN_SPARE_AREA
2514 				| 2 << WR_RD_BSY_GAP
2515 				| wide_bus << WIDE_FLASH
2516 				| 1 << DEV0_CFG1_ECC_DISABLE;
2517 
2518 	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
2519 				| 0 << ECC_SW_RESET
2520 				| host->cw_data << ECC_NUM_DATA_BYTES
2521 				| 1 << ECC_FORCE_CLK_OPEN
2522 				| ecc_mode << ECC_MODE
2523 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2524 
2525 	if (!nandc->props->qpic_v2)
2526 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2527 
2528 	host->clrflashstatus = FS_READY_BSY_N;
2529 	host->clrreadstatus = 0xc0;
2530 	nandc->regs->erased_cw_detect_cfg_clr =
2531 		cpu_to_le32(CLR_ERASED_PAGE_DET);
2532 	nandc->regs->erased_cw_detect_cfg_set =
2533 		cpu_to_le32(SET_ERASED_PAGE_DET);
2534 
2535 	dev_dbg(nandc->dev,
2536 		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2537 		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2538 		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2539 		cwperpage);
2540 
2541 	return 0;
2542 }
2543 
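/*
 * Editorial note (illustrative, not part of the original source):
 * continuing the 2K page / 4 bit RS / x8 example, the geometry computed
 * in qcom_nand_attach_chip() works out to:
 *
 *   cw_data = 516, cw_size = 516 + 12 = 528
 *   bad_block_byte = 2048 - 528 * (4 - 1) + 1 = 465
 *
 * i.e. the BBM sits at the start of the last codeword's flash area; the
 * register field appears to be 1-based (hence the + 1), while
 * qcom_nandc_block_bad() reads the same byte at the 0-based buffer
 * offset 2048 - 528 * 3 = 464.
 */
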
2544 static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
2545 			       struct qcom_op *q_op)
2546 {
2547 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2548 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2549 	int cmd;
2550 
2551 	switch (opcode) {
2552 	case NAND_CMD_RESET:
2553 		cmd = OP_RESET_DEVICE;
2554 		break;
2555 	case NAND_CMD_READID:
2556 		cmd = OP_FETCH_ID;
2557 		break;
2558 	case NAND_CMD_PARAM:
2559 		if (nandc->props->qpic_v2)
2560 			cmd = OP_PAGE_READ_ONFI_READ;
2561 		else
2562 			cmd = OP_PAGE_READ;
2563 		break;
2564 	case NAND_CMD_ERASE1:
2565 	case NAND_CMD_ERASE2:
2566 		cmd = OP_BLOCK_ERASE;
2567 		break;
2568 	case NAND_CMD_STATUS:
2569 		cmd = OP_CHECK_STATUS;
2570 		break;
2571 	case NAND_CMD_PAGEPROG:
2572 		cmd = OP_PROGRAM_PAGE;
2573 		q_op->flag = OP_PROGRAM_PAGE;
2574 		nandc->exec_opwrite = true;
2575 		break;
2576 	case NAND_CMD_READ0:
2577 	case NAND_CMD_READSTART:
2578 		if (host->use_ecc)
2579 			cmd = OP_PAGE_READ_WITH_ECC;
2580 		else
2581 			cmd = OP_PAGE_READ;
2582 		break;
2583 	default:
2584 		dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
2585 		return -EOPNOTSUPP;
2586 	}
2587 
2588 	return cmd;
2589 }
2590 
2591 /* NAND framework ->exec_op() hooks and related helpers */
2592 static int qcom_parse_instructions(struct nand_chip *chip,
2593 				    const struct nand_subop *subop,
2594 				    struct qcom_op *q_op)
2595 {
2596 	const struct nand_op_instr *instr = NULL;
2597 	unsigned int op_id;
2598 	int i, ret;
2599 
2600 	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2601 		unsigned int offset, naddrs;
2602 		const u8 *addrs;
2603 
2604 		instr = &subop->instrs[op_id];
2605 
2606 		switch (instr->type) {
2607 		case NAND_OP_CMD_INSTR:
2608 			ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
2609 			if (ret < 0)
2610 				return ret;
2611 
2612 			q_op->cmd_reg = ret;
2613 			q_op->rdy_delay_ns = instr->delay_ns;
2614 			break;
2615 
2616 		case NAND_OP_ADDR_INSTR:
2617 			offset = nand_subop_get_addr_start_off(subop, op_id);
2618 			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2619 			addrs = &instr->ctx.addr.addrs[offset];
2620 
2621 			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
2622 				q_op->addr1_reg |= addrs[i] << (i * 8);
2623 
2624 			if (naddrs > 4)
2625 				q_op->addr2_reg |= addrs[4];
2626 
2627 			q_op->rdy_delay_ns = instr->delay_ns;
2628 			break;
2629 
2630 		case NAND_OP_DATA_IN_INSTR:
2631 			q_op->data_instr = instr;
2632 			q_op->data_instr_idx = op_id;
2633 			q_op->rdy_delay_ns = instr->delay_ns;
2634 			fallthrough;
2635 		case NAND_OP_DATA_OUT_INSTR:
2636 			q_op->rdy_delay_ns = instr->delay_ns;
2637 			break;
2638 
2639 		case NAND_OP_WAITRDY_INSTR:
2640 			q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
2641 			q_op->rdy_delay_ns = instr->delay_ns;
2642 			break;
2643 		}
2644 	}
2645 
2646 	return 0;
2647 }
2648 
2649 static void qcom_delay_ns(unsigned int ns)
2650 {
2651 	if (!ns)
2652 		return;
2653 
2654 	if (ns < 10000)
2655 		ndelay(ns);
2656 	else
2657 		udelay(DIV_ROUND_UP(ns, 1000));
2658 }
2659 
2660 static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
2661 {
2662 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2663 	unsigned long start = jiffies + msecs_to_jiffies(time_ms);
2664 	u32 flash;
2665 
2666 	nandc_read_buffer_sync(nandc, true);
2667 
2668 	do {
2669 		flash = le32_to_cpu(nandc->reg_read_buf[0]);
2670 		if (flash & FS_READY_BSY_N)
2671 			return 0;
2672 		cpu_relax();
2673 	} while (time_after(start, jiffies));
2674 
2675 	dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
2676 
2677 	return -ETIMEDOUT;
2678 }
2679 
2680 static int qcom_read_status_exec(struct nand_chip *chip,
2681 				 const struct nand_subop *subop)
2682 {
2683 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2684 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2685 	struct nand_ecc_ctrl *ecc = &chip->ecc;
2686 	struct qcom_op q_op = {};
2687 	const struct nand_op_instr *instr = NULL;
2688 	unsigned int op_id = 0;
2689 	unsigned int len = 0;
2690 	int ret, num_cw, i;
2691 	u32 flash_status;
2692 
2693 	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2694 
2695 	ret = qcom_parse_instructions(chip, subop, &q_op);
2696 	if (ret)
2697 		return ret;
2698 
2699 	num_cw = nandc->exec_opwrite ? ecc->steps : 1;
2700 	nandc->exec_opwrite = false;
2701 
2702 	nandc->buf_count = 0;
2703 	nandc->buf_start = 0;
2704 	host->use_ecc = false;
2705 
2706 	clear_read_regs(nandc);
2707 	clear_bam_transaction(nandc);
2708 
2709 	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2710 	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2711 
2712 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
2713 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2714 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
2715 
2716 	ret = submit_descs(nandc);
2717 	if (ret) {
2718 		dev_err(nandc->dev, "failure in submitting status descriptor\n");
2719 		goto err_out;
2720 	}
2721 
2722 	nandc_read_buffer_sync(nandc, true);
2723 
2724 	for (i = 0; i < num_cw; i++) {
2725 		flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
2726 
2727 		if (flash_status & FS_MPU_ERR)
2728 			host->status &= ~NAND_STATUS_WP;
2729 
2730 		if (flash_status & FS_OP_ERR ||
2731 		    (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
2732 			host->status |= NAND_STATUS_FAIL;
2733 	}
2734 
2735 	flash_status = host->status;
2736 	instr = q_op.data_instr;
2737 	op_id = q_op.data_instr_idx;
2738 	len = nand_subop_get_data_len(subop, op_id);
2739 	memcpy(instr->ctx.data.buf.in, &flash_status, len);
2740 
2741 err_out:
2742 	return ret;
2743 }
2744 
2745 static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2746 {
2747 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2748 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2749 	struct qcom_op q_op = {};
2750 	const struct nand_op_instr *instr = NULL;
2751 	unsigned int op_id = 0;
2752 	unsigned int len = 0;
2753 	int ret;
2754 
2755 	ret = qcom_parse_instructions(chip, subop, &q_op);
2756 	if (ret)
2757 		return ret;
2758 
2759 	nandc->buf_count = 0;
2760 	nandc->buf_start = 0;
2761 	host->use_ecc = false;
2762 
2763 	clear_read_regs(nandc);
2764 	clear_bam_transaction(nandc);
2765 
2766 	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2767 	nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
2768 	nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
2769 	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
2770 		      nandc->props->is_bam ? 0 : DM_EN);
2771 
2772 	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2773 
2774 	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
2775 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2776 
2777 	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
2778 
2779 	ret = submit_descs(nandc);
2780 	if (ret) {
2781 		dev_err(nandc->dev, "failure in submitting read id descriptor\n");
2782 		goto err_out;
2783 	}
2784 
2785 	instr = q_op.data_instr;
2786 	op_id = q_op.data_instr_idx;
2787 	len = nand_subop_get_data_len(subop, op_id);
2788 
2789 	nandc_read_buffer_sync(nandc, true);
2790 	memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
2791 
2792 err_out:
2793 	return ret;
2794 }
2795 
2796 static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2797 {
2798 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2799 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2800 	struct qcom_op q_op = {};
2801 	int ret;
2802 	int instrs = 1;
2803 
2804 	ret = qcom_parse_instructions(chip, subop, &q_op);
2805 	if (ret)
2806 		return ret;
2807 
2808 	if (q_op.flag == OP_PROGRAM_PAGE) {
2809 		goto wait_rdy;
2810 	} else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
2811 		q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
2812 		nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
2813 		nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
2814 		nandc_set_reg(chip, NAND_DEV0_CFG0,
2815 			      host->cfg0_raw & ~(7 << CW_PER_PAGE));
2816 		nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
2817 		instrs = 3;
2818 	} else if (q_op.cmd_reg != OP_RESET_DEVICE) {
2819 		return 0;
2820 	}
2821 
2822 	nandc->buf_count = 0;
2823 	nandc->buf_start = 0;
2824 	host->use_ecc = false;
2825 
2826 	clear_read_regs(nandc);
2827 	clear_bam_transaction(nandc);
2828 
2829 	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2830 	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2831 
2832 	write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
2833 	if (q_op.cmd_reg == OP_BLOCK_ERASE)
2834 		write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
2835 
2836 	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
2837 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
2838 
2839 	ret = submit_descs(nandc);
2840 	if (ret) {
2841 		dev_err(nandc->dev, "failure in submitting misc descriptor\n");
2842 		goto err_out;
2843 	}
2844 
2845 wait_rdy:
2846 	qcom_delay_ns(q_op.rdy_delay_ns);
2847 	ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
2848 
2849 err_out:
2850 	return ret;
2851 }
2852 
2853 static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
2854 {
2855 	struct qcom_nand_host *host = to_qcom_nand_host(chip);
2856 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2857 	struct qcom_op q_op = {};
2858 	const struct nand_op_instr *instr = NULL;
2859 	unsigned int op_id = 0;
2860 	unsigned int len = 0;
2861 	int ret;
2862 
2863 	ret = qcom_parse_instructions(chip, subop, &q_op);
2864 	if (ret)
2865 		return ret;
2866 
2867 	q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
2868 
2869 	nandc->buf_count = 0;
2870 	nandc->buf_start = 0;
2871 	host->use_ecc = false;
2872 	clear_read_regs(nandc);
2873 	clear_bam_transaction(nandc);
2874 
2875 	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
2876 
2877 	nandc_set_reg(chip, NAND_ADDR0, 0);
2878 	nandc_set_reg(chip, NAND_ADDR1, 0);
2879 	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
2880 					| 512 << UD_SIZE_BYTES
2881 					| 5 << NUM_ADDR_CYCLES
2882 					| 0 << SPARE_SIZE_BYTES);
2883 	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
2884 					| 0 << CS_ACTIVE_BSY
2885 					| 17 << BAD_BLOCK_BYTE_NUM
2886 					| 1 << BAD_BLOCK_IN_SPARE_AREA
2887 					| 2 << WR_RD_BSY_GAP
2888 					| 0 << WIDE_FLASH
2889 					| 1 << DEV0_CFG1_ECC_DISABLE);
2890 	if (!nandc->props->qpic_v2)
2891 		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
2892 
2893 	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
2894 	if (!nandc->props->qpic_v2) {
2895 		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
2896 			      (nandc->vld & ~READ_START_VLD));
2897 		nandc_set_reg(chip, NAND_DEV_CMD1,
2898 			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
2899 			      | NAND_CMD_PARAM << READ_ADDR);
2900 	}
2901 
2902 	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
2903 
2904 	if (!nandc->props->qpic_v2) {
2905 		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
2906 		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
2907 	}
2908 
2909 	instr = q_op.data_instr;
2910 	op_id = q_op.data_instr_idx;
2911 	len = nand_subop_get_data_len(subop, op_id);
2912 
2913 	nandc_set_read_loc(chip, 0, 0, 0, len, 1);
2914 
2915 	if (!nandc->props->qpic_v2) {
2916 		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
2917 		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
2918 	}
2919 
2920 	nandc->buf_count = len;
2921 	memset(nandc->data_buffer, 0xff, nandc->buf_count);
2922 
2923 	config_nand_single_cw_page_read(chip, false, 0);
2924 
2925 	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
2926 		      nandc->buf_count, 0);
2927 
2928 	/* restore CMD1 and VLD regs */
2929 	if (!nandc->props->qpic_v2) {
2930 		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
2931 		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
2932 	}
2933 
2934 	ret = submit_descs(nandc);
2935 	if (ret) {
2936 		dev_err(nandc->dev, "failure in submitting param page descriptor\n");
2937 		goto err_out;
2938 	}
2939 
2940 	ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
2941 	if (ret)
2942 		goto err_out;
2943 
2944 	memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
2945 
2946 err_out:
2947 	return ret;
2948 }
2949 
2950 static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
2951 		NAND_OP_PARSER_PATTERN(
2952 			qcom_read_id_type_exec,
2953 			NAND_OP_PARSER_PAT_CMD_ELEM(false),
2954 			NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
2955 			NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
2956 		NAND_OP_PARSER_PATTERN(
2957 			qcom_read_status_exec,
2958 			NAND_OP_PARSER_PAT_CMD_ELEM(false),
2959 			NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
2960 		NAND_OP_PARSER_PATTERN(
2961 			qcom_param_page_type_exec,
2962 			NAND_OP_PARSER_PAT_CMD_ELEM(false),
2963 			NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
2964 			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
2965 			NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
2966 		NAND_OP_PARSER_PATTERN(
2967 			qcom_misc_cmd_type_exec,
2968 			NAND_OP_PARSER_PAT_CMD_ELEM(false),
2969 			NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
2970 			NAND_OP_PARSER_PAT_CMD_ELEM(true),
2971 			NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2972 		);
2973 
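/*
 * Editorial note (illustrative, not part of the original source): as an
 * example of the pattern matching above, the core's nand_readid_op()
 * issues CMD(NAND_CMD_READID) + one ADDR cycle + 8-bit DATA_IN reads.
 * That sequence matches the first pattern and is dispatched to
 * qcom_read_id_type_exec(), which maps the opcode to OP_FETCH_ID via
 * qcom_op_cmd_mapping() and copies the ID bytes back from NAND_READ_ID.
 */
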
2974 static int qcom_check_op(struct nand_chip *chip,
2975 			 const struct nand_operation *op)
2976 {
2977 	const struct nand_op_instr *instr;
2978 	int op_id;
2979 
2980 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
2981 		instr = &op->instrs[op_id];
2982 
2983 		switch (instr->type) {
2984 		case NAND_OP_CMD_INSTR:
2985 			if (instr->ctx.cmd.opcode != NAND_CMD_RESET  &&
2986 			    instr->ctx.cmd.opcode != NAND_CMD_READID &&
2987 			    instr->ctx.cmd.opcode != NAND_CMD_PARAM  &&
2988 			    instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
2989 			    instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
2990 			    instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
2991 			    instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
2992 			    instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
2993 			    instr->ctx.cmd.opcode != NAND_CMD_READSTART)
2994 				return -EOPNOTSUPP;
2995 			break;
2996 		default:
2997 			break;
2998 		}
2999 	}
3000 
3001 	return 0;
3002 }
3003 
3004 static int qcom_nand_exec_op(struct nand_chip *chip,
3005 			     const struct nand_operation *op, bool check_only)
3006 {
3007 	if (check_only)
3008 		return qcom_check_op(chip, op);
3009 
3010 	return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
3011 }
3012 
3013 static const struct nand_controller_ops qcom_nandc_ops = {
3014 	.attach_chip = qcom_nand_attach_chip,
3015 	.exec_op = qcom_nand_exec_op,
3016 };
3017 
3018 static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
3019 {
3020 	if (nandc->props->is_bam) {
3021 		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
3022 			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
3023 					 MAX_REG_RD *
3024 					 sizeof(*nandc->reg_read_buf),
3025 					 DMA_FROM_DEVICE);
3026 
3027 		if (nandc->tx_chan)
3028 			dma_release_channel(nandc->tx_chan);
3029 
3030 		if (nandc->rx_chan)
3031 			dma_release_channel(nandc->rx_chan);
3032 
3033 		if (nandc->cmd_chan)
3034 			dma_release_channel(nandc->cmd_chan);
3035 	} else {
3036 		if (nandc->chan)
3037 			dma_release_channel(nandc->chan);
3038 	}
3039 }
3040 
3041 static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
3042 {
3043 	int ret;
3044 
3045 	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
3046 	if (ret) {
3047 		dev_err(nandc->dev, "failed to set DMA mask\n");
3048 		return ret;
3049 	}
3050 
3051 	/*
3052 	 * we use the internal buffer for reading ONFI params, reading small
3053 	 * data like ID and status, and performing read-copy-write operations
3054 	 * when writing to a codeword partially. 532 is the maximum possible
3055 	 * size of a codeword for our nand controller
3056 	 */
3057 	nandc->buf_size = 532;
3058 
3059 	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
3060 	if (!nandc->data_buffer)
3061 		return -ENOMEM;
3062 
3063 	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
3064 	if (!nandc->regs)
3065 		return -ENOMEM;
3066 
3067 	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
3068 					   sizeof(*nandc->reg_read_buf),
3069 					   GFP_KERNEL);
3070 	if (!nandc->reg_read_buf)
3071 		return -ENOMEM;
3072 
3073 	if (nandc->props->is_bam) {
3074 		nandc->reg_read_dma =
3075 			dma_map_single(nandc->dev, nandc->reg_read_buf,
3076 				       MAX_REG_RD *
3077 				       sizeof(*nandc->reg_read_buf),
3078 				       DMA_FROM_DEVICE);
3079 		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
3080 			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
3081 			return -EIO;
3082 		}
3083 
3084 		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
3085 		if (IS_ERR(nandc->tx_chan)) {
3086 			ret = PTR_ERR(nandc->tx_chan);
3087 			nandc->tx_chan = NULL;
3088 			dev_err_probe(nandc->dev, ret,
3089 				      "tx DMA channel request failed\n");
3090 			goto unalloc;
3091 		}
3092 
3093 		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
3094 		if (IS_ERR(nandc->rx_chan)) {
3095 			ret = PTR_ERR(nandc->rx_chan);
3096 			nandc->rx_chan = NULL;
3097 			dev_err_probe(nandc->dev, ret,
3098 				      "rx DMA channel request failed\n");
3099 			goto unalloc;
3100 		}
3101 
3102 		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
3103 		if (IS_ERR(nandc->cmd_chan)) {
3104 			ret = PTR_ERR(nandc->cmd_chan);
3105 			nandc->cmd_chan = NULL;
3106 			dev_err_probe(nandc->dev, ret,
3107 				      "cmd DMA channel request failed\n");
3108 			goto unalloc;
3109 		}
3110 
3111 		/*
3112 		 * Initially allocate BAM transaction to read ONFI param page.
3113 		 * After detecting all the devices, this BAM transaction will
3114 		 * be freed and the next BAM transaction will be allocated with
3115 		 * maximum codeword size
3116 		 */
3117 		nandc->max_cwperpage = 1;
3118 		nandc->bam_txn = alloc_bam_transaction(nandc);
3119 		if (!nandc->bam_txn) {
3120 			dev_err(nandc->dev,
3121 				"failed to allocate bam transaction\n");
3122 			ret = -ENOMEM;
3123 			goto unalloc;
3124 		}
3125 	} else {
3126 		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
3127 		if (IS_ERR(nandc->chan)) {
3128 			ret = PTR_ERR(nandc->chan);
3129 			nandc->chan = NULL;
3130 			dev_err_probe(nandc->dev, ret,
3131 				      "rxtx DMA channel request failed\n");
3132 			return ret;
3133 		}
3134 	}
3135 
3136 	INIT_LIST_HEAD(&nandc->desc_list);
3137 	INIT_LIST_HEAD(&nandc->host_list);
3138 
3139 	nand_controller_init(&nandc->controller);
3140 	nandc->controller.ops = &qcom_nandc_ops;
3141 
3142 	return 0;
3143 unalloc:
3144 	qcom_nandc_unalloc(nandc);
3145 	return ret;
3146 }
3147 
3148 /* one time setup of a few nand controller registers */
3149 static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
3150 {
3151 	u32 nand_ctrl;
3152 
3153 	/* kill onenand */
3154 	if (!nandc->props->is_qpic)
3155 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
3156 
3157 	if (!nandc->props->qpic_v2)
3158 		nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
3159 			    NAND_DEV_CMD_VLD_VAL);
3160 
3161 	/* enable ADM or BAM DMA */
3162 	if (nandc->props->is_bam) {
3163 		nand_ctrl = nandc_read(nandc, NAND_CTRL);
3164 
3165 		/*
3166 		 * NAND_CTRL is an operational register, and CPU
3167 		 * access to operational registers is read-only
3168 		 * in BAM mode. So update the NAND_CTRL register
3169 		 * only if it is not in BAM mode. In most cases BAM
3170 		 * mode will be enabled in the bootloader.
3171 		 */
3172 		if (!(nand_ctrl & BAM_MODE_EN))
3173 			nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
3174 	} else {
3175 		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
3176 	}
3177 
3178 	/* save the original values of these registers */
3179 	if (!nandc->props->qpic_v2) {
3180 		nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
3181 		nandc->vld = NAND_DEV_CMD_VLD_VAL;
3182 	}
3183 
3184 	return 0;
3185 }
3186 
3187 static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
3188 
3189 static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
3190 						struct qcom_nand_host *host,
3191 						struct device_node *dn)
3192 {
3193 	struct nand_chip *chip = &host->chip;
3194 	struct mtd_info *mtd = nand_to_mtd(chip);
3195 	struct qcom_nand_boot_partition *boot_partition;
3196 	struct device *dev = nandc->dev;
3197 	int partitions_count, i, j, ret;
3198 
3199 	if (!of_property_present(dn, "qcom,boot-partitions"))
3200 		return 0;
3201 
3202 	partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
3203 	if (partitions_count <= 0) {
3204 		dev_err(dev, "Error parsing boot partition\n");
3205 		return partitions_count ? partitions_count : -EINVAL;
3206 	}
3207 
3208 	host->nr_boot_partitions = partitions_count / 2;
3209 	host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
3210 					     sizeof(*host->boot_partitions), GFP_KERNEL);
3211 	if (!host->boot_partitions) {
3212 		host->nr_boot_partitions = 0;
3213 		return -ENOMEM;
3214 	}
3215 
3216 	for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
3217 		boot_partition = &host->boot_partitions[i];
3218 
3219 		ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
3220 						 &boot_partition->page_offset);
3221 		if (ret) {
3222 			dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
3223 			host->nr_boot_partitions = 0;
3224 			return ret;
3225 		}
3226 
3227 		if (boot_partition->page_offset % mtd->writesize) {
3228 			dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
3229 				i);
3230 			host->nr_boot_partitions = 0;
3231 			return -EINVAL;
3232 		}
3233 		/* Convert offset to nand pages */
3234 		boot_partition->page_offset /= mtd->writesize;
3235 
3236 		ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
3237 						 &boot_partition->page_size);
3238 		if (ret) {
3239 			dev_err(dev, "Error parsing boot partition size at index %d\n", i);
3240 			host->nr_boot_partitions = 0;
3241 			return ret;
3242 		}
3243 
3244 		if (boot_partition->page_size % mtd->writesize) {
3245 			dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
3246 				i);
3247 			host->nr_boot_partitions = 0;
3248 			return -EINVAL;
3249 		}
3250 		/* Convert size to nand pages */
3251 		boot_partition->page_size /= mtd->writesize;
3252 	}
3253 
3254 	return 0;
3255 }
3256 
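/*
 * Editorial note (illustrative, not part of the original source):
 * "qcom,boot-partitions" is a list of <offset size> pairs in bytes,
 * e.g. a single boot region of 0x58a0000 bytes at offset 0:
 *
 *   qcom,boot-partitions = <0x0 0x58a0000>;
 *
 * Both values must be multiples of the page size; the function above
 * converts them to page units before use.
 */
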
3257 static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
3258 					    struct qcom_nand_host *host,
3259 					    struct device_node *dn)
3260 {
3261 	struct nand_chip *chip = &host->chip;
3262 	struct mtd_info *mtd = nand_to_mtd(chip);
3263 	struct device *dev = nandc->dev;
3264 	int ret;
3265 
3266 	ret = of_property_read_u32(dn, "reg", &host->cs);
3267 	if (ret) {
3268 		dev_err(dev, "can't get chip-select\n");
3269 		return -ENXIO;
3270 	}
3271 
3272 	nand_set_flash_node(chip, dn);
3273 	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
3274 	if (!mtd->name)
3275 		return -ENOMEM;
3276 
3277 	mtd->owner = THIS_MODULE;
3278 	mtd->dev.parent = dev;
3279 
3280 	/*
3281 	 * the bad block marker is readable only when we read the last codeword
3282 	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
3283 	 * helpers don't allow us to read the BBM from a nand chip with ECC
3284 	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
3285 	 * and block_markbad helpers until we permanently switch to using
3286 	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
3287 	 */
3288 	chip->legacy.block_bad		= qcom_nandc_block_bad;
3289 	chip->legacy.block_markbad	= qcom_nandc_block_markbad;
3290 
3291 	chip->controller = &nandc->controller;
3292 	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
3293 			 NAND_SKIP_BBTSCAN;
3294 
3295 	/* set up initial status value */
3296 	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
3297 
3298 	ret = nand_scan(chip, 1);
3299 	if (ret)
3300 		return ret;
3301 
3302 	ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
3303 	if (ret)
3304 		goto err;
3305 
3306 	if (nandc->props->use_codeword_fixup) {
3307 		ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
3308 		if (ret)
3309 			goto err;
3310 	}
3311 
3312 	return 0;
3313 
3314 err:
3315 	nand_cleanup(chip);
3316 	return ret;
3317 }
3318 
3319 static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
3320 {
3321 	struct device *dev = nandc->dev;
3322 	struct device_node *dn = dev->of_node, *child;
3323 	struct qcom_nand_host *host;
3324 	int ret = -ENODEV;
3325 
3326 	for_each_available_child_of_node(dn, child) {
3327 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
3328 		if (!host) {
3329 			of_node_put(child);
3330 			return -ENOMEM;
3331 		}
3332 
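		/*
		 * a chip that fails to initialize is not fatal: free its
		 * host struct and keep probing the remaining children
		 */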
		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	return ret;
}

/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
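
/*
 * Illustrative only: ADM-based (non-BAM) controllers name their flow
 * control channels in DT; the channel numbers here are examples, not a
 * recommendation:
 *
 *	qcom,cmd-crci = <15>;
 *	qcom,data-crci = <3>;
 */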

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	nandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

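	/*
	 * the register block is also accessed by the DMA engine, so map the
	 * resource for DMA in addition to the CPU mapping above
	 */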
	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, nandc->base_dma))
		return -ENXIO;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

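	/* error unwind: undo the steps above in reverse order */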
err_setup:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
	return ret;
}

static void qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;
	struct nand_chip *chip;
	int ret;

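	/* unregister and tear down every chip before the controller itself */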
	list_for_each_entry(host, &nandc->host_list, node) {
		chip = &host->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
	}

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
}

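/*
 * per-variant controller properties: is_bam selects the BAM DMA engine
 * over ADM, is_qpic/qpic_v2 mark the QPIC IP generation, use_codeword_fixup
 * enables the legacy boot-partition codeword handling parsed above, and
 * dev_cmd_reg_start is the offset of the NAND_DEV_CMD* registers within
 * the register block
 */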
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.use_codeword_fixup = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x7000,
};

static const struct qcom_nandc_props sdx55_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.qpic_v2 = true,
	.dev_cmd_reg_start = 0x7000,
};

/*
 * each compatible entry's .data points at the qcom_nandc_props describing
 * that controller variant
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq6018-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,sdx55-nand",
		.data = &sdx55_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove_new = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");