// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright © 2010-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <asm/io.h>
#include <memalign.h>
#include <nand.h>
#include <clk.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/log2.h>
#include <asm/processor.h>
#include <dm.h>

#include "brcmnand.h"
#include "brcmnand_compat.h"

/*
 * This flag controls if WP stays on between erase/write commands to mitigate
 * flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
static int wp_on = 1;
module_param(wp_on, int, 0444);

/***********************************************************************
 * Definitions
 ***********************************************************************/

#define DRV_NAME			"brcmnand"

#define CMD_NULL			0x00
#define CMD_PAGE_READ			0x01
#define CMD_SPARE_AREA_READ		0x02
#define CMD_STATUS_READ			0x03
#define CMD_PROGRAM_PAGE		0x04
#define CMD_PROGRAM_SPARE_AREA		0x05
#define CMD_COPY_BACK			0x06
#define CMD_DEVICE_ID_READ		0x07
#define CMD_BLOCK_ERASE			0x08
#define CMD_FLASH_RESET			0x09
#define CMD_BLOCKS_LOCK			0x0a
#define CMD_BLOCKS_LOCK_DOWN		0x0b
#define CMD_BLOCKS_UNLOCK		0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
#define CMD_PARAMETER_READ		0x0e
#define CMD_PARAMETER_CHANGE_COL	0x0f
#define CMD_LOW_LEVEL_OP		0x10

struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;

/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR	(1 << 8)
#define FLASH_DMA_CORR_ERROR	(1 << 9)

/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT		9U
#define FC_BYTES		512U
#define FC_WORDS		(FC_BYTES >> 2)
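
/*
 * Page data moves through this cache FC_BYTES at a time; e.g. a 2048B
 * page is transferred as 2048 >> FC_SHIFT = 4 cache-sized chunks (see
 * the mtd->writesize >> FC_SHIFT callers below).
 */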

#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100

/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS			= BIT(0),
	BRCMNAND_HAS_PREFETCH			= BIT(1),
	BRCMNAND_HAS_CACHE_MODE			= BIT(2),
	BRCMNAND_HAS_WP				= BIT(3),
};

struct brcmnand_controller {
#ifndef __UBOOT__
	struct device		*dev;
#else
	struct udevice		*dev;
#endif /* __UBOOT__ */
	struct nand_hw_control	controller;
	void __iomem		*nand_base;
	void __iomem		*nand_fc; /* flash cache */
	void __iomem		*flash_dma_base;
	unsigned int		irq;
	unsigned int		dma_irq;
	int			nand_version;
	int			parameter_page_big_endian;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc	*soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk		*clk;

	int			cmd_pending;
	bool			dma_pending;
	struct completion	done;
	struct completion	dma_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head host_list;

	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t		dma_pa;

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8			flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16		*reg_offsets;
	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
	const u8		*cs_offsets; /* within each chip-select */
	const u8		*cs0_offsets; /* within CS0, if different */
	unsigned int		max_block_size;
	const unsigned int	*block_sizes;
	unsigned int		max_page_size;
	const unsigned int	*page_sizes;
	unsigned int		max_oob;
	u32			features;

	/* for low-power standby/resume only */
	u32			nand_cs_nand_select;
	u32			nand_cs_nand_xor;
	u32			corr_stat_threshold;
	u32			flash_dma_mode;
};

struct brcmnand_cfg {
	u64			device_size;
	unsigned int		block_size;
	unsigned int		page_size;
	unsigned int		spare_area_size;
	unsigned int		device_width;
	unsigned int		col_adr_bytes;
	unsigned int		blk_adr_bytes;
	unsigned int		ful_adr_bytes;
	unsigned int		sector_size_1k;
	unsigned int		ecc_level;
	/* use for low-power standby/resume only */
	u32			acc_control;
	u32			config;
	u32			config_ext;
	u32			timing_1;
	u32			timing_2;
};

struct brcmnand_host {
	struct list_head	node;

	struct nand_chip	chip;
#ifndef __UBOOT__
	struct platform_device	*pdev;
#else
	struct udevice	*pdev;
#endif /* __UBOOT__ */
	int			cs;

	unsigned int		last_cmd;
	unsigned int		last_byte;
	u64			last_addr;
	struct brcmnand_cfg	hwcfg;
	struct brcmnand_controller *ctrl;
};

enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};
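
/*
 * A 0 entry in the per-revision tables below means "this register does
 * not exist on that revision": brcmnand_read_reg() returns 0 and
 * brcmnand_write_reg() is a no-op for a 0 offset, so callers need not
 * check the controller version first.
 */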

/* BRCMNAND v4.0 */
static const u16 brcmnand_regs_v40[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x6c,
	[BRCMNAND_CS_SELECT]		=  0x14,
	[BRCMNAND_CS_XOR]		=  0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		=  0x40,
	[BRCMNAND_CS1_BASE]		=  0xd0,
	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
	[BRCMNAND_UNCORR_COUNT]		=     0,
	[BRCMNAND_CORR_COUNT]		=     0,
	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
	[BRCMNAND_CORR_ADDR]		=  0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
	[BRCMNAND_SEMAPHORE]		=  0x58,
	[BRCMNAND_ID]			=  0x60,
	[BRCMNAND_ID_EXT]		=  0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	=  0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x6c,
	[BRCMNAND_CS_SELECT]		=  0x14,
	[BRCMNAND_CS_XOR]		=  0x18,
	[BRCMNAND_LL_OP]		= 0x178,
	[BRCMNAND_CS0_BASE]		=  0x40,
	[BRCMNAND_CS1_BASE]		=  0xd0,
	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
	[BRCMNAND_UNCORR_COUNT]		=     0,
	[BRCMNAND_CORR_COUNT]		=     0,
	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
	[BRCMNAND_CORR_ADDR]		=  0x74,
	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
	[BRCMNAND_SEMAPHORE]		=  0x58,
	[BRCMNAND_ID]			=  0x60,
	[BRCMNAND_ID_EXT]		=  0x64,
	[BRCMNAND_LL_RDATA]		= 0x17c,
	[BRCMNAND_OOB_READ_BASE]	=  0x20,
	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
	[BRCMNAND_FC_BASE]		= 0x200,
};

/* BRCMNAND v6.0 - v7.0 (v7.1 has its own table below) */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xc4,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x400,
};

/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START]		=  0x04,
	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
	[BRCMNAND_INTFC_STATUS]		=  0x14,
	[BRCMNAND_CS_SELECT]		=  0x18,
	[BRCMNAND_CS_XOR]		=  0x1c,
	[BRCMNAND_LL_OP]		=  0x20,
	[BRCMNAND_CS0_BASE]		=  0x50,
	[BRCMNAND_CS1_BASE]		=     0,
	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
	[BRCMNAND_CORR_COUNT]		= 0x100,
	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
	[BRCMNAND_CORR_ADDR]		= 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
	[BRCMNAND_UNCORR_ADDR]		= 0x118,
	[BRCMNAND_SEMAPHORE]		= 0x150,
	[BRCMNAND_ID]			= 0x194,
	[BRCMNAND_ID_EXT]		= 0x198,
	[BRCMNAND_LL_RDATA]		= 0x19c,
	[BRCMNAND_OOB_READ_BASE]	= 0x200,
	[BRCMNAND_OOB_READ_10_BASE]	=     0,
	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
	[BRCMNAND_FC_BASE]		= 0x600,
};

enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};

/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x0c,
	[BRCMNAND_CS_TIMING2]		= 0x10,
};

/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x04,
	[BRCMNAND_CS_CFG]		= 0x04,
	[BRCMNAND_CS_TIMING1]		= 0x08,
	[BRCMNAND_CS_TIMING2]		= 0x0c,
};

/* Per chip-select offset for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
	[BRCMNAND_CS_CFG_EXT]		= 0x08,
	[BRCMNAND_CS_CFG]		= 0x08,
	[BRCMNAND_CS_TIMING1]		= 0x10,
	[BRCMNAND_CS_TIMING2]		= 0x14,
};

/*
 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
 * one config register, but once the bitfields overflowed, newer controllers
 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
 */
enum {
	CFG_BLK_ADR_BYTES_SHIFT		= 8,
	CFG_COL_ADR_BYTES_SHIFT		= 12,
	CFG_FUL_ADR_BYTES_SHIFT		= 16,
	CFG_BUS_WIDTH_SHIFT		= 23,
	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT		= 24,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT		= 20,
	CFG_BLK_SIZE_SHIFT		= 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
	CFG_EXT_BLK_SIZE_SHIFT		= 4,
};

/* BRCMNAND_INTFC_STATUS */
enum {
	INTFC_FLASH_STATUS		= GENMASK(7, 0),

	INTFC_ERASED			= BIT(27),
	INTFC_OOB_VALID			= BIT(28),
	INTFC_CACHE_VALID		= BIT(29),
	INTFC_FLASH_READY		= BIT(30),
	INTFC_CTLR_READY		= BIT(31),
};

static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	brcmnand_writel(val, ctrl->nand_base + offs);
}

static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v4.0+? */
	if (ctrl->nand_version < 0x0400) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version >= 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0400)
		ctrl->reg_offsets = brcmnand_regs_v40;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v5.0 and earlier has a different CS0 offset layout */
		if (ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		ctrl->page_sizes = page_sizes;
		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else
			ctrl->block_sizes = block_sizes_v4;

		if (ctrl->nand_version < 0x0400) {
			ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version >= 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* v6.0 and newer (except v6.1) have prefetch support */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
#ifndef __UBOOT__
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
#else
	else if (dev_read_bool(ctrl->dev, "brcm,nand-has-wp"))
#endif /* __UBOOT__ */
		ctrl->features |= BRCMNAND_HAS_WP;

	return 0;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
		enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}

static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask, unsigned
				    int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}

static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	__raw_writel(val, ctrl->nand_fc + word * 4);
}

static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
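
/*
 * For example, on v7.1 the ACC_CONTROL register of CS2 resolves to
 * CS0_BASE (0x50) + 2 * reg_spacing (0x14) + 0x00 = 0x78, while on v4.0
 * any CS >= 1 is based at the non-contiguous CS1_BASE (0xd0) instead.
 */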

static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}

static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (ctrl->nand_version >= 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}
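
/*
 * The thresholds are packed several chip-selects per register: v7.2
 * fits four 7-bit fields per register (CS4 and up spill into the EXT
 * register), v6.0-v7.1 fit five 6-bit fields, and older controllers
 * share a single threshold field at shift 0.
 */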

static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}
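
/*
 * i.e. pre-v6.02 controllers take the opcode in the top byte of
 * CMD_START (cmd << 24); newer revisions take it right-aligned at
 * bit 0.
 */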

/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant across hardware revisions, while
 * others have shifted around.
 ***********************************************************************/

/* Constant for all versions (where supported) */
enum {
	/* See BRCMNAND_HAS_CACHE_MODE */
	ACC_CONTROL_CACHE_MODE				= BIT(22),

	/* See BRCMNAND_HAS_PREFETCH */
	ACC_CONTROL_PREFETCH				= BIT(23),

	ACC_CONTROL_PAGE_HIT				= BIT(24),
	ACC_CONTROL_WR_PREEMPT				= BIT(25),
	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
	ACC_CONTROL_RD_ERASED				= BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
	ACC_CONTROL_WR_ECC				= BIT(30),
	ACC_CONTROL_RD_ECC				= BIT(31),
};

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else
		return GENMASK(5, 0);
}

#define NAND_ACC_CONTROL_ECC_SHIFT	16
#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13

static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version >= 0x0702)
		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}
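
/*
 * The base ECC level field occupies bits 20:16 (19:16 before v6.0);
 * v7.2 extends the range with three extra bits at 15:13, which this
 * mask covers so that set/clear operations stay revision-safe.
 */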

static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control |= host->hwcfg.ecc_level
			       << NAND_ACC_CONTROL_ECC_SHIFT;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}

static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}

static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}

static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}

/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/

enum {
	CS_SELECT_NAND_WP			= BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG		= BIT(30),
};

static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
#ifndef __UBOOT__
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));
#else
	unsigned long base, limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	base = get_timer(0);
	limit = CONFIG_SYS_HZ * timeout_ms / 1000;
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (get_timer(base) < limit);
#endif /* __UBOOT__ */

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}

static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}

/***********************************************************************
 * Flash DMA
 ***********************************************************************/

enum flash_dma_reg {
	FLASH_DMA_REVISION		= 0x00,
	FLASH_DMA_FIRST_DESC		= 0x04,
	FLASH_DMA_FIRST_DESC_EXT	= 0x08,
	FLASH_DMA_CTRL			= 0x0c,
	FLASH_DMA_MODE			= 0x10,
	FLASH_DMA_STATUS		= 0x14,
	FLASH_DMA_INTERRUPT_DESC	= 0x18,
	FLASH_DMA_INTERRUPT_DESC_EXT	= 0x1c,
	FLASH_DMA_ERROR_STATUS		= 0x20,
	FLASH_DMA_CURRENT_DESC		= 0x24,
	FLASH_DMA_CURRENT_DESC_EXT	= 0x28,
};

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}

static inline bool flash_dma_buf_ok(const void *buf)
{
#ifndef __UBOOT__
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
#else
	return buf && likely(IS_ALIGNED((uintptr_t)buf, 4));
#endif /* __UBOOT__ */
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
				    u32 val)
{
	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
{
	return brcmnand_readl(ctrl->flash_dma_base + offs);
}

/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};

/***********************************************************************
 * Internal support functions
 ***********************************************************************/

static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;
	else
		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15) ||
			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}
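
/*
 * In other words, this controller family encodes its internal Hamming
 * mode as "ECC level 15" on a 16-byte spare area; v7.2+ additionally
 * accepts a 28-byte spare area with ecc_level == 16 as Hamming.
 */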

/*
 * Returns a nand_ecclayout structure for the given layout/configuration.
 * Returns NULL on failure.
 */
static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
						     struct brcmnand_host *host)
{
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int i, j;
	struct nand_ecclayout *layout;
	int req;
	int sectors;
	int sas;
	int idx1, idx2;

#ifndef __UBOOT__
	layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
#else
	layout = devm_kzalloc(host->pdev, sizeof(*layout), GFP_KERNEL);
#endif
	if (!layout)
		return NULL;

	sectors = cfg->page_size / (512 << cfg->sector_size_1k);
	sas = cfg->spare_area_size << cfg->sector_size_1k;

	/* Hamming */
	if (is_hamming_ecc(host->ctrl, cfg)) {
		for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
			/* First sector of each page may have BBI */
			if (i == 0) {
				layout->oobfree[idx2].offset = i * sas + 1;
				/* Small-page NAND uses byte 6 for BBI */
				if (cfg->page_size == 512)
					layout->oobfree[idx2].offset--;
				layout->oobfree[idx2].length = 5;
			} else {
				layout->oobfree[idx2].offset = i * sas;
				layout->oobfree[idx2].length = 6;
			}
			idx2++;
			layout->eccpos[idx1++] = i * sas + 6;
			layout->eccpos[idx1++] = i * sas + 7;
			layout->eccpos[idx1++] = i * sas + 8;
			layout->oobfree[idx2].offset = i * sas + 9;
			layout->oobfree[idx2].length = 7;
			idx2++;
			/* Leave zero-terminated entry for OOBFREE */
			if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
			    idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
				break;
		}

		return layout;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
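	/*
	 * e.g. BCH-8 at >= v5.0: DIV_ROUND_UP(8 * 14, 8) = 14 ECC bytes per
	 * 512B sector, which must still fit within the spare area (sas).
	 */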
	req = DIV_ROUND_UP(ecc_level * 14, 8);
	if (req >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			req, sas);
		return NULL;
	}

	layout->eccbytes = req * sectors;
	for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
		for (j = sas - req; j < sas && idx1 <
				MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
			layout->eccpos[idx1] = i * sas + j;

		/* First sector of each page may have BBI */
		if (i == 0) {
			if (cfg->page_size == 512 && (sas - req >= 6)) {
				/* Small-page NAND uses byte 6 for BBI */
				layout->oobfree[idx2].offset = 0;
				layout->oobfree[idx2].length = 5;
				idx2++;
				if (sas - req > 6) {
					layout->oobfree[idx2].offset = 6;
					layout->oobfree[idx2].length =
						sas - req - 6;
					idx2++;
				}
			} else if (sas > req + 1) {
				layout->oobfree[idx2].offset = i * sas + 1;
				layout->oobfree[idx2].length = sas - req - 1;
				idx2++;
			}
		} else if (sas > req) {
			layout->oobfree[idx2].offset = i * sas;
			layout->oobfree[idx2].length = sas - req;
			idx2++;
		}
		/* Leave zero-terminated entry for OOBFREE */
		if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
		    idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
			break;
	}

	return layout;
}

static struct nand_ecclayout *brcmstb_choose_ecc_layout(
		struct brcmnand_host *host)
{
	struct nand_ecclayout *layout;
	struct brcmnand_cfg *p = &host->hwcfg;
	unsigned int ecc_level = p->ecc_level;

	if (p->sector_size_1k)
		ecc_level <<= 1;

	layout = brcmnand_create_layout(ecc_level, host);
	if (!layout) {
		dev_err(&host->pdev->dev,
				"no proper ecc_layout for this NAND cfg\n");
		return NULL;
	}

	return layout;
}

static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
		static int old_wp = -1;
		int ret;

		if (old_wp != wp) {
			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
			old_wp = wp;
		}

		/*
		 * make sure ctrl/flash ready before and after
		 * changing state of #WP pin
		 */
		ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
					       NAND_STATUS_READY,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY, 0);
		if (ret)
			return;

		brcmnand_set_wp(ctrl, wp);
		nand_status_op(chip, NULL);
		/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
		ret = bcmnand_ctrl_poll_status(ctrl,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       NAND_STATUS_WP,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       (wp ? 0 : NAND_STATUS_WP), 0);
#ifndef __UBOOT__
		if (ret)
			dev_err_ratelimited(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
#else
		if (ret)
			dev_err(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
#endif /* __UBOOT__ */
	}
}

/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];

	if (offs >= ctrl->max_oob)
		return 0x77;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

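	/*
	 * OOB bytes are packed big-endian within each 32-bit register:
	 * byte 0 of a word lives in bits 31:24, hence the 24 - 8 * (offs % 4)
	 * down-shift below (mirrored by write_oob_to_regs()).
	 */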
	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}

static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
				 u32 data)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];

	if (offs >= ctrl->max_oob)
		return;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	nand_writereg(ctrl, reg_offs, data);
}

/*
 * read_oob_from_regs - read data from OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to read to
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
			      int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j++)
		oob[j] = oob_reg_read(ctrl, j);
	return tbytes;
}

/*
 * write_oob_to_regs - write data to OOB registers
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j += 4)
		oob_reg_write(ctrl, j,
				(oob[j + 0] << 24) |
				(oob[j + 1] << 16) |
				(oob[j + 2] <<  8) |
				(oob[j + 3] <<  0));
	return tbytes;
}

#ifndef __UBOOT__
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	complete(&ctrl->done);
	return IRQ_HANDLED;
}

/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}

static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}
#endif /* __UBOOT__ */

static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;

	dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
		brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
	BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}

/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/

static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
	unsigned int ctrl)
{
	/* intentionally left blank */
}

static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

#ifndef __UBOOT__
	unsigned long timeo = msecs_to_jiffies(100);

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
	if (ctrl->cmd_pending &&
			wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
					>> brcmnand_cmd_shift(ctrl);

		dev_err_ratelimited(ctrl->dev,
			"timeout waiting for command %#02x\n", cmd);
		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
	}
#else
	unsigned long timeo = 100; /* 100 msec */
	int ret;

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, timeo);
	WARN_ON(ret);
#endif /* __UBOOT__ */

	ctrl->cmd_pending = 0;
	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
				 INTFC_FLASH_STATUS;
}

enum {
	LLOP_RE				= BIT(16),
	LLOP_WE				= BIT(17),
	LLOP_ALE			= BIT(18),
	LLOP_CLE			= BIT(19),
	LLOP_RETURN_IDLE		= BIT(31),

	LLOP_DATA_MASK			= GENMASK(15, 0),
};

static int brcmnand_low_level_op(struct brcmnand_host *host,
				 enum brcmnand_llop_type type, u32 data,
				 bool last_op)
{
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	u32 tmp;

	tmp = data & LLOP_DATA_MASK;
	switch (type) {
	case LL_OP_CMD:
		tmp |= LLOP_WE | LLOP_CLE;
		break;
	case LL_OP_ADDR:
		/* WE | ALE */
		tmp |= LLOP_WE | LLOP_ALE;
		break;
	case LL_OP_WR:
		/* WE */
		tmp |= LLOP_WE;
		break;
	case LL_OP_RD:
		/* RE */
		tmp |= LLOP_RE;
		break;
	}
	if (last_op)
		/* RETURN_IDLE */
		tmp |= LLOP_RETURN_IDLE;

	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);

	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);

	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
	return brcmnand_waitfunc(mtd, chip);
}
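
/*
 * The low-level op path above bit-bangs discrete CLE/ALE/WE/RE cycles
 * through the controller; brcmnand_cmdfunc() uses it to implement ONFI
 * SET/GET_FEATURES, for which the controller has no native command.
 */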

static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
			     int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
			command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
		(host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(mtd, chip);

	if (native_cmd == CMD_PARAMETER_READ ||
			native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc, true);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++) {
			u32 fc;

			fc = brcmnand_read_fc(ctrl, i);

			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			if (ctrl->parameter_page_big_endian)
				flash_cache[i] = be32_to_cpu(fc);
			else
				flash_cache[i] = le32_to_cpu(fc);
		}

		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

	/* Re-enabling protection is necessary only after erase */
	if (command == NAND_CMD_ERASE1)
		brcmnand_wp(mtd, 1);
}

static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	uint8_t ret = 0;
	int addr, offs;

	switch (host->last_cmd) {
	case NAND_CMD_READID:
		if (host->last_byte < 4)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
				(24 - (host->last_byte << 3));
		else if (host->last_byte < 8)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
				(56 - (host->last_byte << 3));
		break;

	case NAND_CMD_READOOB:
		ret = oob_reg_read(ctrl, host->last_byte);
		break;

	case NAND_CMD_STATUS:
		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
					INTFC_FLASH_STATUS;
		if (wp_on) /* hide WP status */
			ret |= NAND_STATUS_WP;
		break;

	case NAND_CMD_PARAM:
	case NAND_CMD_RNDOUT:
		addr = host->last_addr + host->last_byte;
		offs = addr & (FC_BYTES - 1);

		/* At FC_BYTES boundary, switch to next column */
		if (host->last_byte > 0 && offs == 0)
			nand_change_read_column_op(chip, addr, NULL, 0, false);

		ret = ctrl->flash_cache[offs];
		break;
	case NAND_CMD_GET_FEATURES:
		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
			ret = 0;
		} else {
			bool last = host->last_byte ==
				ONFI_SUBFEATURE_PARAM_LEN - 1;
			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
		}
	}

	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
	host->last_byte++;

	return ret;
}

static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++, buf++)
		*buf = brcmnand_read_byte(mtd);
}

static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				   int len)
{
	int i;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);

	switch (host->last_cmd) {
	case NAND_CMD_SET_FEATURES:
		for (i = 0; i < len; i++)
			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
						  (i + 1) == len);
		break;
	default:
		BUG();
		break;
	}
}

/**
 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
 * following ahead of time:
 *  - Is this descriptor the beginning or end of a linked list?
 *  - What is the (DMA) address of the next descriptor in the linked list?
 */
#ifndef __UBOOT__
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
				  struct brcm_nand_dma_desc *desc, u64 addr,
				  dma_addr_t buf, u32 len, u8 dma_cmd,
				  bool begin, bool end,
				  dma_addr_t next_desc)
{
	memset(desc, 0, sizeof(*desc));
	/* Descriptors are written in native byte order (wordwise) */
	desc->next_desc = lower_32_bits(next_desc);
	desc->next_desc_ext = upper_32_bits(next_desc);
	desc->cmd_irq = (dma_cmd << 24) |
		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
		(!!begin) | ((!!end) << 1); /* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
	desc->cmd_irq |= 0x01 << 12;
#endif
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->flash_addr = lower_32_bits(addr);
	desc->flash_addr_ext = upper_32_bits(addr);
	desc->cs = host->cs;
	desc->status_valid = 0x01;
	return 0;
}

/**
 * Kick the FLASH_DMA engine, with a given DMA descriptor
 */
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned long timeo = msecs_to_jiffies(100);

	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);

	/* Start FLASH_DMA engine */
	ctrl->dma_pending = true;
	mb(); /* flush previous writes */
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */

	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
		dev_err(ctrl->dev,
				"timeout waiting for DMA; status %#x, error status %#x\n",
				flash_dma_readl(ctrl, FLASH_DMA_STATUS),
				flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
	}
	ctrl->dma_pending = false;
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}

static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
			      u32 len, u8 dma_cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	dma_addr_t buf_pa;
	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, buf_pa)) {
		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
		return -ENOMEM;
	}

	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
				   dma_cmd, true, true, 0);

	brcmnand_dma_run(host, ctrl->dma_pa);

	dma_unmap_single(ctrl->dev, buf_pa, len, dir);

	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG;
	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN;

	return 0;
}
#endif /* __UBOOT__ */

/*
 * Assumes proper CS is already set
 */
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
				u64 addr, unsigned int trans, u32 *buf,
				u8 *oob, u64 *err_addr)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	int i, j, ret = 0;

	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			(host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
				   lower_32_bits(addr));
		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
		brcmnand_send_cmd(host, CMD_PAGE_READ);
		brcmnand_waitfunc(mtd, chip);

		if (likely(buf)) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				*buf = brcmnand_read_fc(ctrl, j);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		}

		if (oob)
			oob += read_oob_from_regs(ctrl, i, oob,
					mtd->oobsize / trans,
					host->hwcfg.sector_size_1k);

		if (!ret) {
			*err_addr = brcmnand_read_reg(ctrl,
					BRCMNAND_UNCORR_ADDR) |
				((u64)(brcmnand_read_reg(ctrl,
						BRCMNAND_UNCORR_EXT_ADDR)
					& 0xffff) << 32);
			if (*err_addr)
				ret = -EBADMSG;
		}

		if (!ret) {
			*err_addr = brcmnand_read_reg(ctrl,
					BRCMNAND_CORR_ADDR) |
				((u64)(brcmnand_read_reg(ctrl,
						BRCMNAND_CORR_EXT_ADDR)
					& 0xffff) << 32);
			if (*err_addr)
				ret = -EUCLEAN;
		}
	}

	return ret;
}
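
/*
 * Note: UNCORR_ADDR/CORR_ADDR latch the flash address of the first
 * failing sub-page, so a non-zero readback after a transfer doubles as
 * the error indicator above (the registers are zeroed at function
 * entry).
 */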
1665 
1666 /*
1667  * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
1668  * error
1669  *
1670  * Because the HW ECC signals an ECC error if an erase paged has even a single
1671  * bitflip, we must check each ECC error to see if it is actually an erased
1672  * page with bitflips, not a truly corrupted page.
1673  *
1674  * On a real error, return a negative error code (-EBADMSG for ECC error), and
1675  * buf will contain raw data.
1676  * Otherwise, buf gets filled with 0xffs and return the maximum number of
1677  * bitflips-per-ECC-sector to the caller.
1678  *
1679  */
brcmstb_nand_verify_erased_page(struct mtd_info * mtd,struct nand_chip * chip,void * buf,u64 addr)1680 static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
1681 		  struct nand_chip *chip, void *buf, u64 addr)
1682 {
1683 	int i, sas;
1684 	void *oob = chip->oob_poi;
1685 	int bitflips = 0;
1686 	int page = addr >> chip->page_shift;
1687 	int ret;
1688 
1689 	if (!buf) {
1690 #ifndef __UBOOT__
1691 		buf = chip->data_buf;
1692 #else
1693 		buf = chip->buffers->databuf;
1694 #endif
1695 		/* Invalidate page cache */
1696 		chip->pagebuf = -1;
1697 	}
1698 
1699 	sas = mtd->oobsize / chip->ecc.steps;
1700 
1701 	/* read without ecc for verification */
1702 	ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
1703 	if (ret)
1704 		return ret;
1705 
1706 	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
1707 		ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
1708 						  oob, sas, NULL, 0,
1709 						  chip->ecc.strength);
1710 		if (ret < 0)
1711 			return ret;
1712 
1713 		bitflips = max(bitflips, ret);
1714 	}
1715 
1716 	return bitflips;
1717 }
1718 
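/*
 * Core ECC read path: read 'trans' 512B flash-cache transfers starting at
 * 'addr' into 'buf' (and 'oob', when requested). Returns the max corrected
 * bitflip count on correctable errors, and 0 on uncorrectable errors (the
 * NAND layer expects zero there; ecc_stats.failed is bumped instead).
 */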
1719 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
1720 			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
1721 {
1722 	struct brcmnand_host *host = nand_get_controller_data(chip);
1723 	struct brcmnand_controller *ctrl = host->ctrl;
1724 	u64 err_addr = 0;
1725 	int err;
1726 	bool retry = true;
1727 
1728 	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
1729 
1730 try_dmaread:
1731 	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
1732 
1733 #ifndef __UBOOT__
1734 	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
1735 		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
1736 					     CMD_PAGE_READ);
1737 		if (err) {
1738 			if (mtd_is_bitflip_or_eccerr(err))
1739 				err_addr = addr;
1740 			else
1741 				return -EIO;
1742 		}
1743 	} else {
1744 		if (oob)
1745 			memset(oob, 0x99, mtd->oobsize);
1746 
1747 		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
1748 					       oob, &err_addr);
1749 	}
1750 #else
1751 	if (oob)
1752 		memset(oob, 0x99, mtd->oobsize);
1753 
1754 	err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
1755 							   oob, &err_addr);
1756 #endif /* __UBOOT__ */
1757 
1758 	if (mtd_is_eccerr(err)) {
1759 		/*
1760 		 * Controller versions 7.0 and 7.1: after a PIO read that
1761 		 * reported an uncorrectable error, the DMA engine can latch
1762 		 * that stale error and report it for the next DMA read; it
1763 		 * is cleared only by a subsequent DMA read. Retry once to
1764 		 * clear a possible false error for the current read.
1765 		 */
1767 		if ((ctrl->nand_version == 0x0700) ||
1768 		    (ctrl->nand_version == 0x0701)) {
1769 			if (retry) {
1770 				retry = false;
1771 				goto try_dmaread;
1772 			}
1773 		}
1774 
1775 		/*
1776 		 * Controller version 7.2 has hw encoder to detect erased page
1777 		 * bitflips, apply sw verification for older controllers only
1778 		 */
1779 		if (ctrl->nand_version < 0x0702) {
1780 			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
1781 							      addr);
1782 			/* erased page bitflips corrected */
1783 			if (err >= 0)
1784 				return err;
1785 		}
1786 
1787 		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
1788 			(unsigned long long)err_addr);
1789 		mtd->ecc_stats.failed++;
1790 		/* NAND layer expects zero on ECC errors */
1791 		return 0;
1792 	}
1793 
1794 	if (mtd_is_bitflip(err)) {
1795 		unsigned int corrected = brcmnand_count_corrected(ctrl);
1796 
1797 		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
1798 			(unsigned long long)err_addr);
1799 		mtd->ecc_stats.corrected += corrected;
1800 		/* Always exceed the software-imposed threshold */
1801 		return max(mtd->bitflip_threshold, corrected);
1802 	}
1803 
1804 	return 0;
1805 }
1806 
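/*
 * nand_read_page_op() issues the READ0 command through brcmnand_cmdfunc(),
 * which records the full flash offset in host->last_addr for us.
 */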
1807 static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1808 			      uint8_t *buf, int oob_required, int page)
1809 {
1810 	struct brcmnand_host *host = nand_get_controller_data(chip);
1811 	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1812 
1813 	nand_read_page_op(chip, page, 0, NULL, 0);
1814 
1815 	return brcmnand_read(mtd, chip, host->last_addr,
1816 			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
1817 }
1818 
1819 static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1820 				  uint8_t *buf, int oob_required, int page)
1821 {
1822 	struct brcmnand_host *host = nand_get_controller_data(chip);
1823 	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1824 	int ret;
1825 
1826 	nand_read_page_op(chip, page, 0, NULL, 0);
1827 
1828 	brcmnand_set_ecc_enabled(host, 0);
1829 	ret = brcmnand_read(mtd, chip, host->last_addr,
1830 			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
1831 	brcmnand_set_ecc_enabled(host, 1);
1832 	return ret;
1833 }
1834 
1835 static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1836 			     int page)
1837 {
1838 	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
1839 			mtd->writesize >> FC_SHIFT,
1840 			NULL, (u8 *)chip->oob_poi);
1841 }
1842 
1843 static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1844 				 int page)
1845 {
1846 	struct brcmnand_host *host = nand_get_controller_data(chip);
1847 
1848 	brcmnand_set_ecc_enabled(host, 0);
1849 	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
1850 		mtd->writesize >> FC_SHIFT,
1851 		NULL, (u8 *)chip->oob_poi);
1852 	brcmnand_set_ecc_enabled(host, 1);
1853 	return 0;
1854 }
1855 
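/*
 * Core program path: clear WP, preload the OOB registers with 0xff, then
 * for each 512B transfer fill the flash cache (or 0xff when only OOB is
 * written) and issue CMD_PROGRAM_PAGE, failing with -EIO on a NAND status
 * error.
 */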
1856 static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
1857 			  u64 addr, const u32 *buf, u8 *oob)
1858 {
1859 	struct brcmnand_host *host = nand_get_controller_data(chip);
1860 	struct brcmnand_controller *ctrl = host->ctrl;
1861 	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
1862 	int status, ret = 0;
1863 
1864 	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
1865 
1866 	if (unlikely((unsigned long)buf & 0x03)) {
1867 		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
1868 		buf = (u32 *)((unsigned long)buf & ~0x03);
1869 	}
1870 
1871 	brcmnand_wp(mtd, 0);
1872 
1873 	for (i = 0; i < ctrl->max_oob; i += 4)
1874 		oob_reg_write(ctrl, i, 0xffffffff);
1875 
1876 #ifndef __UBOOT__
1877 	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
1878 		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
1879 					mtd->writesize, CMD_PROGRAM_PAGE))
1880 			ret = -EIO;
1881 		goto out;
1882 	}
1883 #endif /* __UBOOT__ */
1884 
1885 	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
1886 			(host->cs << 16) | ((addr >> 32) & 0xffff));
1887 	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
1888 
1889 	for (i = 0; i < trans; i++, addr += FC_BYTES) {
1890 		/* full address MUST be set before populating FC */
1891 		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1892 				   lower_32_bits(addr));
1893 		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1894 
1895 		if (buf) {
1896 			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
1897 
1898 			for (j = 0; j < FC_WORDS; j++, buf++)
1899 				brcmnand_write_fc(ctrl, j, *buf);
1900 
1901 			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
1902 		} else if (oob) {
1903 			for (j = 0; j < FC_WORDS; j++)
1904 				brcmnand_write_fc(ctrl, j, 0xffffffff);
1905 		}
1906 
1907 		if (oob) {
1908 			oob += write_oob_to_regs(ctrl, i, oob,
1909 					mtd->oobsize / trans,
1910 					host->hwcfg.sector_size_1k);
1911 		}
1912 
1913 		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1914 		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
1915 		status = brcmnand_waitfunc(mtd, chip);
1916 
1917 		if (status & NAND_STATUS_FAIL) {
1918 			dev_info(ctrl->dev, "program failed at %llx\n",
1919 				(unsigned long long)addr);
1920 			ret = -EIO;
1921 			goto out;
1922 		}
1923 	}
1924 out:
1925 	brcmnand_wp(mtd, 1);
1926 	return ret;
1927 }
1928 
1929 static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1930 			       const uint8_t *buf, int oob_required, int page)
1931 {
1932 	struct brcmnand_host *host = nand_get_controller_data(chip);
1933 	void *oob = oob_required ? chip->oob_poi : NULL;
1934 
1935 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1936 	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1937 
1938 	return nand_prog_page_end_op(chip);
1939 }
1940 
1941 static int brcmnand_write_page_raw(struct mtd_info *mtd,
1942 				   struct nand_chip *chip, const uint8_t *buf,
1943 				   int oob_required, int page)
1944 {
1945 	struct brcmnand_host *host = nand_get_controller_data(chip);
1946 	void *oob = oob_required ? chip->oob_poi : NULL;
1947 
1948 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1949 	brcmnand_set_ecc_enabled(host, 0);
1950 	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1951 	brcmnand_set_ecc_enabled(host, 1);
1952 
1953 	return nand_prog_page_end_op(chip);
1954 }
1955 
1956 static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1957 				  int page)
1958 {
1959 	return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
1960 				  NULL, chip->oob_poi);
1961 }
1962 
1963 static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1964 				  int page)
1965 {
1966 	struct brcmnand_host *host = nand_get_controller_data(chip);
1967 	int ret;
1968 
1969 	brcmnand_set_ecc_enabled(host, 0);
1970 	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
1971 				 (u8 *)chip->oob_poi);
1972 	brcmnand_set_ecc_enabled(host, 1);
1973 
1974 	return ret;
1975 }
1976 
1977 /***********************************************************************
1978  * Per-CS setup (1 NAND device)
1979  ***********************************************************************/
1980 
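/*
 * Program the per-CS CONFIG(_EXT) and ACC_CONTROL registers from the
 * decoded geometry in 'cfg': block/page/device sizes are encoded either as
 * indices into controller-specific tables or as log2 offsets from the
 * controller minimums.
 */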
1981 static int brcmnand_set_cfg(struct brcmnand_host *host,
1982 			    struct brcmnand_cfg *cfg)
1983 {
1984 	struct brcmnand_controller *ctrl = host->ctrl;
1985 	struct nand_chip *chip = &host->chip;
1986 	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
1987 	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
1988 			BRCMNAND_CS_CFG_EXT);
1989 	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1990 			BRCMNAND_CS_ACC_CONTROL);
1991 	u8 block_size = 0, page_size = 0, device_size = 0;
1992 	u32 tmp;
1993 
1994 	if (ctrl->block_sizes) {
1995 		int i, found;
1996 
1997 		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
1998 			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
1999 				block_size = i;
2000 				found = 1;
2001 			}
2002 		if (!found) {
2003 			dev_warn(ctrl->dev, "invalid block size %u\n",
2004 					cfg->block_size);
2005 			return -EINVAL;
2006 		}
2007 	} else {
2008 		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
2009 	}
2010 
2011 	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
2012 				cfg->block_size > ctrl->max_block_size)) {
2013 		dev_warn(ctrl->dev, "invalid block size %u\n",
2014 				cfg->block_size);
2015 		block_size = 0;
2016 	}
2017 
2018 	if (ctrl->page_sizes) {
2019 		int i, found;
2020 
2021 		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
2022 			if (ctrl->page_sizes[i] == cfg->page_size) {
2023 				page_size = i;
2024 				found = 1;
2025 			}
2026 		if (!found) {
2027 			dev_warn(ctrl->dev, "invalid page size %u\n",
2028 					cfg->page_size);
2029 			return -EINVAL;
2030 		}
2031 	} else {
2032 		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
2033 	}
2034 
2035 	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
2036 				cfg->page_size > ctrl->max_page_size)) {
2037 		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
2038 		return -EINVAL;
2039 	}
2040 
2041 	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
2042 		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
2043 			(unsigned long long)cfg->device_size);
2044 		return -EINVAL;
2045 	}
2046 	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
2047 
2048 	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
2049 		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
2050 		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
2051 		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
2052 		(device_size << CFG_DEVICE_SIZE_SHIFT);
2053 	if (cfg_offs == cfg_ext_offs) {
2054 		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
2055 		       (block_size << CFG_BLK_SIZE_SHIFT);
2056 		nand_writereg(ctrl, cfg_offs, tmp);
2057 	} else {
2058 		nand_writereg(ctrl, cfg_offs, tmp);
2059 		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
2060 		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
2061 		nand_writereg(ctrl, cfg_ext_offs, tmp);
2062 	}
2063 
2064 	tmp = nand_readreg(ctrl, acc_control_offs);
2065 	tmp &= ~brcmnand_ecc_level_mask(ctrl);
2066 	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
2067 	tmp &= ~brcmnand_spare_area_mask(ctrl);
2068 	tmp |= cfg->spare_area_size;
2069 	nand_writereg(ctrl, acc_control_offs, tmp);
2070 
2071 	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
2072 
2073 	/* threshold = ceil(BCH-level * 0.75) */
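	/* e.g. BCH-4 -> threshold 3, BCH-8 -> threshold 6 */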
2074 	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
2075 
2076 	return 0;
2077 }
2078 
2079 static void brcmnand_print_cfg(struct brcmnand_host *host,
2080 			       char *buf, struct brcmnand_cfg *cfg)
2081 {
2082 	buf += sprintf(buf,
2083 		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
2084 		(unsigned long long)cfg->device_size >> 20,
2085 		cfg->block_size >> 10,
2086 		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
2087 		cfg->page_size >= 1024 ? "KiB" : "B",
2088 		cfg->spare_area_size, cfg->device_width);
2089 
2090 	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
2091 	if (is_hamming_ecc(host->ctrl, cfg))
2092 		sprintf(buf, ", Hamming ECC");
2093 	else if (cfg->sector_size_1k)
2094 		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
2095 	else
2096 		sprintf(buf, ", BCH-%u", cfg->ecc_level);
2097 }
2098 
2099 /*
2100  * Minimum number of bytes to address a page. Calculated as:
2101  *     roundup(log2(size / page-size) / 8)
2102  *
2103  * NB: the following does not "round up" for non-power-of-2 'size'; but this is
2104  *     OK because many other things will break if 'size' is irregular...
2105  */
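/*
 * Example: a 1GiB device with 2KiB pages holds 2^19 pages, so 19 address
 * bits round up to 24, i.e. 3 block-address bytes.
 */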
2106 static inline int get_blk_adr_bytes(u64 size, u32 writesize)
2107 {
2108 	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
2109 }
2110 
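/*
 * Derive the hardware configuration for one chip from the geometry that
 * nand_scan_ident() detected (plus optional DT overrides), validate the
 * requested ECC scheme, and commit it to the controller.
 */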
2111 static int brcmnand_setup_dev(struct brcmnand_host *host)
2112 {
2113 	struct mtd_info *mtd = nand_to_mtd(&host->chip);
2114 	struct nand_chip *chip = &host->chip;
2115 	struct brcmnand_controller *ctrl = host->ctrl;
2116 	struct brcmnand_cfg *cfg = &host->hwcfg;
2117 	char msg[128];
2118 	u32 offs, tmp, oob_sector;
2119 	int ret;
2120 
2121 	memset(cfg, 0, sizeof(*cfg));
2122 
2123 #ifndef __UBOOT__
2124 	ret = of_property_read_u32(nand_get_flash_node(chip),
2125 				   "brcm,nand-oob-sector-size",
2126 				   &oob_sector);
2127 #else
2128 	ret = ofnode_read_u32(nand_get_flash_node(chip),
2129 			      "brcm,nand-oob-sector-size",
2130 			      &oob_sector);
2131 #endif /* __UBOOT__ */
2132 	if (ret) {
2133 		/* Use detected size */
2134 		cfg->spare_area_size = mtd->oobsize /
2135 					(mtd->writesize >> FC_SHIFT);
2136 	} else {
2137 		cfg->spare_area_size = oob_sector;
2138 	}
2139 	if (cfg->spare_area_size > ctrl->max_oob)
2140 		cfg->spare_area_size = ctrl->max_oob;
2141 	/*
2142 	 * Set oobsize to be consistent with controller's spare_area_size, as
2143 	 * the rest is inaccessible.
2144 	 */
2145 	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
2146 
2147 	cfg->device_size = mtd->size;
2148 	cfg->block_size = mtd->erasesize;
2149 	cfg->page_size = mtd->writesize;
2150 	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
2151 	cfg->col_adr_bytes = 2;
2152 	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
2153 
2154 	if (chip->ecc.mode != NAND_ECC_HW) {
2155 		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
2156 			chip->ecc.mode);
2157 		return -EINVAL;
2158 	}
2159 
2160 	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
2161 		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
2162 			/* Default to Hamming for 1-bit ECC, if unspecified */
2163 			chip->ecc.algo = NAND_ECC_HAMMING;
2164 		else
2165 			/* Otherwise, BCH */
2166 			chip->ecc.algo = NAND_ECC_BCH;
2167 	}
2168 
2169 	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
2170 						   chip->ecc.size != 512)) {
2171 		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
2172 			chip->ecc.strength, chip->ecc.size);
2173 		return -EINVAL;
2174 	}
2175 
2176 	switch (chip->ecc.size) {
2177 	case 512:
2178 		if (chip->ecc.algo == NAND_ECC_HAMMING)
2179 			cfg->ecc_level = 15;
2180 		else
2181 			cfg->ecc_level = chip->ecc.strength;
2182 		cfg->sector_size_1k = 0;
2183 		break;
2184 	case 1024:
2185 		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
2186 			dev_err(ctrl->dev, "1KB sectors not supported\n");
2187 			return -EINVAL;
2188 		}
2189 		if (chip->ecc.strength & 0x1) {
2190 			dev_err(ctrl->dev,
2191 				"odd ECC not supported with 1KB sectors\n");
2192 			return -EINVAL;
2193 		}
2194 
2195 		cfg->ecc_level = chip->ecc.strength >> 1;
2196 		cfg->sector_size_1k = 1;
2197 		break;
2198 	default:
2199 		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
2200 			chip->ecc.size);
2201 		return -EINVAL;
2202 	}
2203 
2204 	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
2205 	if (mtd->writesize > 512)
2206 		cfg->ful_adr_bytes += cfg->col_adr_bytes;
2207 	else
2208 		cfg->ful_adr_bytes += 1;
2209 
2210 	ret = brcmnand_set_cfg(host, cfg);
2211 	if (ret)
2212 		return ret;
2213 
2214 	brcmnand_set_ecc_enabled(host, 1);
2215 
2216 	brcmnand_print_cfg(host, msg, cfg);
2217 	dev_info(ctrl->dev, "detected %s\n", msg);
2218 
2219 	/* Configure ACC_CONTROL */
2220 	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
2221 	tmp = nand_readreg(ctrl, offs);
2222 	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
2223 	tmp &= ~ACC_CONTROL_RD_ERASED;
2224 
2225 	/* We need to turn on reads from erased pages protected by ECC */
2226 	if (ctrl->nand_version >= 0x0702)
2227 		tmp |= ACC_CONTROL_RD_ERASED;
2228 	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
2229 	if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2230 		tmp &= ~ACC_CONTROL_PREFETCH;
2231 
2232 	nand_writereg(ctrl, offs, tmp);
2233 
2234 	return 0;
2235 }
2236 
2237 #ifndef __UBOOT__
2238 static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
2239 #else
2240 static int brcmnand_init_cs(struct brcmnand_host *host, ofnode dn)
2241 #endif
2242 {
2243 	struct brcmnand_controller *ctrl = host->ctrl;
2244 #ifndef __UBOOT__
2245 	struct platform_device *pdev = host->pdev;
2246 #else
2247 	struct udevice *pdev = host->pdev;
2248 #endif /* __UBOOT__ */
2249 	struct mtd_info *mtd;
2250 	struct nand_chip *chip;
2251 	int ret;
2252 	u16 cfg_offs;
2253 
2254 #ifndef __UBOOT__
2255 	ret = of_property_read_u32(dn, "reg", &host->cs);
2256 #else
2257 	ret = ofnode_read_s32(dn, "reg", &host->cs);
2258 #endif
2259 	if (ret) {
2260 		dev_err(&pdev->dev, "can't get chip-select\n");
2261 		return -ENXIO;
2262 	}
2263 
2264 	mtd = nand_to_mtd(&host->chip);
2265 	chip = &host->chip;
2266 
2267 	nand_set_flash_node(chip, dn);
2268 	nand_set_controller_data(chip, host);
2269 #ifndef __UBOOT__
2270 	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
2271 				   host->cs);
2272 #else
2273 	mtd->name = devm_kasprintf(pdev, GFP_KERNEL, "brcmnand.%d",
2274 				   host->cs);
2275 #endif /* __UBOOT__ */
2276 	if (!mtd->name)
2277 		return -ENOMEM;
2278 
2279 	mtd->owner = THIS_MODULE;
2280 #ifndef __UBOOT__
2281 	mtd->dev.parent = &pdev->dev;
2282 #else
2283 	mtd->dev->parent = pdev;
2284 #endif /* __UBOOT__ */
2285 
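	/*
	 * Dummy I/O addresses: all data moves through the controller's
	 * 512B flash cache rather than a memory-mapped data port.
	 */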
2286 	chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
2287 	chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
2288 
2289 	chip->cmd_ctrl = brcmnand_cmd_ctrl;
2290 	chip->cmdfunc = brcmnand_cmdfunc;
2291 	chip->waitfunc = brcmnand_waitfunc;
2292 	chip->read_byte = brcmnand_read_byte;
2293 	chip->read_buf = brcmnand_read_buf;
2294 	chip->write_buf = brcmnand_write_buf;
2295 
2296 	chip->ecc.mode = NAND_ECC_HW;
2297 	chip->ecc.read_page = brcmnand_read_page;
2298 	chip->ecc.write_page = brcmnand_write_page;
2299 	chip->ecc.read_page_raw = brcmnand_read_page_raw;
2300 	chip->ecc.write_page_raw = brcmnand_write_page_raw;
2301 	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
2302 	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
2303 	chip->ecc.read_oob = brcmnand_read_oob;
2304 	chip->ecc.write_oob = brcmnand_write_oob;
2305 
2306 	chip->controller = &ctrl->controller;
2307 
2308 	/*
2309 	 * The bootloader might have configured 16-bit mode, but the NAND
2310 	 * READID command only works in 8-bit mode, so force 8-bit mode
2311 	 * here to ensure that READID works.
2312 	 */
2313 	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2314 	nand_writereg(ctrl, cfg_offs,
2315 		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
2316 
2317 	ret = nand_scan_ident(mtd, 1, NULL);
2318 	if (ret)
2319 		return ret;
2320 
2321 	chip->options |= NAND_NO_SUBPAGE_WRITE;
2322 	/*
2323 	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
2324 	 * to/from, and have nand_base pass us a bounce buffer instead, as
2325 	 * needed.
2326 	 */
2327 	chip->options |= NAND_USE_BOUNCE_BUFFER;
2328 
2329 	if (chip->bbt_options & NAND_BBT_USE_FLASH)
2330 		chip->bbt_options |= NAND_BBT_NO_OOB;
2331 
2332 	if (brcmnand_setup_dev(host))
2333 		return -ENXIO;
2334 
2335 	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
2336 	/* only use our internal HW threshold */
2337 	mtd->bitflip_threshold = 1;
2338 
2339 	chip->ecc.layout = brcmstb_choose_ecc_layout(host);
2340 	if (!chip->ecc.layout)
2341 		return -ENXIO;
2342 
2343 	ret = nand_scan_tail(mtd);
2344 	if (ret)
2345 		return ret;
2346 
2347 #ifndef __UBOOT__
2348 	ret = mtd_device_register(mtd, NULL, 0);
2349 	if (ret)
2350 		nand_cleanup(chip);
2351 #else
2352 	ret = nand_register(0, mtd);
2353 #endif /* __UBOOT__ */
2354 
2355 	return ret;
2356 }
2357 
2358 #ifndef __UBOOT__
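/*
 * Save (restore == 0) or restore (restore == 1) the per-CS CONFIG,
 * ACC_CONTROL and timing registers around a suspend/resume cycle.
 */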
2359 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
2360 					    int restore)
2361 {
2362 	struct brcmnand_controller *ctrl = host->ctrl;
2363 	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2364 	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2365 			BRCMNAND_CS_CFG_EXT);
2366 	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2367 			BRCMNAND_CS_ACC_CONTROL);
2368 	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
2369 	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
2370 
2371 	if (restore) {
2372 		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
2373 		if (cfg_offs != cfg_ext_offs)
2374 			nand_writereg(ctrl, cfg_ext_offs,
2375 				      host->hwcfg.config_ext);
2376 		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
2377 		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
2378 		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
2379 	} else {
2380 		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
2381 		if (cfg_offs != cfg_ext_offs)
2382 			host->hwcfg.config_ext =
2383 				nand_readreg(ctrl, cfg_ext_offs);
2384 		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
2385 		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
2386 		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
2387 	}
2388 }
2389 
2390 static int brcmnand_suspend(struct device *dev)
2391 {
2392 	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2393 	struct brcmnand_host *host;
2394 
2395 	list_for_each_entry(host, &ctrl->host_list, node)
2396 		brcmnand_save_restore_cs_config(host, 0);
2397 
2398 	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
2399 	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
2400 	ctrl->corr_stat_threshold =
2401 		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
2402 
2403 	if (has_flash_dma(ctrl))
2404 		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
2405 
2406 	return 0;
2407 }
2408 
2409 static int brcmnand_resume(struct device *dev)
2410 {
2411 	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2412 	struct brcmnand_host *host;
2413 
2414 	if (has_flash_dma(ctrl)) {
2415 		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
2416 		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2417 	}
2418 
2419 	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
2420 	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
2421 	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
2422 			ctrl->corr_stat_threshold);
2423 	if (ctrl->soc) {
2424 		/* Clear/re-enable interrupt */
2425 		ctrl->soc->ctlrdy_ack(ctrl->soc);
2426 		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2427 	}
2428 
2429 	list_for_each_entry(host, &ctrl->host_list, node) {
2430 		struct nand_chip *chip = &host->chip;
2431 
2432 		brcmnand_save_restore_cs_config(host, 1);
2433 
2434 		/* Reset the chip, required by some chips after power-up */
2435 		nand_reset_op(chip);
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 const struct dev_pm_ops brcmnand_pm_ops = {
2442 	.suspend		= brcmnand_suspend,
2443 	.resume			= brcmnand_resume,
2444 };
2445 EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
2446 
2447 static const struct of_device_id brcmnand_of_match[] = {
2448 	{ .compatible = "brcm,brcmnand-v4.0" },
2449 	{ .compatible = "brcm,brcmnand-v5.0" },
2450 	{ .compatible = "brcm,brcmnand-v6.0" },
2451 	{ .compatible = "brcm,brcmnand-v6.1" },
2452 	{ .compatible = "brcm,brcmnand-v6.2" },
2453 	{ .compatible = "brcm,brcmnand-v7.0" },
2454 	{ .compatible = "brcm,brcmnand-v7.1" },
2455 	{ .compatible = "brcm,brcmnand-v7.2" },
2456 	{},
2457 };
2458 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
2459 #endif  /* __UBOOT__ */
2460 
2461 /***********************************************************************
2462  * Platform driver setup (per controller)
2463  ***********************************************************************/
2464 
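/*
 * Probe one controller instance: map the register ranges, enable the core
 * clock, detect the controller revision, then initialize a host for every
 * chip-select child node in the device tree.
 */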
2465 #ifndef __UBOOT__
2466 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
2467 #else
2468 int brcmnand_probe(struct udevice *dev, struct brcmnand_soc *soc)
2469 #endif /* __UBOOT__ */
2470 {
2471 #ifndef __UBOOT__
2472 	struct device *dev = &pdev->dev;
2473 	struct device_node *dn = dev->of_node, *child;
2474 #else
2475 	ofnode child;
2476 	struct udevice *pdev = dev;
2477 #endif /* __UBOOT__ */
2478 	struct brcmnand_controller *ctrl;
2479 #ifndef __UBOOT__
2480 	struct resource *res;
2481 #else
2482 	struct resource res;
2483 #endif /* __UBOOT__ */
2484 	int ret;
2485 
2486 #ifndef __UBOOT__
2487 	/* We only support device-tree instantiation */
2488 	if (!dn)
2489 		return -ENODEV;
2490 
2491 	if (!of_match_node(brcmnand_of_match, dn))
2492 		return -ENODEV;
2493 #endif /* __UBOOT__ */
2494 
2495 	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
2496 	if (!ctrl)
2497 		return -ENOMEM;
2498 
2499 #ifndef __UBOOT__
2500 	dev_set_drvdata(dev, ctrl);
2501 #else
2502 	/*
2503 	 * In U-Boot, the driver data is allocated before probing, so to
2504 	 * keep a reference to ctrl we store it in the soc structure.
2505 	 */
2506 	soc->ctrl = ctrl;
2507 #endif /* __UBOOT__ */
2508 	ctrl->dev = dev;
2509 
2510 	init_completion(&ctrl->done);
2511 	init_completion(&ctrl->dma_done);
2512 	nand_hw_control_init(&ctrl->controller);
2513 	INIT_LIST_HEAD(&ctrl->host_list);
2514 
2515 	/* Is the parameter page big endian? */
2516 	ctrl->parameter_page_big_endian =
2517 	    dev_read_u32_default(dev, "parameter-page-big-endian", 1);
2518 
2519 	/* NAND register range */
2520 #ifndef __UBOOT__
2521 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2522 	ctrl->nand_base = devm_ioremap_resource(dev, res);
2523 #else
2524 	dev_read_resource(pdev, 0, &res);
2525 	ctrl->nand_base = devm_ioremap(pdev, res.start, resource_size(&res));
2526 #endif
2527 	if (IS_ERR(ctrl->nand_base))
2528 		return PTR_ERR(ctrl->nand_base);
2529 
2530 	/* Enable clock before using NAND registers */
2531 	ctrl->clk = devm_clk_get(dev, "nand");
2532 	if (!IS_ERR(ctrl->clk)) {
2533 		ret = clk_prepare_enable(ctrl->clk);
2534 		if (ret)
2535 			return ret;
2536 	} else {
2537 		ret = PTR_ERR(ctrl->clk);
2538 		if (ret == -EPROBE_DEFER)
2539 			return ret;
2540 
2541 		ctrl->clk = NULL;
2542 	}
2543 
2544 	/* Initialize NAND revision */
2545 	ret = brcmnand_revision_init(ctrl);
2546 	if (ret)
2547 		goto err;
2548 
2549 	/*
2550 	 * Most chips have this cache at a fixed offset within the 'nand'
2551 	 * block. Some must specify this region separately.
2552 	 */
2553 #ifndef __UBOOT__
2554 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
2555 	if (res) {
2556 		ctrl->nand_fc = devm_ioremap_resource(dev, res);
2557 		if (IS_ERR(ctrl->nand_fc)) {
2558 			ret = PTR_ERR(ctrl->nand_fc);
2559 			goto err;
2560 		}
2561 	} else {
2562 		ctrl->nand_fc = ctrl->nand_base +
2563 				ctrl->reg_offsets[BRCMNAND_FC_BASE];
2564 	}
2565 #else
2566 	if (!dev_read_resource_byname(pdev, "nand-cache", &res)) {
2567 		ctrl->nand_fc = devm_ioremap(dev, res.start,
2568 					     resource_size(&res));
2569 		if (IS_ERR(ctrl->nand_fc)) {
2570 			ret = PTR_ERR(ctrl->nand_fc);
2571 			goto err;
2572 		}
2573 	} else {
2574 		ctrl->nand_fc = ctrl->nand_base +
2575 				ctrl->reg_offsets[BRCMNAND_FC_BASE];
2576 	}
2577 #endif
2578 
2579 #ifndef __UBOOT__
2580 	/* FLASH_DMA */
2581 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
2582 	if (res) {
2583 		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
2584 		if (IS_ERR(ctrl->flash_dma_base)) {
2585 			ret = PTR_ERR(ctrl->flash_dma_base);
2586 			goto err;
2587 		}
2588 
2589 		flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
2590 		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2591 
2592 		/* Allocate descriptor(s) */
2593 		ctrl->dma_desc = dmam_alloc_coherent(dev,
2594 						     sizeof(*ctrl->dma_desc),
2595 						     &ctrl->dma_pa, GFP_KERNEL);
2596 		if (!ctrl->dma_desc) {
2597 			ret = -ENOMEM;
2598 			goto err;
2599 		}
2600 
2601 		ctrl->dma_irq = platform_get_irq(pdev, 1);
2602 		if ((int)ctrl->dma_irq < 0) {
2603 			dev_err(dev, "missing FLASH_DMA IRQ\n");
2604 			ret = -ENODEV;
2605 			goto err;
2606 		}
2607 
2608 		ret = devm_request_irq(dev, ctrl->dma_irq,
2609 				brcmnand_dma_irq, 0, DRV_NAME,
2610 				ctrl);
2611 		if (ret < 0) {
2612 			dev_err(dev, "can't allocate IRQ %d: error %d\n",
2613 					ctrl->dma_irq, ret);
2614 			goto err;
2615 		}
2616 
2617 		dev_info(dev, "enabling FLASH_DMA\n");
2618 	}
2619 #endif /* __UBOOT__ */
2620 
2621 	/* Disable automatic device ID config, direct addressing */
2622 	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
2623 			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
2624 	/* Disable XOR addressing */
2625 	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
2626 
2627 	/* Read the write-protect configuration in the device tree */
2628 	wp_on = dev_read_u32_default(dev, "write-protect", wp_on);
2629 
2630 	if (ctrl->features & BRCMNAND_HAS_WP) {
2631 		/* Permanently disable write protection */
2632 		if (wp_on == 2)
2633 			brcmnand_set_wp(ctrl, false);
2634 	} else {
2635 		wp_on = 0;
2636 	}
2637 
2638 #ifndef __UBOOT__
2639 	/* IRQ */
2640 	ctrl->irq = platform_get_irq(pdev, 0);
2641 	if ((int)ctrl->irq < 0) {
2642 		dev_err(dev, "no IRQ defined\n");
2643 		ret = -ENODEV;
2644 		goto err;
2645 	}
2646 
2647 	/*
2648 	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2649 	 * interesting ways
2650 	 */
2651 	if (soc) {
2652 		ctrl->soc = soc;
2653 
2654 		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
2655 				       DRV_NAME, ctrl);
2656 
2657 		/* Enable interrupt */
2658 		ctrl->soc->ctlrdy_ack(ctrl->soc);
2659 		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2660 	} else {
2661 		/* Use standard interrupt infrastructure */
2662 		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
2663 				       DRV_NAME, ctrl);
2664 	}
2665 	if (ret < 0) {
2666 		dev_err(dev, "can't allocate IRQ %d: error %d\n",
2667 			ctrl->irq, ret);
2668 		goto err;
2669 	}
2670 #endif /* __UBOOT__ */
2671 
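	/*
	 * Bind one brcmnand_host per "brcm,nandcs" child node. A minimal,
	 * hypothetical device-tree fragment (exact properties depend on the
	 * platform binding) might look like:
	 *
	 *	nand@f0442800 {
	 *		compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
	 *		#address-cells = <1>;
	 *		#size-cells = <0>;
	 *		nandcs@1 {
	 *			compatible = "brcm,nandcs";
	 *			reg = <1>;	// chip-select number
	 *			nand-ecc-step-size = <512>;
	 *			nand-ecc-strength = <8>;
	 *		};
	 *	};
	 */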
2672 #ifndef __UBOOT__
2673 	for_each_available_child_of_node(dn, child) {
2674 		if (of_device_is_compatible(child, "brcm,nandcs")) {
2675 			struct brcmnand_host *host;
2676 
2677 			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2678 			if (!host) {
2679 				of_node_put(child);
2680 				ret = -ENOMEM;
2681 				goto err;
2682 			}
2683 			host->pdev = pdev;
2684 			host->ctrl = ctrl;
2685 
2686 			ret = brcmnand_init_cs(host, child);
2687 			if (ret) {
2688 				devm_kfree(dev, host);
2689 				continue; /* Try all chip-selects */
2690 			}
2691 
2692 			list_add_tail(&host->node, &ctrl->host_list);
2693 		}
2694 	}
2695 #else
2696 	ofnode_for_each_subnode(child, dev_ofnode(dev)) {
2697 		if (ofnode_device_is_compatible(child, "brcm,nandcs")) {
2698 			struct brcmnand_host *host;
2699 
2700 			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2701 			if (!host) {
2702 				ret = -ENOMEM;
2703 				goto err;
2704 			}
2705 			host->pdev = pdev;
2706 			host->ctrl = ctrl;
2707 
2708 			ret = brcmnand_init_cs(host, child);
2709 			if (ret) {
2710 				devm_kfree(dev, host);
2711 				continue; /* Try all chip-selects */
2712 			}
2713 
2714 			list_add_tail(&host->node, &ctrl->host_list);
2715 		}
2716 	}
2717 #endif /* __UBOOT__ */
2718 
2719 	/* No chip-selects could initialize properly */
2720 	if (list_empty(&ctrl->host_list)) {
2721 		ret = -ENODEV;
2722 		goto err;
2723 	}
2724 
2725 	return 0;
2726 
2727 err:
2728 #ifndef __UBOOT__
2729 	clk_disable_unprepare(ctrl->clk);
2730 #else
2731 	if (ctrl->clk)
2732 		clk_disable(ctrl->clk);
2733 #endif /* __UBOOT__ */
2734 	return ret;
2735 }
2736 EXPORT_SYMBOL_GPL(brcmnand_probe);
2737 
2738 #ifndef __UBOOT__
2739 int brcmnand_remove(struct platform_device *pdev)
2740 {
2741 	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
2742 	struct brcmnand_host *host;
2743 
2744 	list_for_each_entry(host, &ctrl->host_list, node)
2745 		nand_release(nand_to_mtd(&host->chip));
2746 
2747 	clk_disable_unprepare(ctrl->clk);
2748 
2749 	dev_set_drvdata(&pdev->dev, NULL);
2750 
2751 	return 0;
2752 }
2753 #else
2754 int brcmnand_remove(struct udevice *pdev)
2755 {
2756 	return 0;
2757 }
2758 #endif /* __UBOOT__ */
2759 EXPORT_SYMBOL_GPL(brcmnand_remove);
2760 
2761 MODULE_LICENSE("GPL v2");
2762 MODULE_AUTHOR("Kevin Cernekee");
2763 MODULE_AUTHOR("Brian Norris");
2764 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2765 MODULE_ALIAS("platform:brcmnand");
2766