// SPDX-License-Identifier: GPL-2.0
/*
 * Author:
 *	Chuanhong Guo <gch981213@gmail.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_GIGADEVICE			0xC8

#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)

#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS	(1 << 4)
#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS	(3 << 4)

#define GD5FXGQXXEXXG_REG_STATUS2		0xf0

#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK		(7 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS	(0 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS	(1 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR	(7 << 4)
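
/*
 * The Q4A and Q5E parts report corrected bitflips in a 2-bit field
 * starting at bit 4 of the status register (hence the (1 << 4) and
 * (3 << 4) values above), while the Q4UF parts use the wider 3-bit
 * field covered by GD5FXGQ4UXFXXG_STATUS_ECC_MASK. The per-family
 * *_ecc_get_status() helpers below decode these encodings.
 */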

static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(read_cache_variants_f,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
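
/*
 * The SPI-NAND core picks the first variant in each list above that the
 * controller supports, so the variants are ordered fastest (quad) to
 * slowest (single-bit). The _f read list substitutes the 3-byte-address
 * (_3A) forms of the cache-read ops; it is referenced by the 4Gbit
 * C-series and Q4UFxxG entries in the table below.
 */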

static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 8;
	region->length = 8;

	return 0;
}

static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	if (section) {
		region->offset = 16 * section;
		region->length = 8;
	} else {
		/* section 0 has one byte reserved for bad block mark */
		region->offset = 1;
		region->length = 7;
	}
	return 0;
}

static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
	.ecc = gd5fxgq4xa_ooblayout_ecc,
	.free = gd5fxgq4xa_ooblayout_free,
};
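
/*
 * Resulting layout for the 64-byte OOB area of the Q4A parts: ECC bytes
 * at offsets 8-15, 24-31, 40-47 and 56-63; free bytes at 1-7, 16-23,
 * 32-39 and 48-55; byte 0 is reserved for the bad block marker.
 */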

static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
					 u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/* 1-7 bits are flipped. Return the maximum. */
		return 7;

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int gd5fxgqx_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	region->offset = 64;
	region->length = 64;

	return 0;
}

static int gd5fxgqx_variant2_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 1 byte for the BBM. */
	region->offset = 1;
	region->length = 63;

	return 0;
}

/* Valid for Q4/Q5 and Q6 (untested) devices */
static const struct mtd_ooblayout_ops gd5fxgqx_variant2_ooblayout = {
	.ecc = gd5fxgqx_variant2_ooblayout_ecc,
	.free = gd5fxgqx_variant2_ooblayout_free,
};

static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 128;
	oobregion->length = 128;

	return 0;
}

static int gd5fxgq4xc_ooblayout_256_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 1;
	oobregion->length = 127;

	return 0;
}

static const struct mtd_ooblayout_ops gd5fxgq4xc_oob_256_ops = {
	.ecc = gd5fxgq4xc_ooblayout_256_ecc,
	.free = gd5fxgq4xc_ooblayout_256_free,
};

static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine-grained
		 * bit error status
		 */
		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		/*
		 * 4 ... 7 bits are flipped (1..4 can't be distinguished, so
		 * report the maximum of 4 in this case)
		 */
		/* bits sorted this way (3...0): ECCS1, ECCS0, ECCSE1, ECCSE0 */
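		/*
		 * Example, per the masks and shifts below: ECCS = 01b with
		 * ECCSE = 11b returns (0x10 >> 2) | (0x30 >> 4) = 4 | 3 = 7
		 * corrected bitflips, while ECCSE = 00b returns the floor
		 * value of 4.
		 */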
		return ((status & STATUS_ECC_MASK) >> 2) |
			((status2 & STATUS_ECC_MASK) >> 4);

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine-grained
		 * bit error status
		 */
		ret = spi_mem_exec_op(spinand->spimem, &op);
		if (ret)
			return ret;

		/*
		 * 1 ... 4 bits are flipped (and corrected)
		 */
		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
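		/* ECCSE values 00b..11b therefore map to 1..4 corrected bitflips */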
		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
	case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
		return 3;

	case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
		return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
	}

	return -EINVAL;
}

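/*
 * NAND_MEMORG() arguments below are, in order: bits per cell, page size,
 * OOB size, pages per eraseblock, eraseblocks per LUN, max bad
 * eraseblocks per LUN, planes per LUN, LUNs per target and number of
 * targets (see include/linux/mtd/nand.h); NAND_ECCREQ() is
 * (strength, step size).
 */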
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4xA",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F2GQ4xA",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F4GQ4xA",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
		     NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F4GQ4RC",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68),
		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
				     gd5fxgq4ufxxg_ecc_get_status)),
	SPINAND_INFO("GD5F4GQ4UC",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68),
		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
				     gd5fxgq4ufxxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ4UExxG",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
				     gd5fxgq4uexxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ4UFxxG",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
				     gd5fxgq4ufxxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ5UExxG",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
				     gd5fxgq5xexxg_ecc_get_status)),
};

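/* No manufacturer-specific detection or init hooks are implemented. */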
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
};

const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.chips = gigadevice_spinand_table,
	.nchips = ARRAY_SIZE(gigadevice_spinand_table),
	.ops = &gigadevice_spinand_manuf_ops,
};