xref: /linux/drivers/mtd/nand/ecc-mxic.c (revision 48e6633a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Support for Macronix external hardware ECC engine for NAND devices, also
4  * called DPE for Data Processing Engine.
5  *
6  * Copyright © 2019 Macronix
7  * Author: Miquel Raynal <miquel.raynal@bootlin.com>
8  */
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/iopoll.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/mutex.h>
20 #include <linux/of_device.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 
/* DPE Configuration */
#define DP_CONFIG 0x00
#define   ECC_EN BIT(0)
#define   ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3)) /* Strength selector index */
/* DPE Interrupt Status */
#define INTRPT_STS 0x04
#define   TRANS_CMPLT BIT(0)
#define   SDMA_MAIN BIT(1)
#define   SDMA_SPARE BIT(2)
#define   ECC_ERR BIT(3)
#define   TO_SPARE BIT(4)
#define   TO_MAIN BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN 0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN 0x0C
/* Host Controller Configuration */
#define HC_CONFIG 0x10
#define   MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
#define   ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define   ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define   BURST_TYP_FIXED 0
#define   BURST_TYP_INCREASING BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR 0x14
/* ECC Chunk Size */
#define CHUNK_SIZE 0x20
/* Main Data Size */
#define MAIN_SIZE 0x24
/* Spare Data Size: four packed byte-wide fields (meta/parity/rsv/spare) */
#define SPARE_SIZE 0x28
#define   META_SZ(reg) ((reg) & GENMASK(7, 0))
#define   PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
#define   RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
#define   SPARE_SZ(reg) ((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT 0x30
/* SDMA Control */
#define SDMA_CTRL 0x40
#define   WRITE_NAND 0
#define   READ_NAND BIT(1)
#define   CONT_NAND BIT(29)
#define   CONT_SYSM BIT(30) /* Continue System Memory? */
#define   SDMA_STRT BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR 0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR 0x48
/* DPE Version Number (held in the upper 16 bits of the register) */
#define DP_VER 0xD0
#define   DP_VER_OFFSET 16

/* Status bytes between each chunk of spare data */
#define STAT_BYTES 4
/* Per-chunk status byte values reported by the engine */
#define   NO_ERR 0x00
#define   MAX_CORR_ERR 0x28
#define   UNCORR_ERR 0xFE
#define   ERASED_CHUNK 0xFF
83 
/* Per-instance state of the Macronix ECC engine */
struct mxic_ecc_engine {
	struct device *dev;			/* ECC engine device */
	void __iomem *regs;			/* Engine register base */
	int irq;				/* IRQ number, 0 when polling */
	struct completion complete;		/* Completed by the ISR on TRANS_CMPLT */
	struct nand_ecc_engine external_engine;	/* Engine object exposed to the NAND core */
	struct mutex lock;			/* Serializes engine/DMA accesses */
};
92 
/* Per-NAND-device ECC context, stored in nand->ecc.ctx.priv */
struct mxic_ecc_ctx {
	/* ECC machinery */
	unsigned int data_step_sz;	/* Main data bytes per ECC step */
	unsigned int oob_step_sz;	/* OOB bytes per ECC step */
	unsigned int parity_sz;		/* Parity bytes per ECC step */
	unsigned int meta_sz;		/* Metadata bytes per ECC step */
	u8 *status;			/* Per-step status bytes extracted after a read */
	int steps;			/* Number of ECC steps per page */

	/* DMA boilerplate */
	struct nand_ecc_req_tweak_ctx req_ctx;
	u8 *oobwithstat;		/* OOB buffer expanded with room for STAT_BYTES per step */
	struct scatterlist sg[2];	/* [0]: main data, [1]: spare data */
	struct nand_page_io_req *req;	/* Request currently being processed */
};
108 
109 static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
110 {
111 	return container_of(eng, struct mxic_ecc_engine, external_engine);
112 }
113 
114 static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
115 {
116 	struct nand_ecc_engine *eng = nand->ecc.engine;
117 
118 	return ext_ecc_eng_to_mxic(eng);
119 }
120 
121 static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
122 				  struct mtd_oob_region *oobregion)
123 {
124 	struct nand_device *nand = mtd_to_nanddev(mtd);
125 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
126 
127 	if (section < 0 || section >= ctx->steps)
128 		return -ERANGE;
129 
130 	oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
131 	oobregion->length = ctx->parity_sz;
132 
133 	return 0;
134 }
135 
136 static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
137 				   struct mtd_oob_region *oobregion)
138 {
139 	struct nand_device *nand = mtd_to_nanddev(mtd);
140 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
141 
142 	if (section < 0 || section >= ctx->steps)
143 		return -ERANGE;
144 
145 	if (!section) {
146 		oobregion->offset = 2;
147 		oobregion->length = ctx->meta_sz - 2;
148 	} else {
149 		oobregion->offset = section * ctx->oob_step_sz;
150 		oobregion->length = ctx->meta_sz;
151 	}
152 
153 	return 0;
154 }
155 
/* OOB layout callbacks handed to the MTD layer */
static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
	.ecc = mxic_ecc_ooblayout_ecc,
	.free = mxic_ecc_ooblayout_free,
};
160 
161 static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
162 {
163 	u32 reg;
164 
165 	reg = readl(mxic->regs + DP_CONFIG);
166 	reg &= ~ECC_EN;
167 	writel(reg, mxic->regs + DP_CONFIG);
168 }
169 
170 static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
171 {
172 	u32 reg;
173 
174 	reg = readl(mxic->regs + DP_CONFIG);
175 	reg |= ECC_EN;
176 	writel(reg, mxic->regs + DP_CONFIG);
177 }
178 
/* Mask all DPE interrupt signals */
static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
	writel(0, mxic->regs + INTRPT_SIG_EN);
}
183 
/* Only let the "transfer complete" event raise an interrupt */
static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
	writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}
188 
/*
 * Interrupt handler: wake up the waiter when a transfer completes and
 * acknowledge whatever events were reported.
 */
static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
	struct mxic_ecc_engine *mxic = dev_id;
	u32 sts;

	sts = readl(mxic->regs + INTRPT_STS);
	if (!sts)
		return IRQ_NONE; /* Spurious/shared interrupt, not ours */

	if (sts & TRANS_CMPLT)
		complete(&mxic->complete);

	/* Acknowledge the events by writing the status bits back */
	writel(sts, mxic->regs + INTRPT_STS);

	return IRQ_HANDLED;
}
205 
206 static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
207 {
208 	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
209 	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
210 	struct nand_ecc_props *reqs = &nand->ecc.requirements;
211 	struct nand_ecc_props *user = &nand->ecc.user_conf;
212 	struct mtd_info *mtd = nanddev_to_mtd(nand);
213 	int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
214 	int possible_strength[] = {4, 8, 40, 48};
215 	int spare_size[] = {32, 32, 96, 96};
216 	struct mxic_ecc_ctx *ctx;
217 	u32 spare_reg;
218 	int ret;
219 
220 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
221 	if (!ctx)
222 		return -ENOMEM;
223 
224 	nand->ecc.ctx.priv = ctx;
225 
226 	/* Only large page NAND chips may use BCH */
227 	if (mtd->oobsize < 64) {
228 		pr_err("BCH cannot be used with small page NAND chips\n");
229 		return -EINVAL;
230 	}
231 
232 	mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
233 
234 	/* Enable all status bits */
235 	writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
236 	       TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
237 
238 	/* Configure the correction depending on the NAND device topology */
239 	if (user->step_size && user->strength) {
240 		step_size = user->step_size;
241 		strength = user->strength;
242 	} else if (reqs->step_size && reqs->strength) {
243 		step_size = reqs->step_size;
244 		strength = reqs->strength;
245 	}
246 
247 	if (step_size && strength) {
248 		steps = mtd->writesize / step_size;
249 		desired_correction = steps * strength;
250 	}
251 
252 	/* Step size is fixed to 1kiB, strength may vary (4 possible values) */
253 	conf->step_size = SZ_1K;
254 	steps = mtd->writesize / conf->step_size;
255 
256 	ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
257 	if (!ctx->status)
258 		return -ENOMEM;
259 
260 	if (desired_correction) {
261 		strength = desired_correction / steps;
262 
263 		for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
264 			if (possible_strength[idx] >= strength)
265 				break;
266 
267 		idx = min_t(unsigned int, idx,
268 			    ARRAY_SIZE(possible_strength) - 1);
269 	} else {
270 		/* Missing data, maximize the correction */
271 		idx = ARRAY_SIZE(possible_strength) - 1;
272 	}
273 
274 	/* Tune the selected strength until it fits in the OOB area */
275 	for (; idx >= 0; idx--) {
276 		if (spare_size[idx] * steps <= mtd->oobsize)
277 			break;
278 	}
279 
280 	/* This engine cannot be used with this NAND device */
281 	if (idx < 0)
282 		return -EINVAL;
283 
284 	/* Configure the engine for the desired strength */
285 	writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
286 	conf->strength = possible_strength[idx];
287 	spare_reg = readl(mxic->regs + SPARE_SIZE);
288 
289 	ctx->steps = steps;
290 	ctx->data_step_sz = mtd->writesize / steps;
291 	ctx->oob_step_sz = mtd->oobsize / steps;
292 	ctx->parity_sz = PARITY_SZ(spare_reg);
293 	ctx->meta_sz = META_SZ(spare_reg);
294 
295 	/* Ensure buffers will contain enough bytes to store the STAT_BYTES */
296 	ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
297 					(ctx->steps * STAT_BYTES);
298 	ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
299 	if (ret)
300 		return ret;
301 
302 	ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
303 				   GFP_KERNEL);
304 	if (!ctx->oobwithstat) {
305 		ret = -ENOMEM;
306 		goto cleanup_req_tweak;
307 	}
308 
309 	sg_init_table(ctx->sg, 2);
310 
311 	/* Configuration dump and sanity checks */
312 	dev_err(dev, "DPE version number: %d\n",
313 		readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
314 	dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
315 	dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
316 	dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
317 	dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
318 	dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
319 	dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
320 
321 	if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
322 	    SPARE_SZ(spare_reg)) {
323 		dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
324 			ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
325 			SPARE_SZ(spare_reg));
326 		ret = -EINVAL;
327 		goto free_oobwithstat;
328 	}
329 
330 	if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
331 		dev_err(dev, "Wrong OOB configuration: %d != %d\n",
332 			ctx->oob_step_sz, SPARE_SZ(spare_reg));
333 		ret = -EINVAL;
334 		goto free_oobwithstat;
335 	}
336 
337 	return 0;
338 
339 free_oobwithstat:
340 	kfree(ctx->oobwithstat);
341 cleanup_req_tweak:
342 	nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
343 
344 	return ret;
345 }
346 
347 static int mxic_ecc_init_ctx_external(struct nand_device *nand)
348 {
349 	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
350 	struct device *dev = nand->ecc.engine->dev;
351 	int ret;
352 
353 	dev_info(dev, "Macronix ECC engine in external mode\n");
354 
355 	ret = mxic_ecc_init_ctx(nand, dev);
356 	if (ret)
357 		return ret;
358 
359 	/* Trigger each step manually */
360 	writel(1, mxic->regs + CHUNK_CNT);
361 	writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
362 	       mxic->regs + HC_CONFIG);
363 
364 	return 0;
365 }
366 
367 static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
368 {
369 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
370 
371 	if (ctx) {
372 		nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
373 		kfree(ctx->oobwithstat);
374 	}
375 }
376 
377 static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
378 {
379 	u32 val;
380 	int ret;
381 
382 	if (mxic->irq) {
383 		reinit_completion(&mxic->complete);
384 		mxic_ecc_enable_int(mxic);
385 		ret = wait_for_completion_timeout(&mxic->complete,
386 						  msecs_to_jiffies(1000));
387 		mxic_ecc_disable_int(mxic);
388 	} else {
389 		ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
390 					 val & TRANS_CMPLT, 10, USEC_PER_SEC);
391 		writel(val, mxic->regs + INTRPT_STS);
392 	}
393 
394 	if (ret) {
395 		dev_err(mxic->dev, "Timeout on data xfer completion\n");
396 		return -ETIMEDOUT;
397 	}
398 
399 	return 0;
400 }
401 
402 static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
403 				 unsigned int direction)
404 {
405 	unsigned int dir = (direction == NAND_PAGE_READ) ?
406 			   READ_NAND : WRITE_NAND;
407 	int ret;
408 
409 	mxic_ecc_enable_engine(mxic);
410 
411 	/* Trigger processing */
412 	writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
413 
414 	/* Wait for completion */
415 	ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
416 
417 	mxic_ecc_disable_engine(mxic);
418 
419 	return ret;
420 }
421 
422 static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
423 {
424 	u8 *buf = ctx->oobwithstat;
425 	int next_stat_pos;
426 	int step;
427 
428 	/* Extract the ECC status */
429 	for (step = 0; step < ctx->steps; step++) {
430 		next_stat_pos = ctx->oob_step_sz +
431 				((STAT_BYTES + ctx->oob_step_sz) * step);
432 
433 		ctx->status[step] = buf[next_stat_pos];
434 	}
435 }
436 
437 static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
438 					u8 *dst, const u8 *src)
439 {
440 	int step;
441 
442 	/* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
443 	for (step = 0; step < ctx->steps; step++)
444 		memcpy(dst + (step * ctx->oob_step_sz),
445 		       src + (step * (ctx->oob_step_sz + STAT_BYTES)),
446 		       ctx->oob_step_sz);
447 }
448 
449 static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
450 					u8 *dst, const u8 *src)
451 {
452 	int step;
453 
454 	/* Add some space in the OOB buffer for the status bytes */
455 	for (step = 0; step < ctx->steps; step++)
456 		memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
457 		       src + (step * ctx->oob_step_sz),
458 		       ctx->oob_step_sz);
459 }
460 
461 static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
462 				  struct nand_device *nand)
463 {
464 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
465 	struct mtd_info *mtd = nanddev_to_mtd(nand);
466 	struct device *dev = mxic->dev;
467 	unsigned int max_bf = 0;
468 	bool failure = false;
469 	int step;
470 
471 	for (step = 0; step < ctx->steps; step++) {
472 		u8 stat = ctx->status[step];
473 
474 		if (stat == NO_ERR) {
475 			dev_dbg(dev, "ECC step %d: no error\n", step);
476 		} else if (stat == ERASED_CHUNK) {
477 			dev_dbg(dev, "ECC step %d: erased\n", step);
478 		} else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
479 			dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
480 			mtd->ecc_stats.failed++;
481 			failure = true;
482 		} else {
483 			dev_dbg(dev, "ECC step %d: %d bits corrected\n",
484 				step, stat);
485 			max_bf = max_t(unsigned int, max_bf, stat);
486 			mtd->ecc_stats.corrected += stat;
487 		}
488 	}
489 
490 	return failure ? -EBADMSG : max_bf;
491 }
492 
493 /* External ECC engine helpers */
494 static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
495 					    struct nand_page_io_req *req)
496 {
497 	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
498 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
499 	struct mtd_info *mtd = nanddev_to_mtd(nand);
500 	int offset, nents, step, ret;
501 
502 	if (req->mode == MTD_OPS_RAW)
503 		return 0;
504 
505 	nand_ecc_tweak_req(&ctx->req_ctx, req);
506 	ctx->req = req;
507 
508 	if (req->type == NAND_PAGE_READ)
509 		return 0;
510 
511 	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
512 				    ctx->req->oobbuf.out);
513 
514 	sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
515 	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
516 		   req->ooblen + (ctx->steps * STAT_BYTES));
517 
518 	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
519 	if (!nents)
520 		return -EINVAL;
521 
522 	mutex_lock(&mxic->lock);
523 
524 	for (step = 0; step < ctx->steps; step++) {
525 		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
526 		       mxic->regs + SDMA_MAIN_ADDR);
527 		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
528 		       mxic->regs + SDMA_SPARE_ADDR);
529 		ret = mxic_ecc_process_data(mxic, ctx->req->type);
530 		if (ret)
531 			break;
532 	}
533 
534 	mutex_unlock(&mxic->lock);
535 
536 	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
537 
538 	if (ret)
539 		return ret;
540 
541 	/* Retrieve the calculated ECC bytes */
542 	for (step = 0; step < ctx->steps; step++) {
543 		offset = ctx->meta_sz + (step * ctx->oob_step_sz);
544 		mtd_ooblayout_get_eccbytes(mtd,
545 					   (u8 *)ctx->req->oobbuf.out + offset,
546 					   ctx->oobwithstat + (step * STAT_BYTES),
547 					   step * ctx->parity_sz,
548 					   ctx->parity_sz);
549 	}
550 
551 	return 0;
552 }
553 
554 static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
555 					   struct nand_page_io_req *req)
556 {
557 	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
558 	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
559 	int nents, step, ret;
560 
561 	if (req->mode == MTD_OPS_RAW)
562 		return 0;
563 
564 	if (req->type == NAND_PAGE_WRITE) {
565 		nand_ecc_restore_req(&ctx->req_ctx, req);
566 		return 0;
567 	}
568 
569 	/* Copy the OOB buffer and add room for the ECC engine status bytes */
570 	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
571 
572 	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
573 	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
574 		   req->ooblen + (ctx->steps * STAT_BYTES));
575 	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
576 	if (!nents)
577 		return -EINVAL;
578 
579 	mutex_lock(&mxic->lock);
580 
581 	for (step = 0; step < ctx->steps; step++) {
582 		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
583 		       mxic->regs + SDMA_MAIN_ADDR);
584 		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
585 		       mxic->regs + SDMA_SPARE_ADDR);
586 		ret = mxic_ecc_process_data(mxic, ctx->req->type);
587 		if (ret)
588 			break;
589 	}
590 
591 	mutex_unlock(&mxic->lock);
592 
593 	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
594 
595 	/* Extract the status bytes and reconstruct the buffer */
596 	mxic_ecc_extract_status_bytes(ctx);
597 	mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
598 
599 	nand_ecc_restore_req(&ctx->req_ctx, req);
600 
601 	return mxic_ecc_count_biterrs(mxic, nand);
602 }
603 
/* Operations exposed to the NAND core for the external-mode engine */
static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
	.init_ctx = mxic_ecc_init_ctx_external,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_external,
	.finish_io_req = mxic_ecc_finish_io_req_external,
};
610 
/*
 * Probe: map the engine registers, quiesce the hardware, optionally
 * hook the interrupt line, then register the ECC engine with the NAND
 * core so NAND controllers can match it from their DT properties.
 */
static int mxic_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mxic_ecc_engine *mxic;
	int ret;

	mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
	if (!mxic)
		return -ENOMEM;

	mxic->dev = &pdev->dev;

	/*
	 * Both memory regions for the ECC engine itself and the AXI slave
	 * address are mandatory.
	 */
	mxic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mxic->regs)) {
		dev_err(&pdev->dev, "Missing memory region\n");
		return PTR_ERR(mxic->regs);
	}

	/* Quiesce the engine before the IRQ handler can run */
	mxic_ecc_disable_engine(mxic);
	mxic_ecc_disable_int(mxic);

	/* IRQ is optional yet much more efficient */
	mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
	if (mxic->irq > 0) {
		ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
				       "mxic-ecc", mxic);
		if (ret)
			return ret;
	} else {
		dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
		/* irq == 0 selects the polling path in the wait helper */
		mxic->irq = 0;
	}

	mutex_init(&mxic->lock);

	/*
	 * In external mode, the device is the ECC engine. In pipelined mode,
	 * the device is the host controller. The device is used to match the
	 * right ECC engine based on the DT properties.
	 */
	mxic->external_engine.dev = &pdev->dev;
	mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
	mxic->external_engine.ops = &mxic_ecc_engine_external_ops;

	nand_ecc_register_on_host_hw_engine(&mxic->external_engine);

	platform_set_drvdata(pdev, mxic);

	return 0;
}
665 
/* Unregister the ECC engine; devm handles the remaining resources */
static int mxic_ecc_remove(struct platform_device *pdev)
{
	struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);

	nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);

	return 0;
}
674 
/* Device-tree match table */
static const struct of_device_id mxic_ecc_of_ids[] = {
	{
		.compatible = "mxicy,nand-ecc-engine-rev3",
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
682 
/* Platform driver glue */
static struct platform_driver mxic_ecc_driver = {
	.driver	= {
		.name = "mxic-nand-ecc-engine",
		.of_match_table = mxic_ecc_of_ids,
	},
	.probe = mxic_ecc_probe,
	.remove	= mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");
696