// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <watchdog.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;

static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in the MSBs just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}
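
/*
 * Worked example (a sketch, not taken from this file): on a device with
 * planes_per_lun == 2 and a 2048-byte page, fls(2048) == 12, so a request
 * targeting plane 1 turns column 0x000 into 0x1000, i.e. the plane index
 * lands in the address bits directly above the in-page offset.
 */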

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
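
/*
 * Usage sketch: callers pass the full field mask plus the new value for the
 * masked bits, e.g. spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0) clears the
 * ECC-enable bit while leaving the rest of the CFG register untouched
 * (exactly what spinand_ecc_enable() below does for the disable case).
 */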

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip has only one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
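
/*
 * Chunking sketch (assuming a 2048+64 byte page and a controller whose
 * spi_mem_adjust_op_size() caps transfers at 512 bytes): the loop above
 * issues five READ FROM CACHE ops at columns 0, 512, 1024, 1536 and 2048,
 * the last one fetching the remaining 64 OOB bytes. The device cache is
 * linearly addressable, so bumping op.addr.val by the transferred length
 * is all that is needed between iterations.
 */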

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}
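
/*
 * Op-sequence sketch (same hypothetical 512-byte controller limit, 2112-byte
 * cache): the loop emits one op based on the write_cache template (PROGRAM
 * LOAD, which resets the whole cache to 0xff before loading) followed by
 * four ops based on the update_cache template (PROGRAM LOAD RANDOM DATA,
 * which only overwrites the addressed bytes), so earlier chunks survive the
 * later transfers.
 */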

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
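
/*
 * Decoding sketch for the generic path: with the common two-bit ECC status
 * field, STATUS_ECC_NO_BITFLIPS maps to 0, STATUS_ECC_HAS_BITFLIPS maps to
 * eccreq.strength (e.g. 8 for an 8-bit/512-byte engine; deliberately
 * pessimistic since the chip doesn't report an exact count), and
 * STATUS_ECC_UNCOR_ERROR maps to -EBADMSG. Vendors with finer-grained
 * reporting override this through eccinfo.get_status().
 */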

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}
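
/*
 * Command-sequence sketch for one page write (opcodes are the de facto SPI
 * NAND standard ones, not defined in this file): WRITE ENABLE (06h), one or
 * more cache-load ops (e.g. PROGRAM LOAD 02h), PROGRAM EXECUTE (10h) with
 * the row address, then status polling until STATUS_BUSY clears, with
 * STATUS_PROG_FAILED deciding between success and -EIO.
 */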

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		WATCHDOG_RESET();
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		WATCHDOG_RESET();
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;

	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}
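
/*
 * Note the detect() return convention used by the loop above: a positive
 * value means "ID matched, manufacturer found", 0 means "not mine, try the
 * next manufacturer", and a negative value aborts detection with that error.
 */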

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
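
/*
 * Selection sketch: variant tables are ordered fastest first (e.g. a quad
 * READ FROM CACHE before the single-bit one), so the first variant whose
 * ops the controller accepts for a full page + OOB transfer wins. A
 * controller without quad support makes spi_mem_supports_op() reject the
 * x4 variant and the loop falls through to a slower template.
 */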

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID (as returned by READ_ID) to match against @table
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
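
/*
 * Table-entry sketch (hypothetical values, loosely following the
 * SPINAND_INFO() pattern used by the manufacturer drivers in this
 * directory; check linux/mtd/spinand.h for the exact macro signatures):
 *
 *	SPINAND_INFO("example-chip", 0xAB,
 *		     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
 *		     NAND_ECCREQ(8, 512),
 *		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *					      &write_cache_variants,
 *					      &update_cache_variants),
 *		     0, SPINAND_ECCINFO(&example_ooblayout, NULL)),
 *
 * A detect() hook matching devid 0xAB would land on this entry and inherit
 * its memorg/eccreq/op templates through the loop above.
 */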

static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}
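
/*
 * Layout sketch: with no vendor-provided ooblayout, a 64-byte OOB area ends
 * up split as bytes 0-1 reserved for the bad-block marker and bytes 2-63
 * exposed as free (note the hardcoded length of 62 assumes a 64-byte OOB).
 */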

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * the buffer passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole OOB
	 * area be available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_ofnode(spinand, dev_ofnode(dev));
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto = sizeof(struct spinand_device),
	.probe = spinand_probe,
};