// SPDX-License-Identifier: GPL-2.0-only
/*
 * Updated, and converted to generic GPIO based driver by Russell King.
 *
 * Written by Ben Dooks <ben@simtec.co.uk>
 *   Based on 2.4 version by Mark Whittaker
 *
 * © 2004 Simtec Electronics
 *
 * Device driver for NAND flash that uses a memory mapped interface to
 * read/write the NAND commands and data, and GPIO pins for control signals
 * (the DT binding refers to this as "GPIO assisted NAND flash")
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>

struct gpiomtd {
	struct nand_controller	base;
	void __iomem		*io;
	void __iomem		*io_sync;
	struct nand_chip	nand_chip;
	struct gpio_nand_platdata plat;
	struct gpio_desc *nce; /* Optional chip enable */
	struct gpio_desc *cle;
	struct gpio_desc *ale;
	struct gpio_desc *rdy;
	struct gpio_desc *nwp; /* Optional write protection */
};

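/* The driver state is allocated around the nand_chip; recover it from the mtd. */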
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
}


#ifdef CONFIG_ARM
/* gpio_nand_dosync()
 *
 * Make sure the GPIO state changes occur in-order with writes to the NAND
 * memory region.
 * Needed on PXA due to bus-reordering within the SoC itself (see the section
 * on I/O ordering in the PXA manual, section 2.3, p35).
 */
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
	unsigned long tmp;

	if (gpiomtd->io_sync) {
		/*
		 * Linux memory barriers don't cater for what's required here.
		 * What's required is exactly this: a read from a separate
		 * region, followed by an instruction that depends on that
		 * read.
		 */
		tmp = readl(gpiomtd->io_sync);
		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
	}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif

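/*
 * Execute a single instruction of a NAND operation: pulse CLE around command
 * writes and ALE around address writes, transfer data through the memory
 * mapped window (16-bit accesses when the bus is 16 bits wide and the
 * instruction allows it), and wait for ready either via the optional "rdy"
 * GPIO or by polling the chip's status register.
 */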
static int gpio_nand_exec_instr(struct nand_chip *chip,
				const struct nand_op_instr *instr)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 1);
		gpio_nand_dosync(gpiomtd);
		writeb(instr->ctx.cmd.opcode, gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 0);
		return 0;

	case NAND_OP_ADDR_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 1);
		gpio_nand_dosync(gpiomtd);
		for (i = 0; i < instr->ctx.addr.naddrs; i++)
			writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 0);
		return 0;

	case NAND_OP_DATA_IN_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
				     instr->ctx.data.len / 2);
		else
			ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
				    instr->ctx.data.len);
		return 0;

	case NAND_OP_DATA_OUT_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
				      instr->ctx.data.len / 2);
		else
			iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
				     instr->ctx.data.len);
		return 0;

	case NAND_OP_WAITRDY_INSTR:
		if (!gpiomtd->rdy)
			return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);

		return nand_gpio_waitrdy(chip, gpiomtd->rdy,
					 instr->ctx.waitrdy.timeout_ms);

	default:
		return -EINVAL;
	}

	return 0;
}

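/*
 * Run a complete NAND operation: drive the optional nCE line for the duration
 * of the operation, execute each instruction in turn (honouring any required
 * delay between instructions), then return nCE to its idle level.
 */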
static int gpio_nand_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;
	int ret = 0;

	if (check_only)
		return 0;

	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 0);
	for (i = 0; i < op->ninstrs; i++) {
		ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
		if (ret)
			break;

		if (op->instrs[i].delay_ns)
			ndelay(op->instrs[i].delay_ns);
	}
	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 1);

	return ret;
}

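/*
 * Called once the NAND chip has been identified: use the software ECC engine
 * and default to the Hamming algorithm when none was requested.
 */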
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;

	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}

static const struct nand_controller_ops gpio_nand_ops = {
	.exec_op = gpio_nand_exec_op,
	.attach_chip = gpio_nand_attach_chip,
};

#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
	{ .compatible = "gpio-control-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);

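/*
 * Parse the configuration from the device tree: an optional "bank-width"
 * property (a 1 or 2 byte wide bus) and an optional "chip-delay" property.
 */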
static int gpio_nand_get_config_of(const struct device *dev,
				   struct gpio_nand_platdata *plat)
{
	u32 val;

	if (!dev->of_node)
		return -ENODEV;

	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
		if (val == 2) {
			plat->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
		plat->chip_delay = val;

	return 0;
}

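/*
 * Build a MEM resource for the optional synchronisation register described by
 * the "gpio-control-nand,io-sync-reg" property (see gpio_nand_dosync()).
 */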
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	struct resource *r;
	u64 addr;

	if (of_property_read_u64(pdev->dev.of_node,
				       "gpio-control-nand,io-sync-reg", &addr))
		return NULL;

	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = addr;
	r->end = r->start + 0x3;
	r->flags = IORESOURCE_MEM;

	return r;
}
#else /* CONFIG_OF */
static inline int gpio_nand_get_config_of(const struct device *dev,
					  struct gpio_nand_platdata *plat)
{
	return -ENOSYS;
}

static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	return NULL;
}
#endif /* CONFIG_OF */

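/* Prefer device tree configuration; fall back to platform data if present. */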
static inline int gpio_nand_get_config(const struct device *dev,
				       struct gpio_nand_platdata *plat)
{
	int ret = gpio_nand_get_config_of(dev, plat);

	if (!ret)
		return ret;

	if (dev_get_platdata(dev)) {
		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
		return 0;
	}

	return -EINVAL;
}

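/*
 * The io_sync region can come either from the device tree or from a second
 * MEM resource supplied by the platform.
 */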
static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
	struct resource *r = gpio_nand_get_io_sync_of(pdev);

	if (r)
		return r;

	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}

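/*
 * Tear down the MTD and NAND layers, then leave the chip write protected and
 * deselected.
 */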
static int gpio_nand_remove(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
	struct nand_chip *chip = &gpiomtd->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	/* Enable write protection and disable the chip */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return 0;
}

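/*
 * Probe: map the NAND data window and the optional io_sync region, fetch the
 * configuration, claim the control GPIOs, register the controller and scan
 * for the chip, then register the resulting MTD device (with any partitions).
 */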
static int gpio_nand_probe(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret = 0;

	if (!dev->of_node && !dev_get_platdata(dev))
		return -EINVAL;

	gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
	if (!gpiomtd)
		return -ENOMEM;

	chip = &gpiomtd->nand_chip;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpiomtd->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(gpiomtd->io))
		return PTR_ERR(gpiomtd->io);

	res = gpio_nand_get_io_sync(pdev);
	if (res) {
		gpiomtd->io_sync = devm_ioremap_resource(dev, res);
		if (IS_ERR(gpiomtd->io_sync))
			return PTR_ERR(gpiomtd->io_sync);
	}

	ret = gpio_nand_get_config(dev, &gpiomtd->plat);
	if (ret)
		return ret;

	/* Just enable the chip */
	gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiomtd->nce))
		return PTR_ERR(gpiomtd->nce);

	/* We disable write protection once we know probe() will succeed */
	gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->nwp)) {
		ret = PTR_ERR(gpiomtd->nwp);
		goto out_ce;
	}

	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->ale)) {
		ret = PTR_ERR(gpiomtd->ale);
		goto out_ce;
	}

	gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->cle)) {
		ret = PTR_ERR(gpiomtd->cle);
		goto out_ce;
	}

	gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
	if (IS_ERR(gpiomtd->rdy)) {
		ret = PTR_ERR(gpiomtd->rdy);
		goto out_ce;
	}

	nand_controller_init(&gpiomtd->base);
	gpiomtd->base.ops = &gpio_nand_ops;

	nand_set_flash_node(chip, pdev->dev.of_node);
	chip->options		= gpiomtd->plat.options;
	chip->controller	= &gpiomtd->base;

	mtd			= nand_to_mtd(chip);
	mtd->dev.parent		= dev;

	platform_set_drvdata(pdev, gpiomtd);

	/* Disable write protection, if wired up */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_direction_output(gpiomtd->nwp, 1);

	ret = nand_scan(chip, 1);
	if (ret)
		goto err_wp;

	if (gpiomtd->plat.adjust_parts)
		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);

	ret = mtd_device_register(mtd, gpiomtd->plat.parts,
				  gpiomtd->plat.num_parts);
	if (!ret)
		return 0;

err_wp:
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return ret;
}

static struct platform_driver gpio_nand_driver = {
	.probe		= gpio_nand_probe,
	.remove		= gpio_nand_remove,
	.driver		= {
		.name	= "gpio-nand",
		.of_match_table = of_match_ptr(gpio_nand_id_table),
	},
};

module_platform_driver(gpio_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");