// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);
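
/*
 * The attributes generated above are exposed through the attribute groups
 * defined below: each SPI device and each controller gets a "statistics"
 * subdirectory in sysfs, e.g. (illustrative path)
 * /sys/bus/spi/devices/spi0.0/statistics/bytes_tx.
 */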

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
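
/*
 * Typical use is in a driver's probe routine (sketch; foo_chip_info and
 * foo_probe are hypothetical):
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info;
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 *
 * The helper falls back to the spi_device_id's driver_data when no
 * firmware node match data is present.
 */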

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
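
/*
 * Illustrative sketch of what the warning above asks driver authors to do
 * (all names are hypothetical): pair each DT compatible with a matching
 * spi_device_id entry so "spi:" module autoloading keeps working:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */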

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c,
 * alongside other read-only (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to
 * protect the idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
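
/*
 * Minimal usage sketch (the modalias and speed are made up; see
 * spi_new_device() below for the fuller set of fields a caller normally
 * initializes before spi_add_device()):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return NULL;
 *	strscpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
 *	spi->max_speed_hz = 1000000;
 *	spi_set_chipselect(spi, 0, 0);
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */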

static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero (0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * are initialized to 0 then it would be difficult to differentiate
 * between a valid physical CS 0 and an unused logical CS whose physical
 * CS can be 0. As a solution to this issue, initialize all the CS to -1.
 * Now all the unused logical CS will have a -1 physical CS value and can be
 * ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS doesn't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
		if (status)
			return status;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
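
/*
 * Example call from an adapter driver (sketch; the chip described is
 * hypothetical):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo-sensor",
 *		.max_speed_hz	= 500000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *
 * A NULL return only tells the caller that registration failed; the
 * specific reason is logged, as the comment inside the function notes.
 */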

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
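
/*
 * Typical board-file usage (sketch; the device listed is made up):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */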

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of expressing the GPIO polarity
	 * and thus the SPISerialBus() resource defines it on a per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
	 * into account.
	 */
	if (has_acpi_companion(&spi->dev))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			spi_is_last_cs(spi)) ||
		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			!spi_is_last_cs(spi))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

/* Dummy SG for unidirectional transfers */
static struct scatterlist dummy_sg = {
	.page_link = SG_END,
};

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		} else {
			xfer->tx_sg.sgl = &dummy_sg;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		} else {
			xfer->rx_sg.sgl = &dummy_sg;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
		return;

	dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
		return;

	dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_message *msg,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_message *msg,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
	    && !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
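
/*
 * A struct spi_delay is normally filled in by a client driver; e.g. a
 * transfer that needs a post-transfer pause could be set up as (sketch,
 * values made up):
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= buf,
 *		.len	= len,
 *		.delay	= {
 *			.value	= 10,
 *			.unit	= SPI_DELAY_UNIT_USECS,
 *		},
 *	};
 *
 * spi_transfer_delay_exec() (called from spi_transfer_one_message()
 * below) then executes it via the helpers above once the transfer has
 * finished.
 */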

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
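
/*
 * Drivers that roll their own transfer_one_message() are expected to use
 * this export between transfers, mirroring what spi_transfer_one_message()
 * below does (sketch):
 *
 *	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *		// ... perform the transfer ...
 *		if (xfer->cs_change)
 *			spi_transfer_cs_change_delay_exec(msg, xfer);
 *	}
 */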

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, msg, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, msg, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, msg, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
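
/*
 * Sketch of the expected call site in an interrupt driven controller
 * driver whose transfer_one() returned a positive value (the foo_* names
 * are hypothetical):
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		// drain FIFOs and ack the interrupt here ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 *
 * This wakes spi_transfer_wait() above so the next transfer can start.
 */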
1741
spi_idle_runtime_pm(struct spi_controller * ctlr)1742 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1743 {
1744 if (ctlr->auto_runtime_pm) {
1745 pm_runtime_mark_last_busy(ctlr->dev.parent);
1746 pm_runtime_put_autosuspend(ctlr->dev.parent);
1747 }
1748 }
1749
__spi_pump_transfer_message(struct spi_controller * ctlr,struct spi_message * msg,bool was_busy)1750 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1751 struct spi_message *msg, bool was_busy)
1752 {
1753 struct spi_transfer *xfer;
1754 int ret;
1755
1756 if (!was_busy && ctlr->auto_runtime_pm) {
1757 ret = pm_runtime_get_sync(ctlr->dev.parent);
1758 if (ret < 0) {
1759 pm_runtime_put_noidle(ctlr->dev.parent);
1760 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1761 ret);
1762
1763 msg->status = ret;
1764 spi_finalize_current_message(ctlr);
1765
1766 return ret;
1767 }
1768 }
1769
1770 if (!was_busy)
1771 trace_spi_controller_busy(ctlr);
1772
1773 if (!was_busy && ctlr->prepare_transfer_hardware) {
1774 ret = ctlr->prepare_transfer_hardware(ctlr);
1775 if (ret) {
1776 dev_err(&ctlr->dev,
1777 "failed to prepare transfer hardware: %d\n",
1778 ret);
1779
1780 if (ctlr->auto_runtime_pm)
1781 pm_runtime_put(ctlr->dev.parent);
1782
1783 msg->status = ret;
1784 spi_finalize_current_message(ctlr);
1785
1786 return ret;
1787 }
1788 }
1789
1790 trace_spi_message_start(msg);
1791
1792 if (ctlr->prepare_message) {
1793 ret = ctlr->prepare_message(ctlr, msg);
1794 if (ret) {
1795 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1796 ret);
1797 msg->status = ret;
1798 spi_finalize_current_message(ctlr);
1799 return ret;
1800 }
1801 msg->prepared = true;
1802 }
1803
1804 ret = spi_map_msg(ctlr, msg);
1805 if (ret) {
1806 msg->status = ret;
1807 spi_finalize_current_message(ctlr);
1808 return ret;
1809 }
1810
1811 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1812 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1813 xfer->ptp_sts_word_pre = 0;
1814 ptp_read_system_prets(xfer->ptp_sts);
1815 }
1816 }
1817
1818 /*
1819 	 * A driver's implementation of transfer_one_message() must arrange for
1820 * spi_finalize_current_message() to get called. Most drivers will do
1821 * this in the calling context, but some don't. For those cases, a
1822 * completion is used to guarantee that this function does not return
1823 * until spi_finalize_current_message() is done accessing
1824 * ctlr->cur_msg.
1825 	 * The following two flags make it possible to opportunistically skip
1826 	 * the completion, since waiting on it involves expensive spin locks.
1827 	 * In case of a race with the context that calls
1828 	 * spi_finalize_current_message(), the completion will always be used,
1829 	 * due to the strict ordering of these flags enforced by the barriers.
1830 */
1831 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1832 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1833 reinit_completion(&ctlr->cur_msg_completion);
1834 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1835
1836 ret = ctlr->transfer_one_message(ctlr, msg);
1837 if (ret) {
1838 dev_err(&ctlr->dev,
1839 "failed to transfer one message from queue\n");
1840 return ret;
1841 }
1842
1843 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1844 smp_mb(); /* See spi_finalize_current_message()... */
1845 if (READ_ONCE(ctlr->cur_msg_incomplete))
1846 wait_for_completion(&ctlr->cur_msg_completion);
1847
1848 return 0;
1849 }
1850
1851 /**
1852 * __spi_pump_messages - function which processes SPI message queue
1853 * @ctlr: controller to process queue for
1854 * @in_kthread: true if we are in the context of the message pump thread
1855 *
1856 * This function checks if there is any SPI message in the queue that
1857  * needs processing and, if so, calls out to the driver to initialize hardware
1858 * and transfer each message.
1859 *
1860 * Note that it is called both from the kthread itself and also from
1861 * inside spi_sync(); the queue extraction handling at the top of the
1862 * function should deal with this safely.
1863 */
1864 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1865 {
1866 struct spi_message *msg;
1867 bool was_busy = false;
1868 unsigned long flags;
1869 int ret;
1870
1871 /* Take the I/O mutex */
1872 mutex_lock(&ctlr->io_mutex);
1873
1874 /* Lock queue */
1875 spin_lock_irqsave(&ctlr->queue_lock, flags);
1876
1877 /* Make sure we are not already running a message */
1878 if (ctlr->cur_msg)
1879 goto out_unlock;
1880
1881 /* Check if the queue is idle */
1882 if (list_empty(&ctlr->queue) || !ctlr->running) {
1883 if (!ctlr->busy)
1884 goto out_unlock;
1885
1886 /* Defer any non-atomic teardown to the thread */
1887 if (!in_kthread) {
1888 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1889 !ctlr->unprepare_transfer_hardware) {
1890 spi_idle_runtime_pm(ctlr);
1891 ctlr->busy = false;
1892 ctlr->queue_empty = true;
1893 trace_spi_controller_idle(ctlr);
1894 } else {
1895 kthread_queue_work(ctlr->kworker,
1896 &ctlr->pump_messages);
1897 }
1898 goto out_unlock;
1899 }
1900
1901 ctlr->busy = false;
1902 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1903
1904 kfree(ctlr->dummy_rx);
1905 ctlr->dummy_rx = NULL;
1906 kfree(ctlr->dummy_tx);
1907 ctlr->dummy_tx = NULL;
1908 if (ctlr->unprepare_transfer_hardware &&
1909 ctlr->unprepare_transfer_hardware(ctlr))
1910 dev_err(&ctlr->dev,
1911 "failed to unprepare transfer hardware\n");
1912 spi_idle_runtime_pm(ctlr);
1913 trace_spi_controller_idle(ctlr);
1914
1915 spin_lock_irqsave(&ctlr->queue_lock, flags);
1916 ctlr->queue_empty = true;
1917 goto out_unlock;
1918 }
1919
1920 /* Extract head of queue */
1921 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1922 ctlr->cur_msg = msg;
1923
1924 list_del_init(&msg->queue);
1925 if (ctlr->busy)
1926 was_busy = true;
1927 else
1928 ctlr->busy = true;
1929 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1930
1931 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1932 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1933
1934 ctlr->cur_msg = NULL;
1935 ctlr->fallback = false;
1936
1937 mutex_unlock(&ctlr->io_mutex);
1938
1939 /* Prod the scheduler in case transfer_one() was busy waiting */
1940 if (!ret)
1941 cond_resched();
1942 return;
1943
1944 out_unlock:
1945 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1946 mutex_unlock(&ctlr->io_mutex);
1947 }
1948
1949 /**
1950 * spi_pump_messages - kthread work function which processes spi message queue
1951 * @work: pointer to kthread work struct contained in the controller struct
1952 */
1953 static void spi_pump_messages(struct kthread_work *work)
1954 {
1955 struct spi_controller *ctlr =
1956 container_of(work, struct spi_controller, pump_messages);
1957
1958 __spi_pump_messages(ctlr, true);
1959 }
1960
1961 /**
1962 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1963 * @ctlr: Pointer to the spi_controller structure of the driver
1964 * @xfer: Pointer to the transfer being timestamped
1965 * @progress: How many words (not bytes) have been transferred so far
1966 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1967 * transfer, for less jitter in time measurement. Only compatible
1968  *            with PIO drivers. If true, the driver must follow up with
1969  *            spi_take_timestamp_post() or otherwise the system will crash.
1970 * WARNING: for fully predictable results, the CPU frequency must
1971 * also be under control (governor).
1972 *
1973 * This is a helper for drivers to collect the beginning of the TX timestamp
1974 * for the requested byte from the SPI transfer. The frequency with which this
1975 * function must be called (once per word, once for the whole transfer, once
1976  * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1977 * greater than or equal to the requested byte at the time of the call. The
1978 * timestamp is only taken once, at the first such call. It is assumed that
1979 * the driver advances its @tx buffer pointer monotonically.
1980 */
1981 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1982 struct spi_transfer *xfer,
1983 size_t progress, bool irqs_off)
1984 {
1985 if (!xfer->ptp_sts)
1986 return;
1987
1988 if (xfer->timestamped)
1989 return;
1990
1991 if (progress > xfer->ptp_sts_word_pre)
1992 return;
1993
1994 /* Capture the resolution of the timestamp */
1995 xfer->ptp_sts_word_pre = progress;
1996
1997 if (irqs_off) {
1998 local_irq_save(ctlr->irq_flags);
1999 preempt_disable();
2000 }
2001
2002 ptp_read_system_prets(xfer->ptp_sts);
2003 }
2004 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
2005
2006 /**
2007 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
2008 * @ctlr: Pointer to the spi_controller structure of the driver
2009 * @xfer: Pointer to the transfer being timestamped
2010 * @progress: How many words (not bytes) have been transferred so far
2011 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
2012 *
2013 * This is a helper for drivers to collect the end of the TX timestamp for
2014 * the requested byte from the SPI transfer. Can be called with an arbitrary
2015 * frequency: only the first call where @tx exceeds or is equal to the
2016 * requested word will be timestamped.
2017 */
2018 void spi_take_timestamp_post(struct spi_controller *ctlr,
2019 struct spi_transfer *xfer,
2020 size_t progress, bool irqs_off)
2021 {
2022 if (!xfer->ptp_sts)
2023 return;
2024
2025 if (xfer->timestamped)
2026 return;
2027
2028 if (progress < xfer->ptp_sts_word_post)
2029 return;
2030
2031 ptp_read_system_postts(xfer->ptp_sts);
2032
2033 if (irqs_off) {
2034 local_irq_restore(ctlr->irq_flags);
2035 preempt_enable();
2036 }
2037
2038 /* Capture the resolution of the timestamp */
2039 xfer->ptp_sts_word_post = progress;
2040
2041 xfer->timestamped = 1;
2042 }
2043 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
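
/*
 * Example (an illustrative sketch, not part of this file): a PIO driver
 * could bracket each word it pushes to the TX FIFO with the two helpers
 * above; the core then snapshots only the word that was requested via
 * ptp_sts_word_pre/post. The foo_* names are hypothetical.
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_spi_push_word(priv, xfer->tx_buf, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */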
2044
2045 /**
2046 * spi_set_thread_rt - set the controller to pump at realtime priority
2047 * @ctlr: controller to boost priority of
2048 *
2049 * This can be called because the controller requested realtime priority
2050 * (by setting the ->rt value before calling spi_register_controller()) or
2051 * because a device on the bus said that its transfers needed realtime
2052 * priority.
2053 *
2054 * NOTE: at the moment if any device on a bus says it needs realtime then
2055 * the thread will be at realtime priority for all transfers on that
2056 * controller. If this eventually becomes a problem we may see if we can
2057 * find a way to boost the priority only temporarily during relevant
2058 * transfers.
2059 */
2060 static void spi_set_thread_rt(struct spi_controller *ctlr)
2061 {
2062 dev_info(&ctlr->dev,
2063 "will run message pump with realtime priority\n");
2064 sched_set_fifo(ctlr->kworker->task);
2065 }
2066
2067 static int spi_init_queue(struct spi_controller *ctlr)
2068 {
2069 ctlr->running = false;
2070 ctlr->busy = false;
2071 ctlr->queue_empty = true;
2072
2073 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2074 if (IS_ERR(ctlr->kworker)) {
2075 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2076 return PTR_ERR(ctlr->kworker);
2077 }
2078
2079 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2080
2081 /*
2082 * Controller config will indicate if this controller should run the
2083 * message pump with high (realtime) priority to reduce the transfer
2084 * latency on the bus by minimising the delay between a transfer
2085 * request and the scheduling of the message pump thread. Without this
2086 * setting the message pump thread will remain at default priority.
2087 */
2088 if (ctlr->rt)
2089 spi_set_thread_rt(ctlr);
2090
2091 return 0;
2092 }
2093
2094 /**
2095 * spi_get_next_queued_message() - called by driver to check for queued
2096 * messages
2097 * @ctlr: the controller to check for queued messages
2098 *
2099 * If there are more messages in the queue, the next message is returned from
2100 * this call.
2101 *
2102 * Return: the next message in the queue, else NULL if the queue is empty.
2103 */
2104 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2105 {
2106 struct spi_message *next;
2107 unsigned long flags;
2108
2109 /* Get a pointer to the next message, if any */
2110 spin_lock_irqsave(&ctlr->queue_lock, flags);
2111 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2112 queue);
2113 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2114
2115 return next;
2116 }
2117 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
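
/*
 * Example (an illustrative sketch, not part of this file): a driver that
 * manages its own hardware message queue could peek at the upcoming
 * message to prepare its first transfer in advance; foo_spi_prefetch()
 * is hypothetical.
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *
 *	if (next)
 *		foo_spi_prefetch(priv, list_first_entry(&next->transfers,
 *							struct spi_transfer,
 *							transfer_list));
 */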
2118
2119 /*
2120 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2121 * and spi_maybe_unoptimize_message()
2122 * @msg: the message to unoptimize
2123 *
2124 * Peripheral drivers should use spi_unoptimize_message() and callers inside
2125 * core should use spi_maybe_unoptimize_message() rather than calling this
2126 * function directly.
2127 *
2128 * It is not valid to call this on a message that is not currently optimized.
2129 */
2130 static void __spi_unoptimize_message(struct spi_message *msg)
2131 {
2132 struct spi_controller *ctlr = msg->spi->controller;
2133
2134 if (ctlr->unoptimize_message)
2135 ctlr->unoptimize_message(msg);
2136
2137 spi_res_release(ctlr, msg);
2138
2139 msg->optimized = false;
2140 msg->opt_state = NULL;
2141 }
2142
2143 /*
2144 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2145 * @msg: the message to unoptimize
2146 *
2147 * This function is used to unoptimize a message if and only if it was
2148 * optimized by the core (via spi_maybe_optimize_message()).
2149 */
2150 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2151 {
2152 if (!msg->pre_optimized && msg->optimized)
2153 __spi_unoptimize_message(msg);
2154 }
2155
2156 /**
2157 * spi_finalize_current_message() - the current message is complete
2158 * @ctlr: the controller to return the message to
2159 *
2160  * Called by the driver to notify the core that the message at the front of
2161  * the queue is complete and can be removed from the queue.
2162 */
2163 void spi_finalize_current_message(struct spi_controller *ctlr)
2164 {
2165 struct spi_transfer *xfer;
2166 struct spi_message *mesg;
2167 int ret;
2168
2169 mesg = ctlr->cur_msg;
2170
2171 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2172 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2173 ptp_read_system_postts(xfer->ptp_sts);
2174 xfer->ptp_sts_word_post = xfer->len;
2175 }
2176 }
2177
2178 if (unlikely(ctlr->ptp_sts_supported))
2179 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2180 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2181
2182 spi_unmap_msg(ctlr, mesg);
2183
2184 if (mesg->prepared && ctlr->unprepare_message) {
2185 ret = ctlr->unprepare_message(ctlr, mesg);
2186 if (ret) {
2187 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2188 ret);
2189 }
2190 }
2191
2192 mesg->prepared = false;
2193
2194 spi_maybe_unoptimize_message(mesg);
2195
2196 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2197 smp_mb(); /* See __spi_pump_transfer_message()... */
2198 if (READ_ONCE(ctlr->cur_msg_need_completion))
2199 complete(&ctlr->cur_msg_completion);
2200
2201 trace_spi_message_done(mesg);
2202
2203 mesg->state = NULL;
2204 if (mesg->complete)
2205 mesg->complete(mesg->context);
2206 }
2207 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
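
/*
 * Example (an illustrative sketch, not part of this file): a driver that
 * provides its own transfer_one_message() is responsible for reporting
 * completion itself. The foo_* names are hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_spi_run_message(ctlr, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */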
2208
2209 static int spi_start_queue(struct spi_controller *ctlr)
2210 {
2211 unsigned long flags;
2212
2213 spin_lock_irqsave(&ctlr->queue_lock, flags);
2214
2215 if (ctlr->running || ctlr->busy) {
2216 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2217 return -EBUSY;
2218 }
2219
2220 ctlr->running = true;
2221 ctlr->cur_msg = NULL;
2222 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2223
2224 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2225
2226 return 0;
2227 }
2228
2229 static int spi_stop_queue(struct spi_controller *ctlr)
2230 {
2231 unsigned long flags;
2232 unsigned limit = 500;
2233 int ret = 0;
2234
2235 spin_lock_irqsave(&ctlr->queue_lock, flags);
2236
2237 /*
2238 * This is a bit lame, but is optimized for the common execution path.
2239 * A wait_queue on the ctlr->busy could be used, but then the common
2240 * execution path (pump_messages) would be required to call wake_up or
2241 * friends on every SPI message. Do this instead.
2242 */
2243 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2244 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2245 usleep_range(10000, 11000);
2246 spin_lock_irqsave(&ctlr->queue_lock, flags);
2247 }
2248
2249 if (!list_empty(&ctlr->queue) || ctlr->busy)
2250 ret = -EBUSY;
2251 else
2252 ctlr->running = false;
2253
2254 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2255
2256 return ret;
2257 }
2258
2259 static int spi_destroy_queue(struct spi_controller *ctlr)
2260 {
2261 int ret;
2262
2263 ret = spi_stop_queue(ctlr);
2264
2265 /*
2266 * kthread_flush_worker will block until all work is done.
2267 	 * If the reason that spi_stop_queue() timed out is that the work will
2268 	 * never finish, then it does no good to flush or stop the thread, so
2269 	 * just return the error.
2270 */
2271 if (ret) {
2272 dev_err(&ctlr->dev, "problem destroying queue\n");
2273 return ret;
2274 }
2275
2276 kthread_destroy_worker(ctlr->kworker);
2277
2278 return 0;
2279 }
2280
2281 static int __spi_queued_transfer(struct spi_device *spi,
2282 struct spi_message *msg,
2283 bool need_pump)
2284 {
2285 struct spi_controller *ctlr = spi->controller;
2286 unsigned long flags;
2287
2288 spin_lock_irqsave(&ctlr->queue_lock, flags);
2289
2290 if (!ctlr->running) {
2291 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2292 return -ESHUTDOWN;
2293 }
2294 msg->actual_length = 0;
2295 msg->status = -EINPROGRESS;
2296
2297 list_add_tail(&msg->queue, &ctlr->queue);
2298 ctlr->queue_empty = false;
2299 if (!ctlr->busy && need_pump)
2300 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2301
2302 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2303 return 0;
2304 }
2305
2306 /**
2307 * spi_queued_transfer - transfer function for queued transfers
2308 * @spi: SPI device which is requesting transfer
2309  * @msg: SPI message to be handled; it is queued onto the driver queue
2310 *
2311 * Return: zero on success, else a negative error code.
2312 */
2313 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2314 {
2315 return __spi_queued_transfer(spi, msg, true);
2316 }
2317
2318 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2319 {
2320 int ret;
2321
2322 ctlr->transfer = spi_queued_transfer;
2323 if (!ctlr->transfer_one_message)
2324 ctlr->transfer_one_message = spi_transfer_one_message;
2325
2326 /* Initialize and start queue */
2327 ret = spi_init_queue(ctlr);
2328 if (ret) {
2329 dev_err(&ctlr->dev, "problem initializing queue\n");
2330 goto err_init_queue;
2331 }
2332 ctlr->queued = true;
2333 ret = spi_start_queue(ctlr);
2334 if (ret) {
2335 dev_err(&ctlr->dev, "problem starting queue\n");
2336 goto err_start_queue;
2337 }
2338
2339 return 0;
2340
2341 err_start_queue:
2342 spi_destroy_queue(ctlr);
2343 err_init_queue:
2344 return ret;
2345 }
2346
2347 /**
2348  * spi_flush_queue - Send all pending messages in the queue from the caller's
2349 * context
2350 * @ctlr: controller to process queue for
2351 *
2352 * This should be used when one wants to ensure all pending messages have been
2353  * sent before doing something. It is used by the spi-mem code to make sure SPI
2354 * memory operations do not preempt regular SPI transfers that have been queued
2355 * before the spi-mem operation.
2356 */
2357 void spi_flush_queue(struct spi_controller *ctlr)
2358 {
2359 if (ctlr->transfer == spi_queued_transfer)
2360 __spi_pump_messages(ctlr, false);
2361 }
2362
2363 /*-------------------------------------------------------------------------*/
2364
2365 #if defined(CONFIG_OF)
2366 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2367 struct spi_delay *delay, const char *prop)
2368 {
2369 u32 value;
2370
2371 if (!of_property_read_u32(nc, prop, &value)) {
2372 if (value > U16_MAX) {
2373 delay->value = DIV_ROUND_UP(value, 1000);
2374 delay->unit = SPI_DELAY_UNIT_USECS;
2375 } else {
2376 delay->value = value;
2377 delay->unit = SPI_DELAY_UNIT_NSECS;
2378 }
2379 }
2380 }
2381
2382 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2383 struct device_node *nc)
2384 {
2385 u32 value, cs[SPI_CS_CNT_MAX];
2386 int rc, idx;
2387
2388 /* Mode (clock phase/polarity/etc.) */
2389 if (of_property_read_bool(nc, "spi-cpha"))
2390 spi->mode |= SPI_CPHA;
2391 if (of_property_read_bool(nc, "spi-cpol"))
2392 spi->mode |= SPI_CPOL;
2393 if (of_property_read_bool(nc, "spi-3wire"))
2394 spi->mode |= SPI_3WIRE;
2395 if (of_property_read_bool(nc, "spi-lsb-first"))
2396 spi->mode |= SPI_LSB_FIRST;
2397 if (of_property_read_bool(nc, "spi-cs-high"))
2398 spi->mode |= SPI_CS_HIGH;
2399
2400 /* Device DUAL/QUAD mode */
2401 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2402 switch (value) {
2403 case 0:
2404 spi->mode |= SPI_NO_TX;
2405 break;
2406 case 1:
2407 break;
2408 case 2:
2409 spi->mode |= SPI_TX_DUAL;
2410 break;
2411 case 4:
2412 spi->mode |= SPI_TX_QUAD;
2413 break;
2414 case 8:
2415 spi->mode |= SPI_TX_OCTAL;
2416 break;
2417 default:
2418 dev_warn(&ctlr->dev,
2419 "spi-tx-bus-width %d not supported\n",
2420 value);
2421 break;
2422 }
2423 }
2424
2425 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2426 switch (value) {
2427 case 0:
2428 spi->mode |= SPI_NO_RX;
2429 break;
2430 case 1:
2431 break;
2432 case 2:
2433 spi->mode |= SPI_RX_DUAL;
2434 break;
2435 case 4:
2436 spi->mode |= SPI_RX_QUAD;
2437 break;
2438 case 8:
2439 spi->mode |= SPI_RX_OCTAL;
2440 break;
2441 default:
2442 dev_warn(&ctlr->dev,
2443 "spi-rx-bus-width %d not supported\n",
2444 value);
2445 break;
2446 }
2447 }
2448
2449 if (spi_controller_is_slave(ctlr)) {
2450 if (!of_node_name_eq(nc, "slave")) {
2451 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2452 nc);
2453 return -EINVAL;
2454 }
2455 return 0;
2456 }
2457
2458 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2459 		dev_err(&ctlr->dev, "Number of CS lines exceeds the maximum supported\n");
2460 return -EINVAL;
2461 }
2462
2463 spi_set_all_cs_unused(spi);
2464
2465 /* Device address */
2466 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2467 SPI_CS_CNT_MAX);
2468 if (rc < 0) {
2469 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2470 nc, rc);
2471 return rc;
2472 }
2473 if (rc > ctlr->num_chipselect) {
2474 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2475 nc, rc);
2476 return rc;
2477 }
2478 if ((of_property_read_bool(nc, "parallel-memories")) &&
2479 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2480 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2481 return -EINVAL;
2482 }
2483 for (idx = 0; idx < rc; idx++)
2484 spi_set_chipselect(spi, idx, cs[idx]);
2485
2486 /*
2487 * By default spi->chip_select[0] will hold the physical CS number,
2488 * so set bit 0 in spi->cs_index_mask.
2489 */
2490 spi->cs_index_mask = BIT(0);
2491
2492 /* Device speed */
2493 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2494 spi->max_speed_hz = value;
2495
2496 /* Device CS delays */
2497 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2498 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2499 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2500
2501 return 0;
2502 }
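
/*
 * Example (an illustrative device tree fragment, not part of this file):
 * a child node carrying the properties parsed above could look like the
 * following; the unit address and values are made up.
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <50>;
 *	};
 */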
2503
2504 static struct spi_device *
2505 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2506 {
2507 struct spi_device *spi;
2508 int rc;
2509
2510 /* Alloc an spi_device */
2511 spi = spi_alloc_device(ctlr);
2512 if (!spi) {
2513 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2514 rc = -ENOMEM;
2515 goto err_out;
2516 }
2517
2518 /* Select device driver */
2519 rc = of_alias_from_compatible(nc, spi->modalias,
2520 sizeof(spi->modalias));
2521 if (rc < 0) {
2522 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2523 goto err_out;
2524 }
2525
2526 rc = of_spi_parse_dt(ctlr, spi, nc);
2527 if (rc)
2528 goto err_out;
2529
2530 /* Store a pointer to the node in the device structure */
2531 of_node_get(nc);
2532
2533 device_set_node(&spi->dev, of_fwnode_handle(nc));
2534
2535 /* Register the new device */
2536 rc = spi_add_device(spi);
2537 if (rc) {
2538 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2539 goto err_of_node_put;
2540 }
2541
2542 return spi;
2543
2544 err_of_node_put:
2545 of_node_put(nc);
2546 err_out:
2547 spi_dev_put(spi);
2548 return ERR_PTR(rc);
2549 }
2550
2551 /**
2552 * of_register_spi_devices() - Register child devices onto the SPI bus
2553 * @ctlr: Pointer to spi_controller device
2554 *
2555  * Registers an spi_device for each child node of the controller node that
2556 * represents a valid SPI slave.
2557 */
2558 static void of_register_spi_devices(struct spi_controller *ctlr)
2559 {
2560 struct spi_device *spi;
2561 struct device_node *nc;
2562
2563 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2564 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2565 continue;
2566 spi = of_register_spi_device(ctlr, nc);
2567 if (IS_ERR(spi)) {
2568 dev_warn(&ctlr->dev,
2569 "Failed to create SPI device for %pOF\n", nc);
2570 of_node_clear_flag(nc, OF_POPULATED);
2571 }
2572 }
2573 }
2574 #else
2575 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2576 #endif
2577
2578 /**
2579 * spi_new_ancillary_device() - Register ancillary SPI device
2580 * @spi: Pointer to the main SPI device registering the ancillary device
2581 * @chip_select: Chip Select of the ancillary device
2582 *
2583 * Register an ancillary SPI device; for example some chips have a chip-select
2584 * for normal device usage and another one for setup/firmware upload.
2585 *
2586  * This may only be called from the main SPI device's probe routine.
2587 *
2588 * Return: 0 on success; negative errno on failure
2589 */
2590 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2591 u8 chip_select)
2592 {
2593 struct spi_controller *ctlr = spi->controller;
2594 struct spi_device *ancillary;
2595 int rc = 0;
2596
2597 /* Alloc an spi_device */
2598 ancillary = spi_alloc_device(ctlr);
2599 if (!ancillary) {
2600 rc = -ENOMEM;
2601 goto err_out;
2602 }
2603
2604 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2605
2606 /* Use provided chip-select for ancillary device */
2607 spi_set_all_cs_unused(ancillary);
2608 spi_set_chipselect(ancillary, 0, chip_select);
2609
2610 /* Take over SPI mode/speed from SPI main device */
2611 ancillary->max_speed_hz = spi->max_speed_hz;
2612 ancillary->mode = spi->mode;
2613 /*
2614 * By default spi->chip_select[0] will hold the physical CS number,
2615 * so set bit 0 in spi->cs_index_mask.
2616 */
2617 ancillary->cs_index_mask = BIT(0);
2618
2619 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2620
2621 /* Register the new device */
2622 rc = __spi_add_device(ancillary);
2623 if (rc) {
2624 dev_err(&spi->dev, "failed to register ancillary device\n");
2625 goto err_out;
2626 }
2627
2628 return ancillary;
2629
2630 err_out:
2631 spi_dev_put(ancillary);
2632 return ERR_PTR(rc);
2633 }
2634 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
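
/*
 * Example (an illustrative sketch, not part of this file): a peripheral
 * driver whose chip uses a second chip select for firmware upload could
 * register it from its probe() routine. The foo_* names are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *		...
 *	}
 */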
2635
2636 #ifdef CONFIG_ACPI
2637 struct acpi_spi_lookup {
2638 struct spi_controller *ctlr;
2639 u32 max_speed_hz;
2640 u32 mode;
2641 int irq;
2642 u8 bits_per_word;
2643 u8 chip_select;
2644 int n;
2645 int index;
2646 };
2647
2648 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2649 {
2650 struct acpi_resource_spi_serialbus *sb;
2651 int *count = data;
2652
2653 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2654 return 1;
2655
2656 sb = &ares->data.spi_serial_bus;
2657 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2658 return 1;
2659
2660 *count = *count + 1;
2661
2662 return 1;
2663 }
2664
2665 /**
2666 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2667 * @adev: ACPI device
2668 *
2669 * Return: the number of SpiSerialBus resources in the ACPI-device's
2670 * resource-list; or a negative error code.
2671 */
2672 int acpi_spi_count_resources(struct acpi_device *adev)
2673 {
2674 LIST_HEAD(r);
2675 int count = 0;
2676 int ret;
2677
2678 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2679 if (ret < 0)
2680 return ret;
2681
2682 acpi_dev_free_resource_list(&r);
2683
2684 return count;
2685 }
2686 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2687
2688 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2689 struct acpi_spi_lookup *lookup)
2690 {
2691 const union acpi_object *obj;
2692
2693 if (!x86_apple_machine)
2694 return;
2695
2696 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2697 && obj->buffer.length >= 4)
2698 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2699
2700 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2701 && obj->buffer.length == 8)
2702 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2703
2704 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2705 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2706 lookup->mode |= SPI_LSB_FIRST;
2707
2708 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2709 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2710 lookup->mode |= SPI_CPOL;
2711
2712 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2713 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2714 lookup->mode |= SPI_CPHA;
2715 }
2716
2717 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2718 {
2719 struct acpi_spi_lookup *lookup = data;
2720 struct spi_controller *ctlr = lookup->ctlr;
2721
2722 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2723 struct acpi_resource_spi_serialbus *sb;
2724 acpi_handle parent_handle;
2725 acpi_status status;
2726
2727 sb = &ares->data.spi_serial_bus;
2728 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2729
2730 if (lookup->index != -1 && lookup->n++ != lookup->index)
2731 return 1;
2732
2733 status = acpi_get_handle(NULL,
2734 sb->resource_source.string_ptr,
2735 &parent_handle);
2736
2737 if (ACPI_FAILURE(status))
2738 return -ENODEV;
2739
2740 if (ctlr) {
2741 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2742 return -ENODEV;
2743 } else {
2744 struct acpi_device *adev;
2745
2746 adev = acpi_fetch_acpi_dev(parent_handle);
2747 if (!adev)
2748 return -ENODEV;
2749
2750 ctlr = acpi_spi_find_controller_by_adev(adev);
2751 if (!ctlr)
2752 return -EPROBE_DEFER;
2753
2754 lookup->ctlr = ctlr;
2755 }
2756
2757 /*
2758 * ACPI DeviceSelection numbering is handled by the
2759 * host controller driver in Windows and can vary
2760 * from driver to driver. In Linux we always expect
2761 * 0 .. max - 1 so we need to ask the driver to
2762 * translate between the two schemes.
2763 */
2764 if (ctlr->fw_translate_cs) {
2765 int cs = ctlr->fw_translate_cs(ctlr,
2766 sb->device_selection);
2767 if (cs < 0)
2768 return cs;
2769 lookup->chip_select = cs;
2770 } else {
2771 lookup->chip_select = sb->device_selection;
2772 }
2773
2774 lookup->max_speed_hz = sb->connection_speed;
2775 lookup->bits_per_word = sb->data_bit_length;
2776
2777 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2778 lookup->mode |= SPI_CPHA;
2779 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2780 lookup->mode |= SPI_CPOL;
2781 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2782 lookup->mode |= SPI_CS_HIGH;
2783 }
2784 } else if (lookup->irq < 0) {
2785 struct resource r;
2786
2787 if (acpi_dev_resource_interrupt(ares, 0, &r))
2788 lookup->irq = r.start;
2789 }
2790
2791 /* Always tell the ACPI core to skip this resource */
2792 return 1;
2793 }
2794
2795 /**
2796 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2797 * @ctlr: controller to which the spi device belongs
2798 * @adev: ACPI Device for the spi device
2799 * @index: Index of the spi resource inside the ACPI Node
2800 *
2801  * This should be used to allocate a new SPI device from an ACPI Device node.
2802 * The caller is responsible for calling spi_add_device to register the SPI device.
2803 *
2804 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2805 * using the resource.
2806 * If index is set to -1, index is not used.
2807 * Note: If index is -1, ctlr must be set.
2808 *
2809 * Return: a pointer to the new device, or ERR_PTR on error.
2810 */
2811 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2812 struct acpi_device *adev,
2813 int index)
2814 {
2815 acpi_handle parent_handle = NULL;
2816 struct list_head resource_list;
2817 struct acpi_spi_lookup lookup = {};
2818 struct spi_device *spi;
2819 int ret;
2820
2821 if (!ctlr && index == -1)
2822 return ERR_PTR(-EINVAL);
2823
2824 lookup.ctlr = ctlr;
2825 lookup.irq = -1;
2826 lookup.index = index;
2827 lookup.n = 0;
2828
2829 INIT_LIST_HEAD(&resource_list);
2830 ret = acpi_dev_get_resources(adev, &resource_list,
2831 acpi_spi_add_resource, &lookup);
2832 acpi_dev_free_resource_list(&resource_list);
2833
2834 if (ret < 0)
2835 /* Found SPI in _CRS but it points to another controller */
2836 return ERR_PTR(ret);
2837
2838 if (!lookup.max_speed_hz &&
2839 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2840 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2841 /* Apple does not use _CRS but nested devices for SPI slaves */
2842 acpi_spi_parse_apple_properties(adev, &lookup);
2843 }
2844
2845 if (!lookup.max_speed_hz)
2846 return ERR_PTR(-ENODEV);
2847
2848 spi = spi_alloc_device(lookup.ctlr);
2849 if (!spi) {
2850 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2851 dev_name(&adev->dev));
2852 return ERR_PTR(-ENOMEM);
2853 }
2854
2855 spi_set_all_cs_unused(spi);
2856 spi_set_chipselect(spi, 0, lookup.chip_select);
2857
2858 ACPI_COMPANION_SET(&spi->dev, adev);
2859 spi->max_speed_hz = lookup.max_speed_hz;
2860 spi->mode |= lookup.mode;
2861 spi->irq = lookup.irq;
2862 spi->bits_per_word = lookup.bits_per_word;
2863 /*
2864 * By default spi->chip_select[0] will hold the physical CS number,
2865 * so set bit 0 in spi->cs_index_mask.
2866 */
2867 spi->cs_index_mask = BIT(0);
2868
2869 return spi;
2870 }
2871 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
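
/*
 * Example (an illustrative sketch, not part of this file): instantiating
 * the SPI device described by the second SpiSerialBus resource of an ACPI
 * node, letting the controller be looked up from the resource itself:
 *
 *	struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, 1);
 *
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */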
2872
2873 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2874 struct acpi_device *adev)
2875 {
2876 struct spi_device *spi;
2877
2878 if (acpi_bus_get_status(adev) || !adev->status.present ||
2879 acpi_device_enumerated(adev))
2880 return AE_OK;
2881
2882 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2883 if (IS_ERR(spi)) {
2884 if (PTR_ERR(spi) == -ENOMEM)
2885 return AE_NO_MEMORY;
2886 else
2887 return AE_OK;
2888 }
2889
2890 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2891 sizeof(spi->modalias));
2892
2893 if (spi->irq < 0)
2894 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2895
2896 acpi_device_set_enumerated(adev);
2897
2898 adev->power.flags.ignore_parent = true;
2899 if (spi_add_device(spi)) {
2900 adev->power.flags.ignore_parent = false;
2901 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2902 dev_name(&adev->dev));
2903 spi_dev_put(spi);
2904 }
2905
2906 return AE_OK;
2907 }
2908
2909 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2910 void *data, void **return_value)
2911 {
2912 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2913 struct spi_controller *ctlr = data;
2914
2915 if (!adev)
2916 return AE_OK;
2917
2918 return acpi_register_spi_device(ctlr, adev);
2919 }
2920
2921 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2922
2923 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2924 {
2925 acpi_status status;
2926 acpi_handle handle;
2927
2928 handle = ACPI_HANDLE(ctlr->dev.parent);
2929 if (!handle)
2930 return;
2931
2932 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2933 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2934 acpi_spi_add_device, NULL, ctlr, NULL);
2935 if (ACPI_FAILURE(status))
2936 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2937 }
2938 #else
2939 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2940 #endif /* CONFIG_ACPI */
2941
2942 static void spi_controller_release(struct device *dev)
2943 {
2944 struct spi_controller *ctlr;
2945
2946 ctlr = container_of(dev, struct spi_controller, dev);
2947 kfree(ctlr);
2948 }
2949
2950 static struct class spi_master_class = {
2951 .name = "spi_master",
2952 .dev_release = spi_controller_release,
2953 .dev_groups = spi_master_groups,
2954 };
2955
2956 #ifdef CONFIG_SPI_SLAVE
2957 /**
2958 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2959 * controller
2960 * @spi: device used for the current transfer
2961 */
2962 int spi_slave_abort(struct spi_device *spi)
2963 {
2964 struct spi_controller *ctlr = spi->controller;
2965
2966 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2967 return ctlr->slave_abort(ctlr);
2968
2969 return -ENOTSUPP;
2970 }
2971 EXPORT_SYMBOL_GPL(spi_slave_abort);
2972
2973 int spi_target_abort(struct spi_device *spi)
2974 {
2975 struct spi_controller *ctlr = spi->controller;
2976
2977 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2978 return ctlr->target_abort(ctlr);
2979
2980 return -ENOTSUPP;
2981 }
2982 EXPORT_SYMBOL_GPL(spi_target_abort);
2983
2984 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2985 char *buf)
2986 {
2987 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2988 dev);
2989 struct device *child;
2990
2991 child = device_find_any_child(&ctlr->dev);
2992 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2993 }
2994
2995 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2996 const char *buf, size_t count)
2997 {
2998 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2999 dev);
3000 struct spi_device *spi;
3001 struct device *child;
3002 char name[32];
3003 int rc;
3004
3005 rc = sscanf(buf, "%31s", name);
3006 if (rc != 1 || !name[0])
3007 return -EINVAL;
3008
3009 child = device_find_any_child(&ctlr->dev);
3010 if (child) {
3011 /* Remove registered slave */
3012 device_unregister(child);
3013 put_device(child);
3014 }
3015
3016 if (strcmp(name, "(null)")) {
3017 /* Register new slave */
3018 spi = spi_alloc_device(ctlr);
3019 if (!spi)
3020 return -ENOMEM;
3021
3022 strscpy(spi->modalias, name, sizeof(spi->modalias));
3023
3024 rc = spi_add_device(spi);
3025 if (rc) {
3026 spi_dev_put(spi);
3027 return rc;
3028 }
3029 }
3030
3031 return count;
3032 }
3033
3034 static DEVICE_ATTR_RW(slave);
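
/*
 * Example (illustrative): from userspace, a slave protocol driver can be
 * bound to the controller through this attribute and removed again by
 * writing "(null)". The spi0 bus number is just an example.
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */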
3035
3036 static struct attribute *spi_slave_attrs[] = {
3037 &dev_attr_slave.attr,
3038 NULL,
3039 };
3040
3041 static const struct attribute_group spi_slave_group = {
3042 .attrs = spi_slave_attrs,
3043 };
3044
3045 static const struct attribute_group *spi_slave_groups[] = {
3046 &spi_controller_statistics_group,
3047 &spi_slave_group,
3048 NULL,
3049 };
3050
3051 static struct class spi_slave_class = {
3052 .name = "spi_slave",
3053 .dev_release = spi_controller_release,
3054 .dev_groups = spi_slave_groups,
3055 };
3056 #else
3057 extern struct class spi_slave_class; /* dummy */
3058 #endif
3059
3060 /**
3061 * __spi_alloc_controller - allocate an SPI master or slave controller
3062 * @dev: the controller, possibly using the platform_bus
3063 * @size: how much zeroed driver-private data to allocate; the pointer to this
3064 * memory is in the driver_data field of the returned device, accessible
3065 * with spi_controller_get_devdata(); the memory is cacheline aligned;
3066 * drivers granting DMA access to portions of their private data need to
3067 * round up @size using ALIGN(size, dma_get_cache_alignment()).
3068 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3069 * slave (true) controller
3070 * Context: can sleep
3071 *
3072 * This call is used only by SPI controller drivers, which are the
3073 * only ones directly touching chip registers. It's how they allocate
3074 * an spi_controller structure, prior to calling spi_register_controller().
3075 *
3076 * This must be called from context that can sleep.
3077 *
3078 * The caller is responsible for assigning the bus number and initializing the
3079 * controller's methods before calling spi_register_controller(); and (after
3080 * errors adding the device) calling spi_controller_put() to prevent a memory
3081 * leak.
3082 *
3083 * Return: the SPI controller structure on success, else NULL.
3084 */
3085 struct spi_controller *__spi_alloc_controller(struct device *dev,
3086 unsigned int size, bool slave)
3087 {
3088 struct spi_controller *ctlr;
3089 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3090
3091 if (!dev)
3092 return NULL;
3093
3094 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3095 if (!ctlr)
3096 return NULL;
3097
3098 device_initialize(&ctlr->dev);
3099 INIT_LIST_HEAD(&ctlr->queue);
3100 spin_lock_init(&ctlr->queue_lock);
3101 spin_lock_init(&ctlr->bus_lock_spinlock);
3102 mutex_init(&ctlr->bus_lock_mutex);
3103 mutex_init(&ctlr->io_mutex);
3104 mutex_init(&ctlr->add_lock);
3105 ctlr->bus_num = -1;
3106 ctlr->num_chipselect = 1;
3107 ctlr->slave = slave;
3108 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3109 ctlr->dev.class = &spi_slave_class;
3110 else
3111 ctlr->dev.class = &spi_master_class;
3112 ctlr->dev.parent = dev;
3113 pm_suspend_ignore_children(&ctlr->dev, true);
3114 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3115
3116 return ctlr;
3117 }
3118 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
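
/*
 * Example (an illustrative sketch, not part of this file): controller
 * drivers normally reach this function through the spi_alloc_master() or
 * spi_alloc_slave() wrappers. The foo_* names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_spi *priv;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		priv = spi_controller_get_devdata(ctlr);
 *		...
 *	}
 */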
3119
3120 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3121 {
3122 spi_controller_put(*(struct spi_controller **)ctlr);
3123 }
3124
3125 /**
3126 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3127 * @dev: physical device of SPI controller
3128 * @size: how much zeroed driver-private data to allocate
3129 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3130 * Context: can sleep
3131 *
3132 * Allocate an SPI controller and automatically release a reference on it
3133 * when @dev is unbound from its driver. Drivers are thus relieved from
3134 * having to call spi_controller_put().
3135 *
3136 * The arguments to this function are identical to __spi_alloc_controller().
3137 *
3138 * Return: the SPI controller structure on success, else NULL.
3139 */
3140 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3141 unsigned int size,
3142 bool slave)
3143 {
3144 struct spi_controller **ptr, *ctlr;
3145
3146 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3147 GFP_KERNEL);
3148 if (!ptr)
3149 return NULL;
3150
3151 ctlr = __spi_alloc_controller(dev, size, slave);
3152 if (ctlr) {
3153 ctlr->devm_allocated = true;
3154 *ptr = ctlr;
3155 devres_add(dev, ptr);
3156 } else {
3157 devres_free(ptr);
3158 }
3159
3160 return ctlr;
3161 }
3162 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3163
3164 /**
3165 * spi_get_gpio_descs() - grab chip select GPIOs for the master
3166 * @ctlr: The SPI master to grab GPIO descriptors for
3167 */
3168 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3169 {
3170 int nb, i;
3171 struct gpio_desc **cs;
3172 struct device *dev = &ctlr->dev;
3173 unsigned long native_cs_mask = 0;
3174 unsigned int num_cs_gpios = 0;
3175
3176 nb = gpiod_count(dev, "cs");
3177 if (nb < 0) {
3178 /* No GPIOs at all is fine, else return the error */
3179 if (nb == -ENOENT)
3180 return 0;
3181 return nb;
3182 }
3183
3184 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3185
3186 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3187 GFP_KERNEL);
3188 if (!cs)
3189 return -ENOMEM;
3190 ctlr->cs_gpiods = cs;
3191
3192 for (i = 0; i < nb; i++) {
3193 /*
3194 * Most chipselects are active low, the inverted
3195 * semantics are handled by special quirks in gpiolib,
3196 * so initializing them GPIOD_OUT_LOW here means
3197 * "unasserted", in most cases this will drive the physical
3198 * line high.
3199 */
3200 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3201 GPIOD_OUT_LOW);
3202 if (IS_ERR(cs[i]))
3203 return PTR_ERR(cs[i]);
3204
3205 if (cs[i]) {
3206 /*
3207 * If we find a CS GPIO, name it after the device and
3208 * chip select line.
3209 */
3210 char *gpioname;
3211
3212 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3213 dev_name(dev), i);
3214 if (!gpioname)
3215 return -ENOMEM;
3216 gpiod_set_consumer_name(cs[i], gpioname);
3217 num_cs_gpios++;
3218 continue;
3219 }
3220
3221 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3222 dev_err(dev, "Invalid native chip select %d\n", i);
3223 return -EINVAL;
3224 }
3225 native_cs_mask |= BIT(i);
3226 }
3227
3228 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3229
3230 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3231 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3232 dev_err(dev, "No unused native chip select available\n");
3233 return -EINVAL;
3234 }
3235
3236 return 0;
3237 }
3238
3239 static int spi_controller_check_ops(struct spi_controller *ctlr)
3240 {
3241 /*
3242 	 * The controller may implement only the high-level SPI-memory-like
3243 	 * operations if it does not support regular SPI transfers, and this is
3244 	 * a valid use case.
3245 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3246 	 * one of the ->transfer_xxx() methods be implemented.
3247 */
3248 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3249 if (!ctlr->transfer && !ctlr->transfer_one &&
3250 !ctlr->transfer_one_message) {
3251 return -EINVAL;
3252 }
3253 }
3254
3255 return 0;
3256 }
3257
3258 /* Allocate dynamic bus number using Linux idr */
3259 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3260 {
3261 int id;
3262
3263 mutex_lock(&board_lock);
3264 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3265 mutex_unlock(&board_lock);
3266 if (WARN(id < 0, "couldn't get idr"))
3267 return id == -ENOSPC ? -EBUSY : id;
3268 ctlr->bus_num = id;
3269 return 0;
3270 }
3271
3272 /**
3273 * spi_register_controller - register SPI master or slave controller
3274 * @ctlr: initialized master, originally from spi_alloc_master() or
3275 * spi_alloc_slave()
3276 * Context: can sleep
3277 *
3278 * SPI controllers connect to their drivers using some non-SPI bus,
3279 * such as the platform bus. The final stage of probe() in that code
3280 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3281 *
3282  * SPI controllers use board-specific (often SoC-specific) bus numbers,
3283 * and board-specific addressing for SPI devices combines those numbers
3284 * with chip select numbers. Since SPI does not directly support dynamic
3285 * device identification, boards need configuration tables telling which
3286 * chip is at which address.
3287 *
3288 * This must be called from context that can sleep. It returns zero on
3289 * success, else a negative error code (dropping the controller's refcount).
3290 * After a successful return, the caller is responsible for calling
3291 * spi_unregister_controller().
3292 *
3293 * Return: zero on success, else a negative error code.
3294 */
3295 int spi_register_controller(struct spi_controller *ctlr)
3296 {
3297 struct device *dev = ctlr->dev.parent;
3298 struct boardinfo *bi;
3299 int first_dynamic;
3300 int status;
3301 int idx;
3302
3303 if (!dev)
3304 return -ENODEV;
3305
3306 /*
3307 * Make sure all necessary hooks are implemented before registering
3308 * the SPI controller.
3309 */
3310 status = spi_controller_check_ops(ctlr);
3311 if (status)
3312 return status;
3313
3314 if (ctlr->bus_num < 0)
3315 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3316 if (ctlr->bus_num >= 0) {
3317 /* Devices with a fixed bus num must check-in with the num */
3318 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3319 if (status)
3320 return status;
3321 }
3322 if (ctlr->bus_num < 0) {
3323 first_dynamic = of_alias_get_highest_id("spi");
3324 if (first_dynamic < 0)
3325 first_dynamic = 0;
3326 else
3327 first_dynamic++;
3328
3329 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3330 if (status)
3331 return status;
3332 }
3333 ctlr->bus_lock_flag = 0;
3334 init_completion(&ctlr->xfer_completion);
3335 init_completion(&ctlr->cur_msg_completion);
3336 if (!ctlr->max_dma_len)
3337 ctlr->max_dma_len = INT_MAX;
3338
3339 /*
3340 * Register the device, then userspace will see it.
3341 * Registration fails if the bus ID is in use.
3342 */
3343 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3344
3345 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3346 status = spi_get_gpio_descs(ctlr);
3347 if (status)
3348 goto free_bus_id;
3349 /*
3350 * A controller using GPIO descriptors always
3351 * supports SPI_CS_HIGH if need be.
3352 */
3353 ctlr->mode_bits |= SPI_CS_HIGH;
3354 }
3355
3356 /*
3357 * Even if it's just one always-selected device, there must
3358 * be at least one chipselect.
3359 */
3360 if (!ctlr->num_chipselect) {
3361 status = -EINVAL;
3362 goto free_bus_id;
3363 }
3364
3365 /* Setting last_cs to SPI_INVALID_CS means no chip selected */
3366 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3367 ctlr->last_cs[idx] = SPI_INVALID_CS;
3368
3369 status = device_add(&ctlr->dev);
3370 if (status < 0)
3371 goto free_bus_id;
3372 dev_dbg(dev, "registered %s %s\n",
3373 spi_controller_is_slave(ctlr) ? "slave" : "master",
3374 dev_name(&ctlr->dev));
3375
3376 /*
3377 * If we're using a queued driver, start the queue. Note that we don't
3378 * need the queueing logic if the driver is only supporting high-level
3379 * memory operations.
3380 */
3381 if (ctlr->transfer) {
3382 dev_info(dev, "controller is unqueued, this is deprecated\n");
3383 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3384 status = spi_controller_initialize_queue(ctlr);
3385 if (status) {
3386 device_del(&ctlr->dev);
3387 goto free_bus_id;
3388 }
3389 }
3390 /* Add statistics */
3391 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3392 if (!ctlr->pcpu_statistics) {
3393 dev_err(dev, "Error allocating per-cpu statistics\n");
3394 status = -ENOMEM;
3395 goto destroy_queue;
3396 }
3397
3398 mutex_lock(&board_lock);
3399 list_add_tail(&ctlr->list, &spi_controller_list);
3400 list_for_each_entry(bi, &board_list, list)
3401 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3402 mutex_unlock(&board_lock);
3403
3404 /* Register devices from the device tree and ACPI */
3405 of_register_spi_devices(ctlr);
3406 acpi_register_spi_devices(ctlr);
3407 return status;
3408
3409 destroy_queue:
3410 spi_destroy_queue(ctlr);
3411 free_bus_id:
3412 mutex_lock(&board_lock);
3413 idr_remove(&spi_master_idr, ctlr->bus_num);
3414 mutex_unlock(&board_lock);
3415 return status;
3416 }
3417 EXPORT_SYMBOL_GPL(spi_register_controller);
3418
3419 static void devm_spi_unregister(struct device *dev, void *res)
3420 {
3421 spi_unregister_controller(*(struct spi_controller **)res);
3422 }
3423
3424 /**
3425 * devm_spi_register_controller - register managed SPI master or slave
3426 * controller
3427 * @dev: device managing SPI controller
3428 * @ctlr: initialized controller, originally from spi_alloc_master() or
3429 * spi_alloc_slave()
3430 * Context: can sleep
3431 *
3432  * Register an SPI controller as with spi_register_controller(), except that
3433  * the controller is automatically unregistered and freed when @dev is unbound.
3434 *
3435 * Return: zero on success, else a negative error code.
3436 */
3437 int devm_spi_register_controller(struct device *dev,
3438 struct spi_controller *ctlr)
3439 {
3440 struct spi_controller **ptr;
3441 int ret;
3442
3443 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3444 if (!ptr)
3445 return -ENOMEM;
3446
3447 ret = spi_register_controller(ctlr);
3448 if (!ret) {
3449 *ptr = ctlr;
3450 devres_add(dev, ptr);
3451 } else {
3452 devres_free(ptr);
3453 }
3454
3455 return ret;
3456 }
3457 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
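
/*
 * Example (an illustrative sketch, not part of this file): combined with
 * the devm allocator above, a minimal probe() needs no explicit cleanup.
 * The foo_* names are hypothetical.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */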
3458
3459 static int __unregister(struct device *dev, void *null)
3460 {
3461 spi_unregister_device(to_spi_device(dev));
3462 return 0;
3463 }
3464
3465 /**
3466 * spi_unregister_controller - unregister SPI master or slave controller
3467 * @ctlr: the controller being unregistered
3468 * Context: can sleep
3469 *
3470 * This call is used only by SPI controller drivers, which are the
3471 * only ones directly touching chip registers.
3472 *
3473 * This must be called from context that can sleep.
3474 *
3475 * Note that this function also drops a reference to the controller.
3476 */
3477 void spi_unregister_controller(struct spi_controller *ctlr)
3478 {
3479 struct spi_controller *found;
3480 int id = ctlr->bus_num;
3481
3482 /* Prevent addition of new devices, unregister existing ones */
3483 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3484 mutex_lock(&ctlr->add_lock);
3485
3486 device_for_each_child(&ctlr->dev, NULL, __unregister);
3487
3488 /* First make sure that this controller was ever added */
3489 mutex_lock(&board_lock);
3490 found = idr_find(&spi_master_idr, id);
3491 mutex_unlock(&board_lock);
3492 if (ctlr->queued) {
3493 if (spi_destroy_queue(ctlr))
3494 dev_err(&ctlr->dev, "queue remove failed\n");
3495 }
3496 mutex_lock(&board_lock);
3497 list_del(&ctlr->list);
3498 mutex_unlock(&board_lock);
3499
3500 device_del(&ctlr->dev);
3501
3502 /* Free bus id */
3503 mutex_lock(&board_lock);
3504 if (found == ctlr)
3505 idr_remove(&spi_master_idr, id);
3506 mutex_unlock(&board_lock);
3507
3508 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3509 mutex_unlock(&ctlr->add_lock);
3510
3511 /*
3512 * Release the last reference on the controller if its driver
3513 * has not yet been converted to devm_spi_alloc_master/slave().
3514 */
3515 if (!ctlr->devm_allocated)
3516 put_device(&ctlr->dev);
3517 }
3518 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3519
3520 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3521 {
3522 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3523 }
3524
3525 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3526 {
3527 mutex_lock(&ctlr->bus_lock_mutex);
3528 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3529 mutex_unlock(&ctlr->bus_lock_mutex);
3530 }
3531
3532 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3533 {
3534 mutex_lock(&ctlr->bus_lock_mutex);
3535 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3536 mutex_unlock(&ctlr->bus_lock_mutex);
3537 }
3538
3539 int spi_controller_suspend(struct spi_controller *ctlr)
3540 {
3541 int ret = 0;
3542
3543 /* Basically no-ops for non-queued controllers */
3544 if (ctlr->queued) {
3545 ret = spi_stop_queue(ctlr);
3546 if (ret)
3547 dev_err(&ctlr->dev, "queue stop failed\n");
3548 }
3549
3550 __spi_mark_suspended(ctlr);
3551 return ret;
3552 }
3553 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3554
3555 int spi_controller_resume(struct spi_controller *ctlr)
3556 {
3557 int ret = 0;
3558
3559 __spi_mark_resumed(ctlr);
3560
3561 if (ctlr->queued) {
3562 ret = spi_start_queue(ctlr);
3563 if (ret)
3564 dev_err(&ctlr->dev, "queue restart failed\n");
3565 }
3566 return ret;
3567 }
3568 EXPORT_SYMBOL_GPL(spi_controller_resume);
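
/*
 * Illustrative sketch of how a controller driver might call the two helpers
 * above from its system PM callbacks; the foo_* names and the clk field are
 * hypothetical.
 *
 *	static int foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);	// stop the queue first
 *		if (ret)
 *			return ret;
 *
 *		clk_disable_unprepare(priv->clk);
 *		return 0;
 *	}
 *
 *	static int foo_spi_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *		int ret;
 *
 *		ret = clk_prepare_enable(priv->clk);	// hardware up first
 *		if (ret)
 *			return ret;
 *
 *		return spi_controller_resume(ctlr);	// then restart the queue
 *	}
 */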
3569
3570 /*-------------------------------------------------------------------------*/
3571
3572 /* Core methods for spi_message alterations */
3573
3574 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3575 struct spi_message *msg,
3576 void *res)
3577 {
3578 struct spi_replaced_transfers *rxfer = res;
3579 size_t i;
3580
3581 /* Call extra callback if requested */
3582 if (rxfer->release)
3583 rxfer->release(ctlr, msg, res);
3584
3585 /* Insert replaced transfers back into the message */
3586 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3587
3588 /* Remove the formerly inserted entries */
3589 for (i = 0; i < rxfer->inserted; i++)
3590 list_del(&rxfer->inserted_transfers[i].transfer_list);
3591 }
3592
3593 /**
3594 * spi_replace_transfers - replace transfers with several transfers
3595 * and register the change with spi_message.resources
3596 * @msg: the spi_message we work upon
3597 * @xfer_first: the first spi_transfer we want to replace
3598 * @remove: number of transfers to remove
3599 * @insert: the number of transfers we want to insert instead
3600 * @release: extra release code necessary in some circumstances
3601 * @extradatasize: extra data to allocate (with alignment guarantees
3602 * of struct @spi_transfer)
3603 * @gfp: gfp flags
3604 *
3605 * Returns: pointer to @spi_replaced_transfers,
3606 * PTR_ERR(...) in case of errors.
3607 */
3608 static struct spi_replaced_transfers *spi_replace_transfers(
3609 struct spi_message *msg,
3610 struct spi_transfer *xfer_first,
3611 size_t remove,
3612 size_t insert,
3613 spi_replaced_release_t release,
3614 size_t extradatasize,
3615 gfp_t gfp)
3616 {
3617 struct spi_replaced_transfers *rxfer;
3618 struct spi_transfer *xfer;
3619 size_t i;
3620
3621 /* Allocate the structure using spi_res */
3622 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3623 struct_size(rxfer, inserted_transfers, insert)
3624 + extradatasize,
3625 gfp);
3626 if (!rxfer)
3627 return ERR_PTR(-ENOMEM);
3628
3629 /* The release code to invoke before running the generic release */
3630 rxfer->release = release;
3631
3632 /* Assign extradata */
3633 if (extradatasize)
3634 rxfer->extradata =
3635 &rxfer->inserted_transfers[insert];
3636
3637 /* Init the replaced_transfers list */
3638 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3639
3640 /*
3641 * Assign the list_entry after which we should reinsert
3642 * the @replaced_transfers - it may be spi_message.transfers!
3643 */
3644 rxfer->replaced_after = xfer_first->transfer_list.prev;
3645
3646 /* Remove the requested number of transfers */
3647 for (i = 0; i < remove; i++) {
3648 /*
3649 * If the entry after replaced_after is msg->transfers
3650 * then we have been requested to remove more transfers
3651 * than are in the list.
3652 */
3653 if (rxfer->replaced_after->next == &msg->transfers) {
3654 dev_err(&msg->spi->dev,
3655 "requested to remove more spi_transfers than are available\n");
3656 /* Insert replaced transfers back into the message */
3657 list_splice(&rxfer->replaced_transfers,
3658 rxfer->replaced_after);
3659
3660 /* Free the spi_replace_transfer structure... */
3661 spi_res_free(rxfer);
3662
3663 /* ...and return with an error */
3664 return ERR_PTR(-EINVAL);
3665 }
3666
3667 /*
3668 * Remove the entry after replaced_after from list of
3669 * transfers and add it to list of replaced_transfers.
3670 */
3671 list_move_tail(rxfer->replaced_after->next,
3672 &rxfer->replaced_transfers);
3673 }
3674
3675 /*
3676 * Create copies of the given xfer (the first transfer to be
3677 * removed), all with identical settings.
3678 */
3679 for (i = 0; i < insert; i++) {
3680 /* We need to run in reverse order */
3681 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3682
3683 /* Copy all spi_transfer data */
3684 memcpy(xfer, xfer_first, sizeof(*xfer));
3685
3686 /* Add to list */
3687 list_add(&xfer->transfer_list, rxfer->replaced_after);
3688
3689 /* Clear cs_change and delay for all but the last */
3690 if (i) {
3691 xfer->cs_change = false;
3692 xfer->delay.value = 0;
3693 }
3694 }
3695
3696 /* Set up inserted... */
3697 rxfer->inserted = insert;
3698
3699 /* ...and register it with spi_res/spi_message */
3700 spi_res_add(msg, rxfer);
3701
3702 return rxfer;
3703 }
3704
3705 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3706 struct spi_message *msg,
3707 struct spi_transfer **xferp,
3708 size_t maxsize)
3709 {
3710 struct spi_transfer *xfer = *xferp, *xfers;
3711 struct spi_replaced_transfers *srt;
3712 size_t offset;
3713 size_t count, i;
3714
3715 /* Calculate how many we have to replace */
3716 count = DIV_ROUND_UP(xfer->len, maxsize);
3717
3718 /* Create replacement */
3719 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3720 if (IS_ERR(srt))
3721 return PTR_ERR(srt);
3722 xfers = srt->inserted_transfers;
3723
3724 /*
3725 * Now handle each of those newly inserted spi_transfers.
3726 * Note that the replacement spi_transfers are all preset
3727 * to the same values as *xferp, so tx_buf, rx_buf and len
3728 * are all identical (as are most other fields); we just
3729 * have to fix up len and the buffer pointers.
3730 */
3731
3732 /*
3733 * The first transfer just needs the length modified, so we
3734 * run it outside the loop.
3735 */
3736 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3737
3738 /* All the others need rx_buf/tx_buf also set */
3739 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3740 /* Update rx_buf, tx_buf and DMA */
3741 if (xfers[i].rx_buf)
3742 xfers[i].rx_buf += offset;
3743 if (xfers[i].tx_buf)
3744 xfers[i].tx_buf += offset;
3745
3746 /* Update length */
3747 xfers[i].len = min(maxsize, xfers[i].len - offset);
3748 }
3749
3750 /*
3751 * We set up xferp to the last entry we have inserted,
3752 * so that we skip those already split transfers.
3753 */
3754 *xferp = &xfers[count - 1];
3755
3756 /* Increment statistics counters */
3757 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3758 transfers_split_maxsize);
3759 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3760 transfers_split_maxsize);
3761
3762 return 0;
3763 }
3764
3765 /**
3766 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3767 * when an individual transfer exceeds a
3768 * certain size
3769 * @ctlr: the @spi_controller for this transfer
3770 * @msg: the @spi_message to transform
3771 * @maxsize: the maximum transfer length; longer transfers are split
3772 *
3773 * This function allocates resources that are automatically freed during the
3774 * spi message unoptimize phase so this function should only be called from
3775 * optimize_message callbacks.
3776 *
3777 * Return: status of transformation
3778 */
3779 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3780 struct spi_message *msg,
3781 size_t maxsize)
3782 {
3783 struct spi_transfer *xfer;
3784 int ret;
3785
3786 /*
3787 * Iterate over the transfer_list,
3788 * but note that xfer is advanced to the last transfer inserted
3789 * to avoid checking sizes again unnecessarily (also xfer does
3790 * potentially belong to a different list by the time the
3791 * replacement has happened).
3792 */
3793 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3794 if (xfer->len > maxsize) {
3795 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3796 maxsize);
3797 if (ret)
3798 return ret;
3799 }
3800 }
3801
3802 return 0;
3803 }
3804 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
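
/*
 * Worked example: a message holding one 10-byte transfer, split with
 * maxsize = 4, is replaced by DIV_ROUND_UP(10, 4) = 3 transfers of 4, 4 and
 * 2 bytes; the second and third transfers point 4 and 8 bytes into the
 * original tx_buf/rx_buf, respectively.
 */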
3805
3806
3807 /**
3808 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3809 * when an individual transfer exceeds a
3810 * certain number of SPI words
3811 * @ctlr: the @spi_controller for this transfer
3812 * @msg: the @spi_message to transform
3813 * @maxwords: the number of words to limit each transfer to
3814 *
3815 * This function allocates resources that are automatically freed during the
3816 * spi message unoptimize phase so this function should only be called from
3817 * optimize_message callbacks.
3818 *
3819 * Return: status of transformation
3820 */
3821 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3822 struct spi_message *msg,
3823 size_t maxwords)
3824 {
3825 struct spi_transfer *xfer;
3826
3827 /*
3828 * Iterate over the transfer_list,
3829 * but note that xfer is advanced to the last transfer inserted
3830 * to avoid checking sizes again unnecessarily (also xfer does
3831 * potentially belong to a different list by the time the
3832 * replacement has happened).
3833 */
3834 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3835 size_t maxsize;
3836 int ret;
3837
3838 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3839 if (xfer->len > maxsize) {
3840 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3841 maxsize);
3842 if (ret)
3843 return ret;
3844 }
3845 }
3846
3847 return 0;
3848 }
3849 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
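
/*
 * Worked example: with bits_per_word = 12, BITS_TO_BYTES(12) = 2, which is
 * already a power of two, so maxwords = 4 gives maxsize = 8 bytes; any
 * transfer longer than that is then split exactly as in
 * spi_split_transfers_maxsize().
 */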
3850
3851 /*-------------------------------------------------------------------------*/
3852
3853 /*
3854 * Core methods for SPI controller protocol drivers. Some of the
3855 * other core methods are currently defined as inline functions.
3856 */
3857
3858 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3859 u8 bits_per_word)
3860 {
3861 if (ctlr->bits_per_word_mask) {
3862 /* Only 32 bits fit in the mask */
3863 if (bits_per_word > 32)
3864 return -EINVAL;
3865 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3866 return -EINVAL;
3867 }
3868
3869 return 0;
3870 }
3871
3872 /**
3873 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3874 * @spi: the device that requires specific CS timing configuration
3875 *
3876 * Return: zero on success, else a negative error code.
3877 */
3878 static int spi_set_cs_timing(struct spi_device *spi)
3879 {
3880 struct device *parent = spi->controller->dev.parent;
3881 int status = 0;
3882
3883 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3884 if (spi->controller->auto_runtime_pm) {
3885 status = pm_runtime_get_sync(parent);
3886 if (status < 0) {
3887 pm_runtime_put_noidle(parent);
3888 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3889 status);
3890 return status;
3891 }
3892
3893 status = spi->controller->set_cs_timing(spi);
3894 pm_runtime_mark_last_busy(parent);
3895 pm_runtime_put_autosuspend(parent);
3896 } else {
3897 status = spi->controller->set_cs_timing(spi);
3898 }
3899 }
3900 return status;
3901 }
3902
3903 /**
3904 * spi_setup - setup SPI mode and clock rate
3905 * @spi: the device whose settings are being modified
3906 * Context: can sleep, and no requests are queued to the device
3907 *
3908 * SPI protocol drivers may need to update the transfer mode if the
3909 * device doesn't work with its default. They may likewise need
3910 * to update clock rates or word sizes from initial values. This function
3911 * changes those settings, and must be called from a context that can sleep.
3912 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3913 * effect the next time the device is selected and data is transferred to
3914 * or from it. When this function returns, the SPI device is deselected.
3915 *
3916 * Note that this call will fail if the protocol driver specifies an option
3917 * that the underlying controller or its driver does not support. For
3918 * example, not all hardware supports wire transfers using nine bit words,
3919 * LSB-first wire encoding, or active-high chipselects.
3920 *
3921 * Return: zero on success, else a negative error code.
3922 */
3923 int spi_setup(struct spi_device *spi)
3924 {
3925 unsigned bad_bits, ugly_bits;
3926 int status = 0;
3927
3928 /*
3929 * Check mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO
3930 * are set at the same time.
3931 */
3932 if ((hweight_long(spi->mode &
3933 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3934 (hweight_long(spi->mode &
3935 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3936 dev_err(&spi->dev,
3937 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3938 return -EINVAL;
3939 }
3940 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3941 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3942 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3943 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3944 return -EINVAL;
3945 /*
3946 * Help drivers fail *cleanly* when they need options
3947 * that aren't supported with their current controller.
3948 * SPI_CS_WORD has a fallback software implementation,
3949 * so it is ignored here.
3950 */
3951 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3952 SPI_NO_TX | SPI_NO_RX);
3953 ugly_bits = bad_bits &
3954 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3955 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3956 if (ugly_bits) {
3957 dev_warn(&spi->dev,
3958 "setup: ignoring unsupported mode bits %x\n",
3959 ugly_bits);
3960 spi->mode &= ~ugly_bits;
3961 bad_bits &= ~ugly_bits;
3962 }
3963 if (bad_bits) {
3964 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3965 bad_bits);
3966 return -EINVAL;
3967 }
3968
3969 if (!spi->bits_per_word) {
3970 spi->bits_per_word = 8;
3971 } else {
3972 /*
3973 * Some controllers may not support the default 8 bits-per-word
3974 * so only perform the check when this is explicitly provided.
3975 */
3976 status = __spi_validate_bits_per_word(spi->controller,
3977 spi->bits_per_word);
3978 if (status)
3979 return status;
3980 }
3981
3982 if (spi->controller->max_speed_hz &&
3983 (!spi->max_speed_hz ||
3984 spi->max_speed_hz > spi->controller->max_speed_hz))
3985 spi->max_speed_hz = spi->controller->max_speed_hz;
3986
3987 mutex_lock(&spi->controller->io_mutex);
3988
3989 if (spi->controller->setup) {
3990 status = spi->controller->setup(spi);
3991 if (status) {
3992 mutex_unlock(&spi->controller->io_mutex);
3993 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3994 status);
3995 return status;
3996 }
3997 }
3998
3999 status = spi_set_cs_timing(spi);
4000 if (status) {
4001 mutex_unlock(&spi->controller->io_mutex);
4002 return status;
4003 }
4004
4005 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
4006 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
4007 if (status < 0) {
4008 mutex_unlock(&spi->controller->io_mutex);
4009 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
4010 status);
4011 return status;
4012 }
4013
4014 /*
4015 * We do not want to return a positive value from pm_runtime_get:
4016 * there are many instances of drivers calling spi_setup() and
4017 * checking for a non-zero return value instead of a negative
4018 * return value.
4019 */
4020 status = 0;
4021
4022 spi_set_cs(spi, false, true);
4023 pm_runtime_mark_last_busy(spi->controller->dev.parent);
4024 pm_runtime_put_autosuspend(spi->controller->dev.parent);
4025 } else {
4026 spi_set_cs(spi, false, true);
4027 }
4028
4029 mutex_unlock(&spi->controller->io_mutex);
4030
4031 if (spi->rt && !spi->controller->rt) {
4032 spi->controller->rt = true;
4033 spi_set_thread_rt(spi->controller);
4034 }
4035
4036 trace_spi_setup(spi, status);
4037
4038 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4039 spi->mode & SPI_MODE_X_MASK,
4040 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4041 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4042 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
4043 (spi->mode & SPI_LOOP) ? "loopback, " : "",
4044 spi->bits_per_word, spi->max_speed_hz,
4045 status);
4046
4047 return status;
4048 }
4049 EXPORT_SYMBOL_GPL(spi_setup);
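
/*
 * Illustrative sketch of a peripheral driver adjusting its settings before
 * first use; the mode, word size and clock rate shown are hypothetical.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *
 *	ret = spi_setup(spi);
 *	if (ret) {
 *		dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
 *		return ret;
 *	}
 */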
4050
4051 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4052 struct spi_device *spi)
4053 {
4054 int delay1, delay2;
4055
4056 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4057 if (delay1 < 0)
4058 return delay1;
4059
4060 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4061 if (delay2 < 0)
4062 return delay2;
4063
4064 if (delay1 < delay2)
4065 memcpy(&xfer->word_delay, &spi->word_delay,
4066 sizeof(xfer->word_delay));
4067
4068 return 0;
4069 }
4070
4071 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4072 {
4073 struct spi_controller *ctlr = spi->controller;
4074 struct spi_transfer *xfer;
4075 int w_size;
4076
4077 if (list_empty(&message->transfers))
4078 return -EINVAL;
4079
4080 message->spi = spi;
4081
4082 /*
4083 * Half-duplex links include the original MicroWire, ones with
4084 * only one data pin like SPI_3WIRE (which switches direction),
4085 * and ones where either MOSI or MISO is missing. They can also
4086 * be caused by software limitations.
4087 */
4088 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4089 (spi->mode & SPI_3WIRE)) {
4090 unsigned flags = ctlr->flags;
4091
4092 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4093 if (xfer->rx_buf && xfer->tx_buf)
4094 return -EINVAL;
4095 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4096 return -EINVAL;
4097 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4098 return -EINVAL;
4099 }
4100 }
4101
4102 /*
4103 * Set transfer bits_per_word and max speed as spi device default if
4104 * it is not set for this transfer.
4105 * Set transfer tx_nbits and rx_nbits as single transfer default
4106 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4107 * Ensure transfer word_delay is at least as long as that required by
4108 * device itself.
4109 */
4110 message->frame_length = 0;
4111 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4112 xfer->effective_speed_hz = 0;
4113 message->frame_length += xfer->len;
4114 if (!xfer->bits_per_word)
4115 xfer->bits_per_word = spi->bits_per_word;
4116
4117 if (!xfer->speed_hz)
4118 xfer->speed_hz = spi->max_speed_hz;
4119
4120 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4121 xfer->speed_hz = ctlr->max_speed_hz;
4122
4123 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4124 return -EINVAL;
4125
4126 /*
4127 * SPI transfer length should be a multiple of the SPI word size,
4128 * where the word size is rounded up to a power-of-two number of bytes.
4129 */
4130 if (xfer->bits_per_word <= 8)
4131 w_size = 1;
4132 else if (xfer->bits_per_word <= 16)
4133 w_size = 2;
4134 else
4135 w_size = 4;
4136
4137 /* No partial transfers accepted */
4138 if (xfer->len % w_size)
4139 return -EINVAL;
4140
4141 if (xfer->speed_hz && ctlr->min_speed_hz &&
4142 xfer->speed_hz < ctlr->min_speed_hz)
4143 return -EINVAL;
4144
4145 if (xfer->tx_buf && !xfer->tx_nbits)
4146 xfer->tx_nbits = SPI_NBITS_SINGLE;
4147 if (xfer->rx_buf && !xfer->rx_nbits)
4148 xfer->rx_nbits = SPI_NBITS_SINGLE;
4149 /*
4150 * Check transfer tx/rx_nbits:
4151 * 1. check the value matches one of single, dual and quad
4152 * 2. check tx/rx_nbits match the mode in spi_device
4153 */
4154 if (xfer->tx_buf) {
4155 if (spi->mode & SPI_NO_TX)
4156 return -EINVAL;
4157 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4158 xfer->tx_nbits != SPI_NBITS_DUAL &&
4159 xfer->tx_nbits != SPI_NBITS_QUAD)
4160 return -EINVAL;
4161 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4162 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4163 return -EINVAL;
4164 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4165 !(spi->mode & SPI_TX_QUAD))
4166 return -EINVAL;
4167 }
4168 /* Check transfer rx_nbits */
4169 if (xfer->rx_buf) {
4170 if (spi->mode & SPI_NO_RX)
4171 return -EINVAL;
4172 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4173 xfer->rx_nbits != SPI_NBITS_DUAL &&
4174 xfer->rx_nbits != SPI_NBITS_QUAD)
4175 return -EINVAL;
4176 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4177 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4178 return -EINVAL;
4179 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4180 !(spi->mode & SPI_RX_QUAD))
4181 return -EINVAL;
4182 }
4183
4184 if (_spi_xfer_word_delay_update(xfer, spi))
4185 return -EINVAL;
4186 }
4187
4188 message->status = -EINPROGRESS;
4189
4190 return 0;
4191 }
4192
4193 /*
4194 * spi_split_transfers - generic handling of transfer splitting
4195 * @msg: the message to split
4196 *
4197 * Under certain conditions, a SPI controller may not support arbitrary
4198 * transfer sizes or other features required by a peripheral. This function
4199 * will split the transfers in the message into smaller transfers that are
4200 * supported by the controller.
4201 *
4202 * Controllers with special requirements not covered here can also split
4203 * transfers in the optimize_message() callback.
4204 *
4205 * Context: can sleep
4206 * Return: zero on success, else a negative error code
4207 */
4208 static int spi_split_transfers(struct spi_message *msg)
4209 {
4210 struct spi_controller *ctlr = msg->spi->controller;
4211 struct spi_transfer *xfer;
4212 int ret;
4213
4214 /*
4215 * If an SPI controller does not support toggling the CS line after each
4216 * word (indicated by the SPI_CS_WORD flag), or we are using a GPIO
4217 * for the CS line, we can emulate the CS-per-word hardware function by
4218 * splitting transfers into one-word transfers and ensuring that
4219 * cs_change is set for each transfer.
4220 */
4221 if ((msg->spi->mode & SPI_CS_WORD) &&
4222 (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4223 ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4224 if (ret)
4225 return ret;
4226
4227 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4228 /* Don't change cs_change on the last entry in the list */
4229 if (list_is_last(&xfer->transfer_list, &msg->transfers))
4230 break;
4231
4232 xfer->cs_change = 1;
4233 }
4234 } else {
4235 ret = spi_split_transfers_maxsize(ctlr, msg,
4236 spi_max_transfer_size(msg->spi));
4237 if (ret)
4238 return ret;
4239 }
4240
4241 return 0;
4242 }
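
/*
 * Worked example: when SPI_CS_WORD is emulated in software, a single 4-byte
 * transfer at 8 bits_per_word becomes four 1-byte transfers, with cs_change
 * set on all but the last so that the chip select toggles between words.
 */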
4243
4244 /*
4245 * __spi_optimize_message - shared implementation for spi_optimize_message()
4246 * and spi_maybe_optimize_message()
4247 * @spi: the device that will be used for the message
4248 * @msg: the message to optimize
4249 *
4250 * Peripheral drivers will call spi_optimize_message() and the spi core will
4251 * call spi_maybe_optimize_message() instead of calling this directly.
4252 *
4253 * It is not valid to call this on a message that has already been optimized.
4254 *
4255 * Return: zero on success, else a negative error code
4256 */
4257 static int __spi_optimize_message(struct spi_device *spi,
4258 struct spi_message *msg)
4259 {
4260 struct spi_controller *ctlr = spi->controller;
4261 int ret;
4262
4263 ret = __spi_validate(spi, msg);
4264 if (ret)
4265 return ret;
4266
4267 ret = spi_split_transfers(msg);
4268 if (ret)
4269 return ret;
4270
4271 if (ctlr->optimize_message) {
4272 ret = ctlr->optimize_message(msg);
4273 if (ret) {
4274 spi_res_release(ctlr, msg);
4275 return ret;
4276 }
4277 }
4278
4279 msg->optimized = true;
4280
4281 return 0;
4282 }
4283
4284 /*
4285 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4286 * @spi: the device that will be used for the message
4287 * @msg: the message to optimize
4288 * Return: zero on success, else a negative error code
4289 */
4290 static int spi_maybe_optimize_message(struct spi_device *spi,
4291 struct spi_message *msg)
4292 {
4293 if (msg->pre_optimized)
4294 return 0;
4295
4296 return __spi_optimize_message(spi, msg);
4297 }
4298
4299 /**
4300 * spi_optimize_message - do any one-time validation and setup for a SPI message
4301 * @spi: the device that will be used for the message
4302 * @msg: the message to optimize
4303 *
4304 * Peripheral drivers that reuse the same message repeatedly may call this to
4305 * perform as much message prep as possible once, rather than repeating it each
4306 * time a message transfer is performed, improving throughput and reducing CPU
4307 * usage.
4308 *
4309 * Once a message has been optimized, it cannot be modified with the exception
4310 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4311 * only the data in the memory it points to).
4312 *
4313 * Calls to this function must be balanced with calls to spi_unoptimize_message()
4314 * to avoid leaking resources.
4315 *
4316 * Context: can sleep
4317 * Return: zero on success, else a negative error code
4318 */
4319 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4320 {
4321 int ret;
4322
4323 ret = __spi_optimize_message(spi, msg);
4324 if (ret)
4325 return ret;
4326
4327 /*
4328 * This flag indicates that the peripheral driver called spi_optimize_message()
4329 * and therefore we shouldn't unoptimize message automatically when finalizing
4330 * the message but rather wait until spi_unoptimize_message() is called
4331 * by the peripheral driver.
4332 */
4333 msg->pre_optimized = true;
4334
4335 return 0;
4336 }
4337 EXPORT_SYMBOL_GPL(spi_optimize_message);
4338
4339 /**
4340 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4341 * @msg: the message to unoptimize
4342 *
4343 * Calls to this function must be balanced with calls to spi_optimize_message().
4344 *
4345 * Context: can sleep
4346 */
4347 void spi_unoptimize_message(struct spi_message *msg)
4348 {
4349 __spi_unoptimize_message(msg);
4350 msg->pre_optimized = false;
4351 }
4352 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
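
/*
 * Illustrative sketch of the balanced optimize/unoptimize pattern for a
 * message that is reused many times; msg setup is omitted and
 * more_work_to_do is a hypothetical stand-in condition.
 *
 *	ret = spi_optimize_message(spi, &msg);
 *	if (ret)
 *		return ret;
 *
 *	while (more_work_to_do)
 *		spi_sync(spi, &msg);	// per-message prep already done
 *
 *	spi_unoptimize_message(&msg);
 */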
4353
4354 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4355 {
4356 struct spi_controller *ctlr = spi->controller;
4357 struct spi_transfer *xfer;
4358
4359 /*
4360 * Some controllers do not support doing regular SPI transfers. Return
4361 * ENOTSUPP when this is the case.
4362 */
4363 if (!ctlr->transfer)
4364 return -ENOTSUPP;
4365
4366 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4367 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4368
4369 trace_spi_message_submit(message);
4370
4371 if (!ctlr->ptp_sts_supported) {
4372 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4373 xfer->ptp_sts_word_pre = 0;
4374 ptp_read_system_prets(xfer->ptp_sts);
4375 }
4376 }
4377
4378 return ctlr->transfer(spi, message);
4379 }
4380
4381 /**
4382 * spi_async - asynchronous SPI transfer
4383 * @spi: device with which data will be exchanged
4384 * @message: describes the data transfers, including completion callback
4385 * Context: any (IRQs may be blocked, etc)
4386 *
4387 * This call may be used in IRQ context and other contexts which can't sleep,
4388 * as well as from task contexts which can sleep.
4389 *
4390 * The completion callback is invoked in a context which can't sleep.
4391 * Before that invocation, the value of message->status is undefined.
4392 * When the callback is issued, message->status holds either zero (to
4393 * indicate complete success) or a negative error code. After that
4394 * callback returns, the driver which issued the transfer request may
4395 * deallocate the associated memory; it's no longer in use by any SPI
4396 * core or controller driver code.
4397 *
4398 * Note that although all messages to a spi_device are handled in
4399 * FIFO order, messages may go to different devices in other orders.
4400 * Some devices might be higher priority, or have various "hard" access
4401 * time requirements, for example.
4402 *
4403 * On detection of any fault during the transfer, processing of
4404 * the entire message is aborted, and the device is deselected.
4405 * Until returning from the associated message completion callback,
4406 * no other spi_message queued to that device will be processed.
4407 * (This rule applies equally to all the synchronous transfer calls,
4408 * which are wrappers around this core asynchronous primitive.)
4409 *
4410 * Return: zero on success, else a negative error code.
4411 */
4412 int spi_async(struct spi_device *spi, struct spi_message *message)
4413 {
4414 struct spi_controller *ctlr = spi->controller;
4415 int ret;
4416 unsigned long flags;
4417
4418 ret = spi_maybe_optimize_message(spi, message);
4419 if (ret)
4420 return ret;
4421
4422 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4423
4424 if (ctlr->bus_lock_flag)
4425 ret = -EBUSY;
4426 else
4427 ret = __spi_async(spi, message);
4428
4429 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4430
4431 spi_maybe_unoptimize_message(message);
4432
4433 return ret;
4434 }
4435 EXPORT_SYMBOL_GPL(spi_async);
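
/*
 * Illustrative sketch of asynchronous submission; the foo_* names are
 * hypothetical. The callback runs in a context that cannot sleep, so it
 * typically just records message->status and defers real work.
 *
 *	static void foo_msg_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		schedule_work(&priv->work);	// message->status is now valid
 *	}
 *
 *	msg.complete = foo_msg_complete;
 *	msg.context = priv;
 *	ret = spi_async(spi, &msg);
 */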
4436
4437 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4438 {
4439 bool was_busy;
4440 int ret;
4441
4442 mutex_lock(&ctlr->io_mutex);
4443
4444 was_busy = ctlr->busy;
4445
4446 ctlr->cur_msg = msg;
4447 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4448 if (ret)
4449 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4450 ctlr->cur_msg = NULL;
4451 ctlr->fallback = false;
4452
4453 if (!was_busy) {
4454 kfree(ctlr->dummy_rx);
4455 ctlr->dummy_rx = NULL;
4456 kfree(ctlr->dummy_tx);
4457 ctlr->dummy_tx = NULL;
4458 if (ctlr->unprepare_transfer_hardware &&
4459 ctlr->unprepare_transfer_hardware(ctlr))
4460 dev_err(&ctlr->dev,
4461 "failed to unprepare transfer hardware\n");
4462 spi_idle_runtime_pm(ctlr);
4463 }
4464
4465 mutex_unlock(&ctlr->io_mutex);
4466 }
4467
4468 /*-------------------------------------------------------------------------*/
4469
4470 /*
4471 * Utility methods for SPI protocol drivers, layered on
4472 * top of the core. Some other utility methods are defined as
4473 * inline functions.
4474 */
4475
4476 static void spi_complete(void *arg)
4477 {
4478 complete(arg);
4479 }
4480
4481 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4482 {
4483 DECLARE_COMPLETION_ONSTACK(done);
4484 unsigned long flags;
4485 int status;
4486 struct spi_controller *ctlr = spi->controller;
4487
4488 if (__spi_check_suspended(ctlr)) {
4489 dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4490 return -ESHUTDOWN;
4491 }
4492
4493 status = spi_maybe_optimize_message(spi, message);
4494 if (status)
4495 return status;
4496
4497 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4498 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4499
4500 /*
4501 * Checking queue_empty here only guarantees async/sync message
4502 * ordering when coming from the same context. It does not need to
4503 * guard against reentrancy from a different context. The io_mutex
4504 * will catch those cases.
4505 */
4506 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4507 message->actual_length = 0;
4508 message->status = -EINPROGRESS;
4509
4510 trace_spi_message_submit(message);
4511
4512 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4513 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4514
4515 __spi_transfer_message_noqueue(ctlr, message);
4516
4517 return message->status;
4518 }
4519
4520 /*
4521 * There are messages in the async queue that could have originated
4522 * from the same context, so we need to preserve ordering.
4523 * Therefore we send the message to the async queue and wait until it
4524 * has completed.
4525 */
4526 message->complete = spi_complete;
4527 message->context = &done;
4528
4529 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4530 status = __spi_async(spi, message);
4531 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4532
4533 if (status == 0) {
4534 wait_for_completion(&done);
4535 status = message->status;
4536 }
4537 message->complete = NULL;
4538 message->context = NULL;
4539
4540 return status;
4541 }
4542
4543 /**
4544 * spi_sync - blocking/synchronous SPI data transfers
4545 * @spi: device with which data will be exchanged
4546 * @message: describes the data transfers
4547 * Context: can sleep
4548 *
4549 * This call may only be used from a context that may sleep. The sleep
4550 * is non-interruptible, and has no timeout. Low-overhead controller
4551 * drivers may DMA directly into and out of the message buffers.
4552 *
4553 * Note that the SPI device's chip select is active during the message,
4554 * and then is normally disabled between messages. Drivers for some
4555 * frequently-used devices may want to minimize costs of selecting a chip,
4556 * by leaving it selected in anticipation that the next message will go
4557 * to the same chip. (That may increase power usage.)
4558 *
4559 * Also, the caller is guaranteeing that the memory associated with the
4560 * message will not be freed before this call returns.
4561 *
4562 * Return: zero on success, else a negative error code.
4563 */
4564 int spi_sync(struct spi_device *spi, struct spi_message *message)
4565 {
4566 int ret;
4567
4568 mutex_lock(&spi->controller->bus_lock_mutex);
4569 ret = __spi_sync(spi, message);
4570 mutex_unlock(&spi->controller->bus_lock_mutex);
4571
4572 return ret;
4573 }
4574 EXPORT_SYMBOL_GPL(spi_sync);
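
/*
 * Illustrative sketch of a one-transfer, full-duplex synchronous exchange;
 * tx, rx and len stand for the caller's DMA-safe buffers and their size.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_sync(spi, &msg);
 */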
4575
4576 /**
4577 * spi_sync_locked - version of spi_sync with exclusive bus usage
4578 * @spi: device with which data will be exchanged
4579 * @message: describes the data transfers
4580 * Context: can sleep
4581 *
4582 * This call may only be used from a context that may sleep. The sleep
4583 * is non-interruptible, and has no timeout. Low-overhead controller
4584 * drivers may DMA directly into and out of the message buffers.
4585 *
4586 * This call should be used by drivers that require exclusive access to the
4587 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4588 * be released by a spi_bus_unlock call when the exclusive access is over.
4589 *
4590 * Return: zero on success, else a negative error code.
4591 */
4592 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4593 {
4594 return __spi_sync(spi, message);
4595 }
4596 EXPORT_SYMBOL_GPL(spi_sync_locked);
4597
4598 /**
4599 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4600 * @ctlr: SPI bus master that should be locked for exclusive bus access
4601 * Context: can sleep
4602 *
4603 * This call may only be used from a context that may sleep. The sleep
4604 * is non-interruptible, and has no timeout.
4605 *
4606 * This call should be used by drivers that require exclusive access to the
4607 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4608 * exclusive access is over. Data transfer must be done by spi_sync_locked
4609 * and spi_async_locked calls when the SPI bus lock is held.
4610 *
4611 * Return: always zero.
4612 */
4613 int spi_bus_lock(struct spi_controller *ctlr)
4614 {
4615 unsigned long flags;
4616
4617 mutex_lock(&ctlr->bus_lock_mutex);
4618
4619 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4620 ctlr->bus_lock_flag = 1;
4621 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4622
4623 /* Mutex remains locked until spi_bus_unlock() is called */
4624
4625 return 0;
4626 }
4627 EXPORT_SYMBOL_GPL(spi_bus_lock);
4628
4629 /**
4630 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4631 * @ctlr: SPI bus master that was locked for exclusive bus access
4632 * Context: can sleep
4633 *
4634 * This call may only be used from a context that may sleep. The sleep
4635 * is non-interruptible, and has no timeout.
4636 *
4637 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4638 * call.
4639 *
4640 * Return: always zero.
4641 */
4642 int spi_bus_unlock(struct spi_controller *ctlr)
4643 {
4644 ctlr->bus_lock_flag = 0;
4645
4646 mutex_unlock(&ctlr->bus_lock_mutex);
4647
4648 return 0;
4649 }
4650 EXPORT_SYMBOL_GPL(spi_bus_unlock);
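
/*
 * Illustrative sketch of the lock/locked-transfer/unlock pattern for a
 * sequence of messages that must not be interleaved with other clients:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */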
4651
4652 /* Portable code must never pass more than 32 bytes */
4653 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4654
4655 static u8 *buf;
4656
4657 /**
4658 * spi_write_then_read - SPI synchronous write followed by read
4659 * @spi: device with which data will be exchanged
4660 * @txbuf: data to be written (need not be DMA-safe)
4661 * @n_tx: size of txbuf, in bytes
4662 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4663 * @n_rx: size of rxbuf, in bytes
4664 * Context: can sleep
4665 *
4666 * This performs a half duplex MicroWire style transaction with the
4667 * device, sending txbuf and then reading rxbuf. The return value
4668 * is zero for success, else a negative errno status code.
4669 * This call may only be used from a context that may sleep.
4670 *
4671 * Parameters to this routine are always copied using a small buffer.
4672 * Performance-sensitive or bulk transfer code should instead use
4673 * spi_{async,sync}() calls with DMA-safe buffers.
4674 *
4675 * Return: zero on success, else a negative error code.
4676 */
4677 int spi_write_then_read(struct spi_device *spi,
4678 const void *txbuf, unsigned n_tx,
4679 void *rxbuf, unsigned n_rx)
4680 {
4681 static DEFINE_MUTEX(lock);
4682
4683 int status;
4684 struct spi_message message;
4685 struct spi_transfer x[2];
4686 u8 *local_buf;
4687
4688 /*
4689 * Use preallocated DMA-safe buffer if we can. We can't avoid
4690 * copying here (it's purely a convenience), but we can
4691 * keep heap costs out of the hot path unless someone else is
4692 * using the pre-allocated buffer or the transfer is too large.
4693 */
4694 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4695 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4696 GFP_KERNEL | GFP_DMA);
4697 if (!local_buf)
4698 return -ENOMEM;
4699 } else {
4700 local_buf = buf;
4701 }
4702
4703 spi_message_init(&message);
4704 memset(x, 0, sizeof(x));
4705 if (n_tx) {
4706 x[0].len = n_tx;
4707 spi_message_add_tail(&x[0], &message);
4708 }
4709 if (n_rx) {
4710 x[1].len = n_rx;
4711 spi_message_add_tail(&x[1], &message);
4712 }
4713
4714 memcpy(local_buf, txbuf, n_tx);
4715 x[0].tx_buf = local_buf;
4716 x[1].rx_buf = local_buf + n_tx;
4717
4718 /* Do the I/O */
4719 status = spi_sync(spi, &message);
4720 if (status == 0)
4721 memcpy(rxbuf, x[1].rx_buf, n_rx);
4722
4723 if (x[0].tx_buf == buf)
4724 mutex_unlock(&lock);
4725 else
4726 kfree(local_buf);
4727
4728 return status;
4729 }
4730 EXPORT_SYMBOL_GPL(spi_write_then_read);
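
/*
 * Illustrative sketch of a command/response exchange using the helper above;
 * the opcode and response length are hypothetical. Since n_tx + n_rx is well
 * under SPI_BUFSIZ here, the preallocated DMA-safe buffer is normally used.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */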
4731
4732 /*-------------------------------------------------------------------------*/
4733
4734 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4735 /* Must call put_device() when done with returned spi_device device */
4736 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4737 {
4738 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4739
4740 return dev ? to_spi_device(dev) : NULL;
4741 }
4742
4743 /* The SPI controllers are not on spi_bus, so we must find them another way */
4744 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4745 {
4746 struct device *dev;
4747
4748 dev = class_find_device_by_of_node(&spi_master_class, node);
4749 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4750 dev = class_find_device_by_of_node(&spi_slave_class, node);
4751 if (!dev)
4752 return NULL;
4753
4754 /* Reference got in class_find_device */
4755 return container_of(dev, struct spi_controller, dev);
4756 }
4757
4758 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4759 void *arg)
4760 {
4761 struct of_reconfig_data *rd = arg;
4762 struct spi_controller *ctlr;
4763 struct spi_device *spi;
4764
4765 switch (of_reconfig_get_state_change(action, arg)) {
4766 case OF_RECONFIG_CHANGE_ADD:
4767 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4768 if (ctlr == NULL)
4769 return NOTIFY_OK; /* Not for us */
4770
4771 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4772 put_device(&ctlr->dev);
4773 return NOTIFY_OK;
4774 }
4775
4776 /*
4777 * Clear the flag before adding the device so that fw_devlink
4778 * doesn't skip adding consumers to this device.
4779 */
4780 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4781 spi = of_register_spi_device(ctlr, rd->dn);
4782 put_device(&ctlr->dev);
4783
4784 if (IS_ERR(spi)) {
4785 pr_err("%s: failed to create for '%pOF'\n",
4786 __func__, rd->dn);
4787 of_node_clear_flag(rd->dn, OF_POPULATED);
4788 return notifier_from_errno(PTR_ERR(spi));
4789 }
4790 break;
4791
4792 case OF_RECONFIG_CHANGE_REMOVE:
4793 /* Already depopulated? */
4794 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4795 return NOTIFY_OK;
4796
4797 /* Find our device by node */
4798 spi = of_find_spi_device_by_node(rd->dn);
4799 if (spi == NULL)
4800 return NOTIFY_OK; /* No? not meant for us */
4801
4802 /* Unregister takes one ref away */
4803 spi_unregister_device(spi);
4804
4805 /* And put the reference of the find */
4806 put_device(&spi->dev);
4807 break;
4808 }
4809
4810 return NOTIFY_OK;
4811 }
4812
4813 static struct notifier_block spi_of_notifier = {
4814 .notifier_call = of_spi_notify,
4815 };
4816 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4817 extern struct notifier_block spi_of_notifier;
4818 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4819
4820 #if IS_ENABLED(CONFIG_ACPI)
4821 static int spi_acpi_controller_match(struct device *dev, const void *data)
4822 {
4823 return ACPI_COMPANION(dev->parent) == data;
4824 }
4825
4826 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4827 {
4828 struct device *dev;
4829
4830 dev = class_find_device(&spi_master_class, NULL, adev,
4831 spi_acpi_controller_match);
4832 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4833 dev = class_find_device(&spi_slave_class, NULL, adev,
4834 spi_acpi_controller_match);
4835 if (!dev)
4836 return NULL;
4837
4838 return container_of(dev, struct spi_controller, dev);
4839 }
4840 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4841
4842 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4843 {
4844 struct device *dev;
4845
4846 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4847 return to_spi_device(dev);
4848 }
4849
4850 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4851 void *arg)
4852 {
4853 struct acpi_device *adev = arg;
4854 struct spi_controller *ctlr;
4855 struct spi_device *spi;
4856
4857 switch (value) {
4858 case ACPI_RECONFIG_DEVICE_ADD:
4859 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4860 if (!ctlr)
4861 break;
4862
4863 acpi_register_spi_device(ctlr, adev);
4864 put_device(&ctlr->dev);
4865 break;
4866 case ACPI_RECONFIG_DEVICE_REMOVE:
4867 if (!acpi_device_enumerated(adev))
4868 break;
4869
4870 spi = acpi_spi_find_device_by_adev(adev);
4871 if (!spi)
4872 break;
4873
4874 spi_unregister_device(spi);
4875 put_device(&spi->dev);
4876 break;
4877 }
4878
4879 return NOTIFY_OK;
4880 }
4881
4882 static struct notifier_block spi_acpi_notifier = {
4883 .notifier_call = acpi_spi_notify,
4884 };
4885 #else
4886 extern struct notifier_block spi_acpi_notifier;
4887 #endif
4888
4889 static int __init spi_init(void)
4890 {
4891 int status;
4892
4893 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4894 if (!buf) {
4895 status = -ENOMEM;
4896 goto err0;
4897 }
4898
4899 status = bus_register(&spi_bus_type);
4900 if (status < 0)
4901 goto err1;
4902
4903 status = class_register(&spi_master_class);
4904 if (status < 0)
4905 goto err2;
4906
4907 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4908 status = class_register(&spi_slave_class);
4909 if (status < 0)
4910 goto err3;
4911 }
4912
4913 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4914 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4915 if (IS_ENABLED(CONFIG_ACPI))
4916 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4917
4918 return 0;
4919
4920 err3:
4921 class_unregister(&spi_master_class);
4922 err2:
4923 bus_unregister(&spi_bus_type);
4924 err1:
4925 kfree(buf);
4926 buf = NULL;
4927 err0:
4928 return status;
4929 }
4930
4931 /*
4932 * A board_info is normally registered in arch_initcall(),
4933 * but even essential drivers wait till later.
4934 *
4935 * REVISIT only boardinfo really needs static linking. The rest (device and
4936 * driver registration) _could_ be dynamically linked (modular) ... Costs
4937 * include needing to have boardinfo data structures be much more public.
4938 */
4939 postcore_initcall(spi_init);
4940