1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6
7 #include <linux/kernel.h>
8 #include <linux/device.h>
9 #include <linux/init.h>
10 #include <linux/cache.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/mutex.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/clk/clk-conf.h>
17 #include <linux/slab.h>
18 #include <linux/mod_devicetable.h>
19 #include <linux/spi/spi.h>
20 #include <linux/spi/spi-mem.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/property.h>
25 #include <linux/export.h>
26 #include <linux/sched/rt.h>
27 #include <uapi/linux/sched/types.h>
28 #include <linux/delay.h>
29 #include <linux/kthread.h>
30 #include <linux/ioport.h>
31 #include <linux/acpi.h>
32 #include <linux/highmem.h>
33 #include <linux/idr.h>
34 #include <linux/platform_data/x86/apple.h>
35 #include <linux/ptp_clock_kernel.h>
36
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/spi.h>
39 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
41
42 #include "internals.h"
43
44 static DEFINE_IDR(spi_master_idr);
45
46 static void spidev_release(struct device *dev)
47 {
48         struct spi_device       *spi = to_spi_device(dev);
49
50         spi_controller_put(spi->controller);
51         kfree(spi->driver_override);
52         kfree(spi);
53 }
54
55 static ssize_t
56 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
57 {
58         const struct spi_device *spi = to_spi_device(dev);
59         int len;
60
61         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
62         if (len != -ENODEV)
63                 return len;
64
65         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
66 }
67 static DEVICE_ATTR_RO(modalias);
68
69 static ssize_t driver_override_store(struct device *dev,
70                                      struct device_attribute *a,
71                                      const char *buf, size_t count)
72 {
73         struct spi_device *spi = to_spi_device(dev);
74         const char *end = memchr(buf, '\n', count);
75         const size_t len = end ? end - buf : count;
76         const char *driver_override, *old;
77
78         /* We need to keep extra room for a newline when displaying value */
79         if (len >= (PAGE_SIZE - 1))
80                 return -EINVAL;
81
82         driver_override = kstrndup(buf, len, GFP_KERNEL);
83         if (!driver_override)
84                 return -ENOMEM;
85
86         device_lock(dev);
87         old = spi->driver_override;
88         if (len) {
89                 spi->driver_override = driver_override;
90         } else {
91                 /* Empty string, disable driver override */
92                 spi->driver_override = NULL;
93                 kfree(driver_override);
94         }
95         device_unlock(dev);
96         kfree(old);
97
98         return count;
99 }
100
101 static ssize_t driver_override_show(struct device *dev,
102                                     struct device_attribute *a, char *buf)
103 {
104         const struct spi_device *spi = to_spi_device(dev);
105         ssize_t len;
106
107         device_lock(dev);
108         len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
109         device_unlock(dev);
110         return len;
111 }
112 static DEVICE_ATTR_RW(driver_override);
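/*
 * Typical use of the attribute above from userspace (the device name is just
 * an example): "echo foo_driver > /sys/bus/spi/devices/spi0.0/driver_override"
 * makes that device match only the named driver on its next (re)bind, and
 * writing an empty string clears the override again.
 */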
113
114 #define SPI_STATISTICS_ATTRS(field, file)                               \
115 static ssize_t spi_controller_##field##_show(struct device *dev,        \
116                                              struct device_attribute *attr, \
117                                              char *buf)                 \
118 {                                                                       \
119         struct spi_controller *ctlr = container_of(dev,                 \
120                                          struct spi_controller, dev);   \
121         return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
122 }                                                                       \
123 static struct device_attribute dev_attr_spi_controller_##field = {      \
124         .attr = { .name = file, .mode = 0444 },                         \
125         .show = spi_controller_##field##_show,                          \
126 };                                                                      \
127 static ssize_t spi_device_##field##_show(struct device *dev,            \
128                                          struct device_attribute *attr, \
129                                         char *buf)                      \
130 {                                                                       \
131         struct spi_device *spi = to_spi_device(dev);                    \
132         return spi_statistics_##field##_show(&spi->statistics, buf);    \
133 }                                                                       \
134 static struct device_attribute dev_attr_spi_device_##field = {          \
135         .attr = { .name = file, .mode = 0444 },                         \
136         .show = spi_device_##field##_show,                              \
137 }
138
139 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
140 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
141                                             char *buf)                  \
142 {                                                                       \
143         unsigned long flags;                                            \
144         ssize_t len;                                                    \
145         spin_lock_irqsave(&stat->lock, flags);                          \
146         len = sysfs_emit(buf, format_string "\n", stat->field);         \
147         spin_unlock_irqrestore(&stat->lock, flags);                     \
148         return len;                                                     \
149 }                                                                       \
150 SPI_STATISTICS_ATTRS(name, file)
151
152 #define SPI_STATISTICS_SHOW(field, format_string)                       \
153         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
154                                  field, format_string)
155
156 SPI_STATISTICS_SHOW(messages, "%lu");
157 SPI_STATISTICS_SHOW(transfers, "%lu");
158 SPI_STATISTICS_SHOW(errors, "%lu");
159 SPI_STATISTICS_SHOW(timedout, "%lu");
160
161 SPI_STATISTICS_SHOW(spi_sync, "%lu");
162 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
163 SPI_STATISTICS_SHOW(spi_async, "%lu");
164
165 SPI_STATISTICS_SHOW(bytes, "%llu");
166 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
167 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
168
169 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
170         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
171                                  "transfer_bytes_histo_" number,        \
172                                  transfer_bytes_histo[index],  "%lu")
173 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
174 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
175 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
176 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
190
191 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
192
193 static struct attribute *spi_dev_attrs[] = {
194         &dev_attr_modalias.attr,
195         &dev_attr_driver_override.attr,
196         NULL,
197 };
198
199 static const struct attribute_group spi_dev_group = {
200         .attrs  = spi_dev_attrs,
201 };
202
203 static struct attribute *spi_device_statistics_attrs[] = {
204         &dev_attr_spi_device_messages.attr,
205         &dev_attr_spi_device_transfers.attr,
206         &dev_attr_spi_device_errors.attr,
207         &dev_attr_spi_device_timedout.attr,
208         &dev_attr_spi_device_spi_sync.attr,
209         &dev_attr_spi_device_spi_sync_immediate.attr,
210         &dev_attr_spi_device_spi_async.attr,
211         &dev_attr_spi_device_bytes.attr,
212         &dev_attr_spi_device_bytes_rx.attr,
213         &dev_attr_spi_device_bytes_tx.attr,
214         &dev_attr_spi_device_transfer_bytes_histo0.attr,
215         &dev_attr_spi_device_transfer_bytes_histo1.attr,
216         &dev_attr_spi_device_transfer_bytes_histo2.attr,
217         &dev_attr_spi_device_transfer_bytes_histo3.attr,
218         &dev_attr_spi_device_transfer_bytes_histo4.attr,
219         &dev_attr_spi_device_transfer_bytes_histo5.attr,
220         &dev_attr_spi_device_transfer_bytes_histo6.attr,
221         &dev_attr_spi_device_transfer_bytes_histo7.attr,
222         &dev_attr_spi_device_transfer_bytes_histo8.attr,
223         &dev_attr_spi_device_transfer_bytes_histo9.attr,
224         &dev_attr_spi_device_transfer_bytes_histo10.attr,
225         &dev_attr_spi_device_transfer_bytes_histo11.attr,
226         &dev_attr_spi_device_transfer_bytes_histo12.attr,
227         &dev_attr_spi_device_transfer_bytes_histo13.attr,
228         &dev_attr_spi_device_transfer_bytes_histo14.attr,
229         &dev_attr_spi_device_transfer_bytes_histo15.attr,
230         &dev_attr_spi_device_transfer_bytes_histo16.attr,
231         &dev_attr_spi_device_transfers_split_maxsize.attr,
232         NULL,
233 };
234
235 static const struct attribute_group spi_device_statistics_group = {
236         .name  = "statistics",
237         .attrs  = spi_device_statistics_attrs,
238 };
239
240 static const struct attribute_group *spi_dev_groups[] = {
241         &spi_dev_group,
242         &spi_device_statistics_group,
243         NULL,
244 };
245
246 static struct attribute *spi_controller_statistics_attrs[] = {
247         &dev_attr_spi_controller_messages.attr,
248         &dev_attr_spi_controller_transfers.attr,
249         &dev_attr_spi_controller_errors.attr,
250         &dev_attr_spi_controller_timedout.attr,
251         &dev_attr_spi_controller_spi_sync.attr,
252         &dev_attr_spi_controller_spi_sync_immediate.attr,
253         &dev_attr_spi_controller_spi_async.attr,
254         &dev_attr_spi_controller_bytes.attr,
255         &dev_attr_spi_controller_bytes_rx.attr,
256         &dev_attr_spi_controller_bytes_tx.attr,
257         &dev_attr_spi_controller_transfer_bytes_histo0.attr,
258         &dev_attr_spi_controller_transfer_bytes_histo1.attr,
259         &dev_attr_spi_controller_transfer_bytes_histo2.attr,
260         &dev_attr_spi_controller_transfer_bytes_histo3.attr,
261         &dev_attr_spi_controller_transfer_bytes_histo4.attr,
262         &dev_attr_spi_controller_transfer_bytes_histo5.attr,
263         &dev_attr_spi_controller_transfer_bytes_histo6.attr,
264         &dev_attr_spi_controller_transfer_bytes_histo7.attr,
265         &dev_attr_spi_controller_transfer_bytes_histo8.attr,
266         &dev_attr_spi_controller_transfer_bytes_histo9.attr,
267         &dev_attr_spi_controller_transfer_bytes_histo10.attr,
268         &dev_attr_spi_controller_transfer_bytes_histo11.attr,
269         &dev_attr_spi_controller_transfer_bytes_histo12.attr,
270         &dev_attr_spi_controller_transfer_bytes_histo13.attr,
271         &dev_attr_spi_controller_transfer_bytes_histo14.attr,
272         &dev_attr_spi_controller_transfer_bytes_histo15.attr,
273         &dev_attr_spi_controller_transfer_bytes_histo16.attr,
274         &dev_attr_spi_controller_transfers_split_maxsize.attr,
275         NULL,
276 };
277
278 static const struct attribute_group spi_controller_statistics_group = {
279         .name  = "statistics",
280         .attrs  = spi_controller_statistics_attrs,
281 };
282
283 static const struct attribute_group *spi_master_groups[] = {
284         &spi_controller_statistics_group,
285         NULL,
286 };
287
288 static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
289                                               struct spi_transfer *xfer,
290                                               struct spi_controller *ctlr)
291 {
292         unsigned long flags;
293         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
294
295         if (l2len < 0)
296                 l2len = 0;
297
298         spin_lock_irqsave(&stats->lock, flags);
299
300         stats->transfers++;
301         stats->transfer_bytes_histo[l2len]++;
302
303         stats->bytes += xfer->len;
304         if ((xfer->tx_buf) &&
305             (xfer->tx_buf != ctlr->dummy_tx))
306                 stats->bytes_tx += xfer->len;
307         if ((xfer->rx_buf) &&
308             (xfer->rx_buf != ctlr->dummy_rx))
309                 stats->bytes_rx += xfer->len;
310
311         spin_unlock_irqrestore(&stats->lock, flags);
312 }
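/*
 * Worked example of the bucketing above: a 300 byte transfer has
 * fls(300) = 9, so l2len = 8 and it is counted in the "256-511" bucket;
 * a zero length transfer is clamped into the "0-1" bucket, and anything
 * of 64 KiB or more ends up in "65536+".
 */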
313
314 /*
315  * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
316  * and the sysfs version makes coldplug work too.
317  */
318 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
319 {
320         while (id->name[0]) {
321                 if (!strcmp(name, id->name))
322                         return id;
323                 id++;
324         }
325         return NULL;
326 }
327
328 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
329 {
330         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
331
332         return spi_match_id(sdrv->id_table, sdev->modalias);
333 }
334 EXPORT_SYMBOL_GPL(spi_get_device_id);
335
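/*
 * Match precedence implemented below: an explicit driver_override wins,
 * then OF compatible strings, then ACPI IDs, then the driver's
 * spi_device_id table, and finally a plain modalias vs. driver name
 * comparison.
 */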
336 static int spi_match_device(struct device *dev, struct device_driver *drv)
337 {
338         const struct spi_device *spi = to_spi_device(dev);
339         const struct spi_driver *sdrv = to_spi_driver(drv);
340
341         /* Check override first, and if set, only use the named driver */
342         if (spi->driver_override)
343                 return strcmp(spi->driver_override, drv->name) == 0;
344
345         /* Attempt an OF style match */
346         if (of_driver_match_device(dev, drv))
347                 return 1;
348
349         /* Then try ACPI */
350         if (acpi_driver_match_device(dev, drv))
351                 return 1;
352
353         if (sdrv->id_table)
354                 return !!spi_match_id(sdrv->id_table, spi->modalias);
355
356         return strcmp(spi->modalias, drv->name) == 0;
357 }
358
359 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
360 {
361         const struct spi_device         *spi = to_spi_device(dev);
362         int rc;
363
364         rc = acpi_device_uevent_modalias(dev, env);
365         if (rc != -ENODEV)
366                 return rc;
367
368         return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
369 }
370
371 static int spi_probe(struct device *dev)
372 {
373         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
374         struct spi_device               *spi = to_spi_device(dev);
375         int ret;
376
377         ret = of_clk_set_defaults(dev->of_node, false);
378         if (ret)
379                 return ret;
380
381         if (dev->of_node) {
382                 spi->irq = of_irq_get(dev->of_node, 0);
383                 if (spi->irq == -EPROBE_DEFER)
384                         return -EPROBE_DEFER;
385                 if (spi->irq < 0)
386                         spi->irq = 0;
387         }
388
389         ret = dev_pm_domain_attach(dev, true);
390         if (ret)
391                 return ret;
392
393         if (sdrv->probe) {
394                 ret = sdrv->probe(spi);
395                 if (ret)
396                         dev_pm_domain_detach(dev, true);
397         }
398
399         return ret;
400 }
401
402 static void spi_remove(struct device *dev)
403 {
404         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
405
406         if (sdrv->remove)
407                 sdrv->remove(to_spi_device(dev));
408
409         dev_pm_domain_detach(dev, true);
410 }
411
412 static void spi_shutdown(struct device *dev)
413 {
414         if (dev->driver) {
415                 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
416
417                 if (sdrv->shutdown)
418                         sdrv->shutdown(to_spi_device(dev));
419         }
420 }
421
422 struct bus_type spi_bus_type = {
423         .name           = "spi",
424         .dev_groups     = spi_dev_groups,
425         .match          = spi_match_device,
426         .uevent         = spi_uevent,
427         .probe          = spi_probe,
428         .remove         = spi_remove,
429         .shutdown       = spi_shutdown,
430 };
431 EXPORT_SYMBOL_GPL(spi_bus_type);
432
433 /**
434  * __spi_register_driver - register a SPI driver
435  * @owner: owner module of the driver to register
436  * @sdrv: the driver to register
437  * Context: can sleep
438  *
439  * Return: zero on success, else a negative error code.
440  */
441 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
442 {
443         sdrv->driver.owner = owner;
444         sdrv->driver.bus = &spi_bus_type;
445
446         /*
447          * For Really Good Reasons we use spi: modaliases rather than of:
448          * modaliases for DT, so module autoloading won't work if we
449          * don't have a spi_device_id as well as a compatible string.
450          */
451         if (sdrv->driver.of_match_table) {
452                 const struct of_device_id *of_id;
453
454                 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
455                      of_id++) {
456                         const char *of_name;
457
458                         /* Strip off any vendor prefix */
459                         of_name = strnchr(of_id->compatible,
460                                           sizeof(of_id->compatible), ',');
461                         if (of_name)
462                                 of_name++;
463                         else
464                                 of_name = of_id->compatible;
465
466                         if (sdrv->id_table) {
467                                 const struct spi_device_id *spi_id;
468
469                                 spi_id = spi_match_id(sdrv->id_table, of_name);
470                                 if (spi_id)
471                                         continue;
472                         } else {
473                                 if (strcmp(sdrv->driver.name, of_name) == 0)
474                                         continue;
475                         }
476
477                         pr_warn("SPI driver %s has no spi_device_id for %s\n",
478                                 sdrv->driver.name, of_id->compatible);
479                 }
480         }
481
482         return driver_register(&sdrv->driver);
483 }
484 EXPORT_SYMBOL_GPL(__spi_register_driver);
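/*
 * Illustrative sketch, not part of this file: a minimal client driver that
 * carries both an of_match_table and an id_table, so that the module
 * autoloading caveat described above does not bite.  All names here
 * ("vnd,foo", foo_spi_*) are hypothetical.
 */
#if 0
static int foo_spi_probe(struct spi_device *spi)
{
	/* talk to the chip with spi_sync(), spi_write_then_read(), ... */
	return 0;
}

static const struct of_device_id foo_spi_of_match[] = {
	{ .compatible = "vnd,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_spi_of_match);

/* the name after the vendor prefix must show up here as well */
static const struct spi_device_id foo_spi_ids[] = {
	{ "foo" },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static struct spi_driver foo_spi_driver = {
	.driver = {
		.name		= "foo",
		.of_match_table	= foo_spi_of_match,
	},
	.probe		= foo_spi_probe,
	.id_table	= foo_spi_ids,
};
module_spi_driver(foo_spi_driver);
#endif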
485
486 /*-------------------------------------------------------------------------*/
487
488 /*
489  * SPI devices should normally not be created by SPI device drivers; that
490  * would make them board-specific.  Similarly with SPI controller drivers.
491  * Device registration normally goes into code like arch/.../mach.../board-YYY.c
492  * with other readonly (flashable) information about mainboard devices.
493  */
494
495 struct boardinfo {
496         struct list_head        list;
497         struct spi_board_info   board_info;
498 };
499
500 static LIST_HEAD(board_list);
501 static LIST_HEAD(spi_controller_list);
502
503 /*
504  * Used to protect add/del operations on the board_info list and the
505  * spi_controller list, and their matching process; also used
506  * to protect the struct idr object.
507  */
508 static DEFINE_MUTEX(board_lock);
509
510 /**
511  * spi_alloc_device - Allocate a new SPI device
512  * @ctlr: Controller to which device is connected
513  * Context: can sleep
514  *
515  * Allows a driver to allocate and initialize a spi_device without
516  * registering it immediately.  This allows a driver to directly
517  * fill the spi_device with device parameters before calling
518  * spi_add_device() on it.
519  *
520  * Caller is responsible to call spi_add_device() on the returned
521  * spi_device structure to add it to the SPI controller.  If the caller
522  * needs to discard the spi_device without adding it, then it should
523  * call spi_dev_put() on it.
524  *
525  * Return: a pointer to the new device, or NULL.
526  */
527 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
528 {
529         struct spi_device       *spi;
530
531         if (!spi_controller_get(ctlr))
532                 return NULL;
533
534         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
535         if (!spi) {
536                 spi_controller_put(ctlr);
537                 return NULL;
538         }
539
540         spi->master = spi->controller = ctlr;
541         spi->dev.parent = &ctlr->dev;
542         spi->dev.bus = &spi_bus_type;
543         spi->dev.release = spidev_release;
544         spi->mode = ctlr->buswidth_override_bits;
545
546         spin_lock_init(&spi->statistics.lock);
547
548         device_initialize(&spi->dev);
549         return spi;
550 }
551 EXPORT_SYMBOL_GPL(spi_alloc_device);
552
553 static void spi_dev_set_name(struct spi_device *spi)
554 {
555         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
556
557         if (adev) {
558                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
559                 return;
560         }
561
562         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
563                      spi->chip_select);
564 }
565
566 static int spi_dev_check(struct device *dev, void *data)
567 {
568         struct spi_device *spi = to_spi_device(dev);
569         struct spi_device *new_spi = data;
570
571         if (spi->controller == new_spi->controller &&
572             spi->chip_select == new_spi->chip_select)
573                 return -EBUSY;
574         return 0;
575 }
576
577 static void spi_cleanup(struct spi_device *spi)
578 {
579         if (spi->controller->cleanup)
580                 spi->controller->cleanup(spi);
581 }
582
583 static int __spi_add_device(struct spi_device *spi)
584 {
585         struct spi_controller *ctlr = spi->controller;
586         struct device *dev = ctlr->dev.parent;
587         int status;
588
589         /*
590          * We need to make sure there's no other device with this
591          * chipselect **BEFORE** we call setup(), else we'll trash
592          * its configuration.
593          */
594         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
595         if (status) {
596                 dev_err(dev, "chipselect %d already in use\n",
597                                 spi->chip_select);
598                 return status;
599         }
600
601         /* Controller may unregister concurrently */
602         if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
603             !device_is_registered(&ctlr->dev)) {
604                 return -ENODEV;
605         }
606
607         if (ctlr->cs_gpiods)
608                 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
609
610         /*
611          * Drivers may modify this initial i/o setup, but will
612          * normally rely on the device being set up.  Devices
613          * using SPI_CS_HIGH can't coexist well otherwise...
614          */
615         status = spi_setup(spi);
616         if (status < 0) {
617                 dev_err(dev, "can't setup %s, status %d\n",
618                                 dev_name(&spi->dev), status);
619                 return status;
620         }
621
622         /* Device may be bound to an active driver when this returns */
623         status = device_add(&spi->dev);
624         if (status < 0) {
625                 dev_err(dev, "can't add %s, status %d\n",
626                                 dev_name(&spi->dev), status);
627                 spi_cleanup(spi);
628         } else {
629                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
630         }
631
632         return status;
633 }
634
635 /**
636  * spi_add_device - Add spi_device allocated with spi_alloc_device
637  * @spi: spi_device to register
638  *
639  * Companion function to spi_alloc_device.  Devices allocated with
640  * spi_alloc_device can be added onto the spi bus with this function.
641  *
642  * Return: 0 on success; negative errno on failure
643  */
644 int spi_add_device(struct spi_device *spi)
645 {
646         struct spi_controller *ctlr = spi->controller;
647         struct device *dev = ctlr->dev.parent;
648         int status;
649
650         /* Chipselects are numbered 0..max; validate. */
651         if (spi->chip_select >= ctlr->num_chipselect) {
652                 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
653                         ctlr->num_chipselect);
654                 return -EINVAL;
655         }
656
657         /* Set the bus ID string */
658         spi_dev_set_name(spi);
659
660         mutex_lock(&ctlr->add_lock);
661         status = __spi_add_device(spi);
662         mutex_unlock(&ctlr->add_lock);
663         return status;
664 }
665 EXPORT_SYMBOL_GPL(spi_add_device);
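/*
 * Illustrative sketch, not part of this file: the allocate/fill/add pattern
 * described in the spi_alloc_device() kerneldoc above.  The chip parameters
 * and foo_* names are hypothetical.
 */
#if 0
static struct spi_device *foo_add_chip(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	spi = spi_alloc_device(ctlr);
	if (!spi)
		return NULL;

	/* fill in device parameters before registering it */
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;
	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));

	if (spi_add_device(spi)) {
		/* a device that was never added is discarded with spi_dev_put() */
		spi_dev_put(spi);
		return NULL;
	}

	return spi;
}
#endif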
666
667 static int spi_add_device_locked(struct spi_device *spi)
668 {
669         struct spi_controller *ctlr = spi->controller;
670         struct device *dev = ctlr->dev.parent;
671
672         /* Chipselects are numbered 0..max; validate. */
673         if (spi->chip_select >= ctlr->num_chipselect) {
674                 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
675                         ctlr->num_chipselect);
676                 return -EINVAL;
677         }
678
679         /* Set the bus ID string */
680         spi_dev_set_name(spi);
681
682         WARN_ON(!mutex_is_locked(&ctlr->add_lock));
683         return __spi_add_device(spi);
684 }
685
686 /**
687  * spi_new_device - instantiate one new SPI device
688  * @ctlr: Controller to which device is connected
689  * @chip: Describes the SPI device
690  * Context: can sleep
691  *
692  * On typical mainboards, this is purely internal; and it's not needed
693  * after board init creates the hard-wired devices.  Some development
694  * platforms may not be able to use spi_register_board_info though, and
695  * this is exported so that for example a USB or parport based adapter
696  * driver could add devices (which it would learn about out-of-band).
697  *
698  * Return: the new device, or NULL.
699  */
700 struct spi_device *spi_new_device(struct spi_controller *ctlr,
701                                   struct spi_board_info *chip)
702 {
703         struct spi_device       *proxy;
704         int                     status;
705
706         /*
707          * NOTE:  caller did any chip->bus_num checks necessary.
708          *
709          * Also, unless we change the return value convention to use
710          * error-or-pointer (not NULL-or-pointer), troubleshootability
711          * suggests syslogged diagnostics are best here (ugh).
712          */
713
714         proxy = spi_alloc_device(ctlr);
715         if (!proxy)
716                 return NULL;
717
718         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
719
720         proxy->chip_select = chip->chip_select;
721         proxy->max_speed_hz = chip->max_speed_hz;
722         proxy->mode = chip->mode;
723         proxy->irq = chip->irq;
724         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
725         proxy->dev.platform_data = (void *) chip->platform_data;
726         proxy->controller_data = chip->controller_data;
727         proxy->controller_state = NULL;
728
729         if (chip->swnode) {
730                 status = device_add_software_node(&proxy->dev, chip->swnode);
731                 if (status) {
732                         dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
733                                 chip->modalias, status);
734                         goto err_dev_put;
735                 }
736         }
737
738         status = spi_add_device(proxy);
739         if (status < 0)
740                 goto err_dev_put;
741
742         return proxy;
743
744 err_dev_put:
745         device_remove_software_node(&proxy->dev);
746         spi_dev_put(proxy);
747         return NULL;
748 }
749 EXPORT_SYMBOL_GPL(spi_new_device);
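/*
 * Illustrative sketch, not part of this file: an adapter driver creating a
 * device it learned about out-of-band, as the kerneldoc above describes.
 * The chip description is hypothetical.
 */
#if 0
static struct spi_device *foo_adapter_add_flash(struct spi_controller *ctlr)
{
	struct spi_board_info chip = {
		.modalias	= "foo-flash",
		.chip_select	= 1,
		.max_speed_hz	= 10000000,
		.mode		= SPI_MODE_0,
	};

	/* returns NULL on failure; errors are logged by the core */
	return spi_new_device(ctlr, &chip);
}
#endif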
750
751 /**
752  * spi_unregister_device - unregister a single SPI device
753  * @spi: spi_device to unregister
754  *
755  * Start making the passed SPI device vanish. Normally this would be handled
756  * by spi_unregister_controller().
757  */
758 void spi_unregister_device(struct spi_device *spi)
759 {
760         if (!spi)
761                 return;
762
763         if (spi->dev.of_node) {
764                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
765                 of_node_put(spi->dev.of_node);
766         }
767         if (ACPI_COMPANION(&spi->dev))
768                 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
769         device_remove_software_node(&spi->dev);
770         device_del(&spi->dev);
771         spi_cleanup(spi);
772         put_device(&spi->dev);
773 }
774 EXPORT_SYMBOL_GPL(spi_unregister_device);
775
776 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
777                                               struct spi_board_info *bi)
778 {
779         struct spi_device *dev;
780
781         if (ctlr->bus_num != bi->bus_num)
782                 return;
783
784         dev = spi_new_device(ctlr, bi);
785         if (!dev)
786                 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
787                         bi->modalias);
788 }
789
790 /**
791  * spi_register_board_info - register SPI devices for a given board
792  * @info: array of chip descriptors
793  * @n: how many descriptors are provided
794  * Context: can sleep
795  *
796  * Board-specific early init code calls this (probably during arch_initcall)
797  * with segments of the SPI device table.  Any device nodes are created later,
798  * after the relevant parent SPI controller (bus_num) is defined.  We keep
799  * this table of devices forever, so that reloading a controller driver will
800  * not make Linux forget about these hard-wired devices.
801  *
802  * Other code can also call this, e.g. a particular add-on board might provide
803  * SPI devices through its expansion connector, so code initializing that board
804  * would naturally declare its SPI devices.
805  *
806  * The board info passed can safely be __initdata ... but be careful of
807  * any embedded pointers (platform_data, etc), they're copied as-is.
808  *
809  * Return: zero on success, else a negative error code.
810  */
811 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
812 {
813         struct boardinfo *bi;
814         int i;
815
816         if (!n)
817                 return 0;
818
819         bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
820         if (!bi)
821                 return -ENOMEM;
822
823         for (i = 0; i < n; i++, bi++, info++) {
824                 struct spi_controller *ctlr;
825
826                 memcpy(&bi->board_info, info, sizeof(*info));
827
828                 mutex_lock(&board_lock);
829                 list_add_tail(&bi->list, &board_list);
830                 list_for_each_entry(ctlr, &spi_controller_list, list)
831                         spi_match_controller_to_boardinfo(ctlr,
832                                                           &bi->board_info);
833                 mutex_unlock(&board_lock);
834         }
835
836         return 0;
837 }
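/*
 * Illustrative sketch, not part of this file: board init code registering
 * hard-wired SPI devices, as the kerneldoc above describes.  Bus number,
 * modalias and chip select are hypothetical.
 */
#if 0
static struct spi_board_info foo_board_spi_devices[] __initdata = {
	{
		.modalias	= "foo-sensor",
		.bus_num	= 1,
		.chip_select	= 0,
		.max_speed_hz	= 5000000,
		.mode		= SPI_MODE_3,
	},
};

static int __init foo_board_init(void)
{
	return spi_register_board_info(foo_board_spi_devices,
				       ARRAY_SIZE(foo_board_spi_devices));
}
arch_initcall(foo_board_init);
#endif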
838
839 /*-------------------------------------------------------------------------*/
840
841 /* Core methods for SPI resource management */
842
843 /**
844  * spi_res_alloc - allocate a spi resource that is life-cycle managed
845  *                 during the processing of a spi_message while using
846  *                 spi_transfer_one
847  * @spi:     the spi device for which we allocate memory
848  * @release: the release code to execute for this resource
849  * @size:    size to alloc and return
850  * @gfp:     GFP allocation flags
851  *
852  * Return: the pointer to the allocated data
853  *
854  * This may get enhanced in the future to allocate from a memory pool
855  * of the @spi_device or @spi_controller to avoid repeated allocations.
856  */
857 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
858                            size_t size, gfp_t gfp)
859 {
860         struct spi_res *sres;
861
862         sres = kzalloc(sizeof(*sres) + size, gfp);
863         if (!sres)
864                 return NULL;
865
866         INIT_LIST_HEAD(&sres->entry);
867         sres->release = release;
868
869         return sres->data;
870 }
871
872 /**
873  * spi_res_free - free an spi resource
874  * @res: pointer to the custom data of a resource
875  */
876 static void spi_res_free(void *res)
877 {
878         struct spi_res *sres = container_of(res, struct spi_res, data);
879
880         if (!res)
881                 return;
882
883         WARN_ON(!list_empty(&sres->entry));
884         kfree(sres);
885 }
886
887 /**
888  * spi_res_add - add a spi_res to the spi_message
889  * @message: the spi message
890  * @res:     the spi_resource
891  */
892 static void spi_res_add(struct spi_message *message, void *res)
893 {
894         struct spi_res *sres = container_of(res, struct spi_res, data);
895
896         WARN_ON(!list_empty(&sres->entry));
897         list_add_tail(&sres->entry, &message->resources);
898 }
899
900 /**
901  * spi_res_release - release all spi resources for this message
902  * @ctlr:  the @spi_controller
903  * @message: the @spi_message
904  */
905 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
906 {
907         struct spi_res *res, *tmp;
908
909         list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
910                 if (res->release)
911                         res->release(ctlr, message, res->data);
912
913                 list_del(&res->entry);
914
915                 kfree(res);
916         }
917 }
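/*
 * Illustrative sketch, not part of this file: the intended usage pattern for
 * the spi_res helpers above, where per-message data is released automatically
 * by spi_res_release() once the message is finalized.  foo_res and the
 * foo_* functions are hypothetical.
 */
#if 0
struct foo_res {
	void *scratch;
};

static void foo_res_release(struct spi_controller *ctlr,
			    struct spi_message *msg, void *res)
{
	struct foo_res *r = res;

	kfree(r->scratch);
}

static int foo_attach_scratch(struct spi_device *spi, struct spi_message *msg)
{
	struct foo_res *r;

	r = spi_res_alloc(spi, foo_res_release, sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	r->scratch = kzalloc(64, GFP_KERNEL);
	if (!r->scratch) {
		spi_res_free(r);
		return -ENOMEM;
	}

	spi_res_add(msg, r);
	return 0;
}
#endif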
918
919 /*-------------------------------------------------------------------------*/
920
921 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
922 {
923         bool activate = enable;
924
925         /*
926          * Avoid calling into the driver (or doing delays) if the chip select
927          * isn't actually changing from the last time this was called.
928          */
929         if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
930                                 (!enable && spi->controller->last_cs != spi->chip_select)) &&
931             (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
932                 return;
933
934         trace_spi_set_cs(spi, activate);
935
936         spi->controller->last_cs = enable ? spi->chip_select : -1;
937         spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
938
939         if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
940                 spi_delay_exec(&spi->cs_hold, NULL);
941         }
942
943         if (spi->mode & SPI_CS_HIGH)
944                 enable = !enable;
945
946         if (spi->cs_gpiod) {
947                 if (!(spi->mode & SPI_NO_CS)) {
948                         /*
949                          * Historically ACPI has no means of expressing the GPIO
950                          * polarity, so the SPISerialBus() resource defines it on a
951                          * per-chip basis. In order to avoid a chain of negations, the
952                          * GPIO polarity is considered to be Active High. Even when
953                          * _DSD() is involved (in the updated versions of ACPI) the
954                          * GPIO CS polarity must be defined Active High to avoid
955                          * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
956                          * into account.
957                          */
958                         if (has_acpi_companion(&spi->dev))
959                                 gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
960                         else
961                                 /* Polarity handled by GPIO library */
962                                 gpiod_set_value_cansleep(spi->cs_gpiod, activate);
963                 }
964                 /* Some SPI masters need both GPIO CS & slave_select */
965                 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
966                     spi->controller->set_cs)
967                         spi->controller->set_cs(spi, !enable);
968         } else if (spi->controller->set_cs) {
969                 spi->controller->set_cs(spi, !enable);
970         }
971
972         if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
973                 if (activate)
974                         spi_delay_exec(&spi->cs_setup, NULL);
975                 else
976                         spi_delay_exec(&spi->cs_inactive, NULL);
977         }
978 }
979
980 #ifdef CONFIG_HAS_DMA
981 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
982                 struct sg_table *sgt, void *buf, size_t len,
983                 enum dma_data_direction dir)
984 {
985         const bool vmalloced_buf = is_vmalloc_addr(buf);
986         unsigned int max_seg_size = dma_get_max_seg_size(dev);
987 #ifdef CONFIG_HIGHMEM
988         const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
989                                 (unsigned long)buf < (PKMAP_BASE +
990                                         (LAST_PKMAP * PAGE_SIZE)));
991 #else
992         const bool kmap_buf = false;
993 #endif
994         int desc_len;
995         int sgs;
996         struct page *vm_page;
997         struct scatterlist *sg;
998         void *sg_buf;
999         size_t min;
1000         int i, ret;
1001
1002         if (vmalloced_buf || kmap_buf) {
1003                 desc_len = min_t(unsigned int, max_seg_size, PAGE_SIZE);
1004                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1005         } else if (virt_addr_valid(buf)) {
1006                 desc_len = min_t(unsigned int, max_seg_size, ctlr->max_dma_len);
1007                 sgs = DIV_ROUND_UP(len, desc_len);
1008         } else {
1009                 return -EINVAL;
1010         }
1011
1012         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1013         if (ret != 0)
1014                 return ret;
1015
1016         sg = &sgt->sgl[0];
1017         for (i = 0; i < sgs; i++) {
1018
1019                 if (vmalloced_buf || kmap_buf) {
1020                         /*
1021                          * Next scatterlist entry size is the minimum between
1022                          * the desc_len and the remaining buffer length that
1023                          * fits in a page.
1024                          */
1025                         min = min_t(size_t, desc_len,
1026                                     min_t(size_t, len,
1027                                           PAGE_SIZE - offset_in_page(buf)));
1028                         if (vmalloced_buf)
1029                                 vm_page = vmalloc_to_page(buf);
1030                         else
1031                                 vm_page = kmap_to_page(buf);
1032                         if (!vm_page) {
1033                                 sg_free_table(sgt);
1034                                 return -ENOMEM;
1035                         }
1036                         sg_set_page(sg, vm_page,
1037                                     min, offset_in_page(buf));
1038                 } else {
1039                         min = min_t(size_t, len, desc_len);
1040                         sg_buf = buf;
1041                         sg_set_buf(sg, sg_buf, min);
1042                 }
1043
1044                 buf += min;
1045                 len -= min;
1046                 sg = sg_next(sg);
1047         }
1048
1049         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
1050         if (!ret)
1051                 ret = -ENOMEM;
1052         if (ret < 0) {
1053                 sg_free_table(sgt);
1054                 return ret;
1055         }
1056
1057         sgt->nents = ret;
1058
1059         return 0;
1060 }
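/*
 * Worked example of the sgs sizing above, assuming 4 KiB pages and a DMA max
 * segment size of at least one page: a vmalloc'ed 8 KiB buffer that starts
 * 100 bytes into a page needs DIV_ROUND_UP(8192 + 100, 4096) = 3 scatterlist
 * entries, because the first and last pages are only partially covered.
 */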
1061
1062 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1063                    struct sg_table *sgt, enum dma_data_direction dir)
1064 {
1065         if (sgt->orig_nents) {
1066                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
1067                 sg_free_table(sgt);
1068         }
1069 }
1070
1071 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1072 {
1073         struct device *tx_dev, *rx_dev;
1074         struct spi_transfer *xfer;
1075         int ret;
1076
1077         if (!ctlr->can_dma)
1078                 return 0;
1079
1080         if (ctlr->dma_tx)
1081                 tx_dev = ctlr->dma_tx->device->dev;
1082         else if (ctlr->dma_map_dev)
1083                 tx_dev = ctlr->dma_map_dev;
1084         else
1085                 tx_dev = ctlr->dev.parent;
1086
1087         if (ctlr->dma_rx)
1088                 rx_dev = ctlr->dma_rx->device->dev;
1089         else if (ctlr->dma_map_dev)
1090                 rx_dev = ctlr->dma_map_dev;
1091         else
1092                 rx_dev = ctlr->dev.parent;
1093
1094         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1095                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1096                         continue;
1097
1098                 if (xfer->tx_buf != NULL) {
1099                         ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
1100                                           (void *)xfer->tx_buf, xfer->len,
1101                                           DMA_TO_DEVICE);
1102                         if (ret != 0)
1103                                 return ret;
1104                 }
1105
1106                 if (xfer->rx_buf != NULL) {
1107                         ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
1108                                           xfer->rx_buf, xfer->len,
1109                                           DMA_FROM_DEVICE);
1110                         if (ret != 0) {
1111                                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
1112                                               DMA_TO_DEVICE);
1113                                 return ret;
1114                         }
1115                 }
1116         }
1117
1118         ctlr->cur_msg_mapped = true;
1119
1120         return 0;
1121 }
1122
1123 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1124 {
1125         struct spi_transfer *xfer;
1126         struct device *tx_dev, *rx_dev;
1127
1128         if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1129                 return 0;
1130
1131         if (ctlr->dma_tx)
1132                 tx_dev = ctlr->dma_tx->device->dev;
1133         else
1134                 tx_dev = ctlr->dev.parent;
1135
1136         if (ctlr->dma_rx)
1137                 rx_dev = ctlr->dma_rx->device->dev;
1138         else
1139                 rx_dev = ctlr->dev.parent;
1140
1141         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1142                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1143                         continue;
1144
1145                 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1146                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1147         }
1148
1149         ctlr->cur_msg_mapped = false;
1150
1151         return 0;
1152 }
1153 #else /* !CONFIG_HAS_DMA */
1154 static inline int __spi_map_msg(struct spi_controller *ctlr,
1155                                 struct spi_message *msg)
1156 {
1157         return 0;
1158 }
1159
1160 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1161                                   struct spi_message *msg)
1162 {
1163         return 0;
1164 }
1165 #endif /* !CONFIG_HAS_DMA */
1166
1167 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1168                                 struct spi_message *msg)
1169 {
1170         struct spi_transfer *xfer;
1171
1172         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1173                 /*
1174                  * Transfers that started out with a NULL tx_buf or rx_buf were
1175                  * pointed at the dummy buffers in spi_map_msg(); restore them to NULL.
1176                  */
1177                 if (xfer->tx_buf == ctlr->dummy_tx)
1178                         xfer->tx_buf = NULL;
1179                 if (xfer->rx_buf == ctlr->dummy_rx)
1180                         xfer->rx_buf = NULL;
1181         }
1182
1183         return __spi_unmap_msg(ctlr, msg);
1184 }
1185
1186 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1187 {
1188         struct spi_transfer *xfer;
1189         void *tmp;
1190         unsigned int max_tx, max_rx;
1191
1192         if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1193                 && !(msg->spi->mode & SPI_3WIRE)) {
1194                 max_tx = 0;
1195                 max_rx = 0;
1196
1197                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1198                         if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1199                             !xfer->tx_buf)
1200                                 max_tx = max(xfer->len, max_tx);
1201                         if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1202                             !xfer->rx_buf)
1203                                 max_rx = max(xfer->len, max_rx);
1204                 }
1205
1206                 if (max_tx) {
1207                         tmp = krealloc(ctlr->dummy_tx, max_tx,
1208                                        GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1209                         if (!tmp)
1210                                 return -ENOMEM;
1211                         ctlr->dummy_tx = tmp;
1212                 }
1213
1214                 if (max_rx) {
1215                         tmp = krealloc(ctlr->dummy_rx, max_rx,
1216                                        GFP_KERNEL | GFP_DMA);
1217                         if (!tmp)
1218                                 return -ENOMEM;
1219                         ctlr->dummy_rx = tmp;
1220                 }
1221
1222                 if (max_tx || max_rx) {
1223                         list_for_each_entry(xfer, &msg->transfers,
1224                                             transfer_list) {
1225                                 if (!xfer->len)
1226                                         continue;
1227                                 if (!xfer->tx_buf)
1228                                         xfer->tx_buf = ctlr->dummy_tx;
1229                                 if (!xfer->rx_buf)
1230                                         xfer->rx_buf = ctlr->dummy_rx;
1231                         }
1232                 }
1233         }
1234
1235         return __spi_map_msg(ctlr, msg);
1236 }
1237
1238 static int spi_transfer_wait(struct spi_controller *ctlr,
1239                              struct spi_message *msg,
1240                              struct spi_transfer *xfer)
1241 {
1242         struct spi_statistics *statm = &ctlr->statistics;
1243         struct spi_statistics *stats = &msg->spi->statistics;
1244         u32 speed_hz = xfer->speed_hz;
1245         unsigned long long ms;
1246
1247         if (spi_controller_is_slave(ctlr)) {
1248                 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1249                         dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1250                         return -EINTR;
1251                 }
1252         } else {
1253                 if (!speed_hz)
1254                         speed_hz = 100000;
1255
1256                 /*
1257                  * For each byte we wait for 8 cycles of the SPI clock.
1258                  * Since speed is defined in Hz and we want milliseconds,
1259                  * apply the millisecond multiplier before the division,
1260                  * otherwise we may get 0 for short transfers.
1261                  */
1262                 ms = 8LL * MSEC_PER_SEC * xfer->len;
1263                 do_div(ms, speed_hz);
1264
1265                 /*
1266                  * Double it and add 200 ms of tolerance; clamp to the
1267                  * predefined maximum in case of overflow.
1268                  */
1269                 ms += ms + 200;
1270                 if (ms > UINT_MAX)
1271                         ms = UINT_MAX;
1272
1273                 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1274                                                  msecs_to_jiffies(ms));
1275
1276                 if (ms == 0) {
1277                         SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1278                         SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1279                         dev_err(&msg->spi->dev,
1280                                 "SPI transfer timed out\n");
1281                         return -ETIMEDOUT;
1282                 }
1283         }
1284
1285         return 0;
1286 }
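/*
 * Worked example of the timeout above: a 1024 byte transfer at 1 MHz needs
 * 8 * 1000 * 1024 / 1000000, about 8 ms, on the wire, which is then doubled
 * and padded to 2 * 8 + 200 = 216 ms before the wait is declared timed out.
 */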
1287
1288 static void _spi_transfer_delay_ns(u32 ns)
1289 {
1290         if (!ns)
1291                 return;
1292         if (ns <= NSEC_PER_USEC) {
1293                 ndelay(ns);
1294         } else {
1295                 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1296
1297                 if (us <= 10)
1298                         udelay(us);
1299                 else
1300                         usleep_range(us, us + DIV_ROUND_UP(us, 10));
1301         }
1302 }
1303
1304 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1305 {
1306         u32 delay = _delay->value;
1307         u32 unit = _delay->unit;
1308         u32 hz;
1309
1310         if (!delay)
1311                 return 0;
1312
1313         switch (unit) {
1314         case SPI_DELAY_UNIT_USECS:
1315                 delay *= NSEC_PER_USEC;
1316                 break;
1317         case SPI_DELAY_UNIT_NSECS:
1318                 /* Nothing to do here */
1319                 break;
1320         case SPI_DELAY_UNIT_SCK:
1321                 /* clock cycles need to be obtained from spi_transfer */
1322                 if (!xfer)
1323                         return -EINVAL;
1324                 /*
1325                  * If the effective speed is unknown, approximate it by
1326                  * underestimating with half of the requested Hz.
1327                  */
1328                 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1329                 if (!hz)
1330                         return -EINVAL;
1331
1332                 /* Convert delay to nanoseconds */
1333                 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1334                 break;
1335         default:
1336                 return -EINVAL;
1337         }
1338
1339         return delay;
1340 }
1341 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
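/*
 * Worked example for the SPI_DELAY_UNIT_SCK case above: a delay of
 * { .value = 4, .unit = SPI_DELAY_UNIT_SCK } on a transfer with an effective
 * speed of 1 MHz converts to 4 * DIV_ROUND_UP(1000000000, 1000000) = 4000 ns.
 */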
1342
1343 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1344 {
1345         int delay;
1346
1347         might_sleep();
1348
1349         if (!_delay)
1350                 return -EINVAL;
1351
1352         delay = spi_delay_to_ns(_delay, xfer);
1353         if (delay < 0)
1354                 return delay;
1355
1356         _spi_transfer_delay_ns(delay);
1357
1358         return 0;
1359 }
1360 EXPORT_SYMBOL_GPL(spi_delay_exec);
1361
1362 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1363                                           struct spi_transfer *xfer)
1364 {
1365         u32 default_delay_ns = 10 * NSEC_PER_USEC;
1366         u32 delay = xfer->cs_change_delay.value;
1367         u32 unit = xfer->cs_change_delay.unit;
1368         int ret;
1369
1370         /* return early on "fast" mode - for everything but USECS */
1371         if (!delay) {
1372                 if (unit == SPI_DELAY_UNIT_USECS)
1373                         _spi_transfer_delay_ns(default_delay_ns);
1374                 return;
1375         }
1376
1377         ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1378         if (ret) {
1379                 dev_err_once(&msg->spi->dev,
1380                              "Use of unsupported delay unit %i, using default of %luus\n",
1381                              unit, default_delay_ns / NSEC_PER_USEC);
1382                 _spi_transfer_delay_ns(default_delay_ns);
1383         }
1384 }
1385
1386 /*
1387  * spi_transfer_one_message - Default implementation of transfer_one_message()
1388  *
1389  * This is a standard implementation of transfer_one_message() for
1390  * drivers which implement a transfer_one() operation.  It provides
1391  * standard handling of delays and chip select management.
1392  */
1393 static int spi_transfer_one_message(struct spi_controller *ctlr,
1394                                     struct spi_message *msg)
1395 {
1396         struct spi_transfer *xfer;
1397         bool keep_cs = false;
1398         int ret = 0;
1399         struct spi_statistics *statm = &ctlr->statistics;
1400         struct spi_statistics *stats = &msg->spi->statistics;
1401
1402         spi_set_cs(msg->spi, true, false);
1403
1404         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1405         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1406
1407         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1408                 trace_spi_transfer_start(msg, xfer);
1409
1410                 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1411                 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1412
1413                 if (!ctlr->ptp_sts_supported) {
1414                         xfer->ptp_sts_word_pre = 0;
1415                         ptp_read_system_prets(xfer->ptp_sts);
1416                 }
1417
1418                 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1419                         reinit_completion(&ctlr->xfer_completion);
1420
1421 fallback_pio:
1422                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1423                         if (ret < 0) {
1424                                 if (ctlr->cur_msg_mapped &&
1425                                    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1426                                         __spi_unmap_msg(ctlr, msg);
1427                                         ctlr->fallback = true;
1428                                         xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1429                                         goto fallback_pio;
1430                                 }
1431
1432                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1433                                                                errors);
1434                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1435                                                                errors);
1436                                 dev_err(&msg->spi->dev,
1437                                         "SPI transfer failed: %d\n", ret);
1438                                 goto out;
1439                         }
1440
1441                         if (ret > 0) {
1442                                 ret = spi_transfer_wait(ctlr, msg, xfer);
1443                                 if (ret < 0)
1444                                         msg->status = ret;
1445                         }
1446                 } else {
1447                         if (xfer->len)
1448                                 dev_err(&msg->spi->dev,
1449                                         "Bufferless transfer has length %u\n",
1450                                         xfer->len);
1451                 }
1452
1453                 if (!ctlr->ptp_sts_supported) {
1454                         ptp_read_system_postts(xfer->ptp_sts);
1455                         xfer->ptp_sts_word_post = xfer->len;
1456                 }
1457
1458                 trace_spi_transfer_stop(msg, xfer);
1459
1460                 if (msg->status != -EINPROGRESS)
1461                         goto out;
1462
1463                 spi_transfer_delay_exec(xfer);
1464
1465                 if (xfer->cs_change) {
1466                         if (list_is_last(&xfer->transfer_list,
1467                                          &msg->transfers)) {
1468                                 keep_cs = true;
1469                         } else {
1470                                 spi_set_cs(msg->spi, false, false);
1471                                 _spi_transfer_cs_change_delay(msg, xfer);
1472                                 spi_set_cs(msg->spi, true, false);
1473                         }
1474                 }
1475
1476                 msg->actual_length += xfer->len;
1477         }
1478
1479 out:
1480         if (ret != 0 || !keep_cs)
1481                 spi_set_cs(msg->spi, false, false);
1482
1483         if (msg->status == -EINPROGRESS)
1484                 msg->status = ret;
1485
1486         if (msg->status && ctlr->handle_err)
1487                 ctlr->handle_err(ctlr, msg);
1488
1489         spi_finalize_current_message(ctlr);
1490
1491         return ret;
1492 }
1493
1494 /**
1495  * spi_finalize_current_transfer - report completion of a transfer
1496  * @ctlr: the controller reporting completion
1497  *
1498  * Called by SPI drivers using the core transfer_one_message()
1499  * implementation to notify it that the current interrupt driven
1500  * transfer has finished and the next one may be scheduled.
1501  */
1502 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1503 {
1504         complete(&ctlr->xfer_completion);
1505 }
1506 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
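
/*
 * Example (illustrative sketch only, not part of the original source):
 * a controller driver whose transfer_one() returned 1 to indicate an
 * in-flight, interrupt-driven transfer would typically call
 * spi_finalize_current_transfer() from its completion interrupt.  The
 * foo_* names and the IRQ plumbing below are hypothetical.
 */
#if 0
static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct foo_spi *fs = spi_controller_get_devdata(ctlr);

	if (!foo_spi_xfer_done(fs))
		return IRQ_NONE;

	/* Wakes spi_transfer_wait() inside spi_transfer_one_message() */
	spi_finalize_current_transfer(ctlr);

	return IRQ_HANDLED;
}
#endif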
1507
1508 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1509 {
1510         if (ctlr->auto_runtime_pm) {
1511                 pm_runtime_mark_last_busy(ctlr->dev.parent);
1512                 pm_runtime_put_autosuspend(ctlr->dev.parent);
1513         }
1514 }
1515
1516 /**
1517  * __spi_pump_messages - function which processes spi message queue
1518  * @ctlr: controller to process queue for
1519  * @in_kthread: true if we are in the context of the message pump thread
1520  *
1521  * This function checks if there is any spi message in the queue that
1522  * needs processing and, if so, calls out to the driver to initialize hardware
1523  * and transfer each message.
1524  *
1525  * Note that it is called both from the kthread itself and also from
1526  * inside spi_sync(); the queue extraction handling at the top of the
1527  * function should deal with this safely.
1528  */
1529 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1530 {
1531         struct spi_transfer *xfer;
1532         struct spi_message *msg;
1533         bool was_busy = false;
1534         unsigned long flags;
1535         int ret;
1536
1537         /* Lock queue */
1538         spin_lock_irqsave(&ctlr->queue_lock, flags);
1539
1540         /* Make sure we are not already running a message */
1541         if (ctlr->cur_msg) {
1542                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1543                 return;
1544         }
1545
1546         /* If another context is idling the device then defer */
1547         if (ctlr->idling) {
1548                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1549                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1550                 return;
1551         }
1552
1553         /* Check if the queue is idle */
1554         if (list_empty(&ctlr->queue) || !ctlr->running) {
1555                 if (!ctlr->busy) {
1556                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1557                         return;
1558                 }
1559
1560                 /* Defer any non-atomic teardown to the thread */
1561                 if (!in_kthread) {
1562                         if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1563                             !ctlr->unprepare_transfer_hardware) {
1564                                 spi_idle_runtime_pm(ctlr);
1565                                 ctlr->busy = false;
1566                                 trace_spi_controller_idle(ctlr);
1567                         } else {
1568                                 kthread_queue_work(ctlr->kworker,
1569                                                    &ctlr->pump_messages);
1570                         }
1571                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1572                         return;
1573                 }
1574
1575                 ctlr->busy = false;
1576                 ctlr->idling = true;
1577                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1578
1579                 kfree(ctlr->dummy_rx);
1580                 ctlr->dummy_rx = NULL;
1581                 kfree(ctlr->dummy_tx);
1582                 ctlr->dummy_tx = NULL;
1583                 if (ctlr->unprepare_transfer_hardware &&
1584                     ctlr->unprepare_transfer_hardware(ctlr))
1585                         dev_err(&ctlr->dev,
1586                                 "failed to unprepare transfer hardware\n");
1587                 spi_idle_runtime_pm(ctlr);
1588                 trace_spi_controller_idle(ctlr);
1589
1590                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1591                 ctlr->idling = false;
1592                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1593                 return;
1594         }
1595
1596         /* Extract head of queue */
1597         msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1598         ctlr->cur_msg = msg;
1599
1600         list_del_init(&msg->queue);
1601         if (ctlr->busy)
1602                 was_busy = true;
1603         else
1604                 ctlr->busy = true;
1605         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1606
1607         mutex_lock(&ctlr->io_mutex);
1608
1609         if (!was_busy && ctlr->auto_runtime_pm) {
1610                 ret = pm_runtime_get_sync(ctlr->dev.parent);
1611                 if (ret < 0) {
1612                         pm_runtime_put_noidle(ctlr->dev.parent);
1613                         dev_err(&ctlr->dev, "Failed to power device: %d\n",
1614                                 ret);
1615                         mutex_unlock(&ctlr->io_mutex);
1616                         return;
1617                 }
1618         }
1619
1620         if (!was_busy)
1621                 trace_spi_controller_busy(ctlr);
1622
1623         if (!was_busy && ctlr->prepare_transfer_hardware) {
1624                 ret = ctlr->prepare_transfer_hardware(ctlr);
1625                 if (ret) {
1626                         dev_err(&ctlr->dev,
1627                                 "failed to prepare transfer hardware: %d\n",
1628                                 ret);
1629
1630                         if (ctlr->auto_runtime_pm)
1631                                 pm_runtime_put(ctlr->dev.parent);
1632
1633                         msg->status = ret;
1634                         spi_finalize_current_message(ctlr);
1635
1636                         mutex_unlock(&ctlr->io_mutex);
1637                         return;
1638                 }
1639         }
1640
1641         trace_spi_message_start(msg);
1642
1643         if (ctlr->prepare_message) {
1644                 ret = ctlr->prepare_message(ctlr, msg);
1645                 if (ret) {
1646                         dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1647                                 ret);
1648                         msg->status = ret;
1649                         spi_finalize_current_message(ctlr);
1650                         goto out;
1651                 }
1652                 ctlr->cur_msg_prepared = true;
1653         }
1654
1655         ret = spi_map_msg(ctlr, msg);
1656         if (ret) {
1657                 msg->status = ret;
1658                 spi_finalize_current_message(ctlr);
1659                 goto out;
1660         }
1661
1662         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1663                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1664                         xfer->ptp_sts_word_pre = 0;
1665                         ptp_read_system_prets(xfer->ptp_sts);
1666                 }
1667         }
1668
1669         ret = ctlr->transfer_one_message(ctlr, msg);
1670         if (ret) {
1671                 dev_err(&ctlr->dev,
1672                         "failed to transfer one message from queue\n");
1673                 goto out;
1674         }
1675
1676 out:
1677         mutex_unlock(&ctlr->io_mutex);
1678
1679         /* Prod the scheduler in case transfer_one() was busy waiting */
1680         if (!ret)
1681                 cond_resched();
1682 }
1683
1684 /**
1685  * spi_pump_messages - kthread work function which processes spi message queue
1686  * @work: pointer to kthread work struct contained in the controller struct
1687  */
1688 static void spi_pump_messages(struct kthread_work *work)
1689 {
1690         struct spi_controller *ctlr =
1691                 container_of(work, struct spi_controller, pump_messages);
1692
1693         __spi_pump_messages(ctlr, true);
1694 }
1695
1696 /**
1697  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1698  * @ctlr: Pointer to the spi_controller structure of the driver
1699  * @xfer: Pointer to the transfer being timestamped
1700  * @progress: How many words (not bytes) have been transferred so far
1701  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1702  *            transfer, for less jitter in time measurement. Only compatible
1703  *            with PIO drivers. If true, the driver must follow up with
1704  *            spi_take_timestamp_post() or the system will crash.
1705  *            WARNING: for fully predictable results, the CPU frequency must
1706  *            also be under control (governor).
1707  *
1708  * This is a helper for drivers to collect the beginning of the TX timestamp
1709  * for the requested byte from the SPI transfer. The frequency with which this
1710  * function must be called (once per word, once for the whole transfer, once
1711  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1712  * greater than or equal to the requested byte at the time of the call. The
1713  * timestamp is only taken once, at the first such call. It is assumed that
1714  * the driver advances its @tx buffer pointer monotonically.
1715  */
1716 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1717                             struct spi_transfer *xfer,
1718                             size_t progress, bool irqs_off)
1719 {
1720         if (!xfer->ptp_sts)
1721                 return;
1722
1723         if (xfer->timestamped)
1724                 return;
1725
1726         if (progress > xfer->ptp_sts_word_pre)
1727                 return;
1728
1729         /* Capture the resolution of the timestamp */
1730         xfer->ptp_sts_word_pre = progress;
1731
1732         if (irqs_off) {
1733                 local_irq_save(ctlr->irq_flags);
1734                 preempt_disable();
1735         }
1736
1737         ptp_read_system_prets(xfer->ptp_sts);
1738 }
1739 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1740
1741 /**
1742  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1743  * @ctlr: Pointer to the spi_controller structure of the driver
1744  * @xfer: Pointer to the transfer being timestamped
1745  * @progress: How many words (not bytes) have been transferred so far
1746  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1747  *
1748  * This is a helper for drivers to collect the end of the TX timestamp for
1749  * the requested byte from the SPI transfer. Can be called with an arbitrary
1750  * frequency: only the first call where @tx exceeds or is equal to the
1751  * requested word will be timestamped.
1752  */
1753 void spi_take_timestamp_post(struct spi_controller *ctlr,
1754                              struct spi_transfer *xfer,
1755                              size_t progress, bool irqs_off)
1756 {
1757         if (!xfer->ptp_sts)
1758                 return;
1759
1760         if (xfer->timestamped)
1761                 return;
1762
1763         if (progress < xfer->ptp_sts_word_post)
1764                 return;
1765
1766         ptp_read_system_postts(xfer->ptp_sts);
1767
1768         if (irqs_off) {
1769                 local_irq_restore(ctlr->irq_flags);
1770                 preempt_enable();
1771         }
1772
1773         /* Capture the resolution of the timestamp */
1774         xfer->ptp_sts_word_post = progress;
1775
1776         xfer->timestamped = true;
1777 }
1778 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
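
/*
 * Example (illustrative sketch only, not part of the original source):
 * a PIO driver brackets the write of each word with the two helpers
 * above so the core can snapshot the word selected through
 * xfer->ptp_sts_word_pre/post.  foo_spi_write_word() is hypothetical.
 */
#if 0
static void foo_spi_pio_tx(struct spi_controller *ctlr,
			   struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, false);
		foo_spi_write_word(ctlr, tx ? tx[i] : 0);
		spi_take_timestamp_post(ctlr, xfer, i, false);
	}
}
#endif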
1779
1780 /**
1781  * spi_set_thread_rt - set the controller to pump at realtime priority
1782  * @ctlr: controller to boost priority of
1783  *
1784  * This can be called because the controller requested realtime priority
1785  * (by setting the ->rt value before calling spi_register_controller()) or
1786  * because a device on the bus said that its transfers needed realtime
1787  * priority.
1788  *
1789  * NOTE: at the moment if any device on a bus says it needs realtime then
1790  * the thread will be at realtime priority for all transfers on that
1791  * controller.  If this eventually becomes a problem we may see if we can
1792  * find a way to boost the priority only temporarily during relevant
1793  * transfers.
1794  */
1795 static void spi_set_thread_rt(struct spi_controller *ctlr)
1796 {
1797         dev_info(&ctlr->dev,
1798                 "will run message pump with realtime priority\n");
1799         sched_set_fifo(ctlr->kworker->task);
1800 }
1801
1802 static int spi_init_queue(struct spi_controller *ctlr)
1803 {
1804         ctlr->running = false;
1805         ctlr->busy = false;
1806
1807         ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1808         if (IS_ERR(ctlr->kworker)) {
1809                 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1810                 return PTR_ERR(ctlr->kworker);
1811         }
1812
1813         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1814
1815         /*
1816          * Controller config will indicate if this controller should run the
1817          * message pump with high (realtime) priority to reduce the transfer
1818          * latency on the bus by minimising the delay between a transfer
1819          * request and the scheduling of the message pump thread. Without this
1820          * setting the message pump thread will remain at default priority.
1821          */
1822         if (ctlr->rt)
1823                 spi_set_thread_rt(ctlr);
1824
1825         return 0;
1826 }
1827
1828 /**
1829  * spi_get_next_queued_message() - called by driver to check for queued
1830  * messages
1831  * @ctlr: the controller to check for queued messages
1832  *
1833  * If there are more messages in the queue, the next message is returned from
1834  * this call.
1835  *
1836  * Return: the next message in the queue, else NULL if the queue is empty.
1837  */
1838 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1839 {
1840         struct spi_message *next;
1841         unsigned long flags;
1842
1843         /* get a pointer to the next message, if any */
1844         spin_lock_irqsave(&ctlr->queue_lock, flags);
1845         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1846                                         queue);
1847         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1848
1849         return next;
1850 }
1851 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1852
1853 /**
1854  * spi_finalize_current_message() - the current message is complete
1855  * @ctlr: the controller to return the message to
1856  *
1857  * Called by the driver to notify the core that the message in the front of the
1858  * queue is complete and can be removed from the queue.
1859  */
1860 void spi_finalize_current_message(struct spi_controller *ctlr)
1861 {
1862         struct spi_transfer *xfer;
1863         struct spi_message *mesg;
1864         unsigned long flags;
1865         int ret;
1866
1867         spin_lock_irqsave(&ctlr->queue_lock, flags);
1868         mesg = ctlr->cur_msg;
1869         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1870
1871         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1872                 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1873                         ptp_read_system_postts(xfer->ptp_sts);
1874                         xfer->ptp_sts_word_post = xfer->len;
1875                 }
1876         }
1877
1878         if (unlikely(ctlr->ptp_sts_supported))
1879                 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1880                         WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1881
1882         spi_unmap_msg(ctlr, mesg);
1883
1884         /*
1885          * In the prepare_messages callback the SPI bus has the opportunity
1886          * to split a transfer to smaller chunks.
1887          *
1888          * Release the split transfers here since spi_map_msg() is done on
1889          * the split transfers.
1890          */
1891         spi_res_release(ctlr, mesg);
1892
1893         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1894                 ret = ctlr->unprepare_message(ctlr, mesg);
1895                 if (ret) {
1896                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1897                                 ret);
1898                 }
1899         }
1900
1901         spin_lock_irqsave(&ctlr->queue_lock, flags);
1902         ctlr->cur_msg = NULL;
1903         ctlr->cur_msg_prepared = false;
1904         ctlr->fallback = false;
1905         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1906         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1907
1908         trace_spi_message_done(mesg);
1909
1910         mesg->state = NULL;
1911         if (mesg->complete)
1912                 mesg->complete(mesg->context);
1913 }
1914 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
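
/*
 * Example (illustrative sketch only, not part of the original source):
 * a driver providing its own transfer_one_message() must still report
 * completion through spi_finalize_current_message() so the queue can
 * advance.  foo_do_message() is hypothetical.
 */
#if 0
static int foo_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	int ret = foo_do_message(ctlr, msg);

	msg->status = ret;
	spi_finalize_current_message(ctlr);

	return ret;
}
#endif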
1915
1916 static int spi_start_queue(struct spi_controller *ctlr)
1917 {
1918         unsigned long flags;
1919
1920         spin_lock_irqsave(&ctlr->queue_lock, flags);
1921
1922         if (ctlr->running || ctlr->busy) {
1923                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1924                 return -EBUSY;
1925         }
1926
1927         ctlr->running = true;
1928         ctlr->cur_msg = NULL;
1929         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1930
1931         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1932
1933         return 0;
1934 }
1935
1936 static int spi_stop_queue(struct spi_controller *ctlr)
1937 {
1938         unsigned long flags;
1939         unsigned limit = 500;
1940         int ret = 0;
1941
1942         spin_lock_irqsave(&ctlr->queue_lock, flags);
1943
1944         /*
1945          * This is a bit lame, but is optimized for the common execution path.
1946          * A wait_queue on the ctlr->busy could be used, but then the common
1947          * execution path (pump_messages) would be required to call wake_up or
1948          * friends on every SPI message. Do this instead.
1949          */
1950         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1951                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1952                 usleep_range(10000, 11000);
1953                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1954         }
1955
1956         if (!list_empty(&ctlr->queue) || ctlr->busy)
1957                 ret = -EBUSY;
1958         else
1959                 ctlr->running = false;
1960
1961         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1962
1963         if (ret) {
1964                 dev_warn(&ctlr->dev, "could not stop message queue\n");
1965                 return ret;
1966         }
1967         return ret;
1968 }
1969
1970 static int spi_destroy_queue(struct spi_controller *ctlr)
1971 {
1972         int ret;
1973
1974         ret = spi_stop_queue(ctlr);
1975
1976         /*
1977          * kthread_flush_worker will block until all work is done.
1978          * If the reason that stop_queue timed out is that the work will never
1979          * finish, then it does no good to call flush/stop thread, so
1980          * return anyway.
1981          */
1982         if (ret) {
1983                 dev_err(&ctlr->dev, "problem destroying queue\n");
1984                 return ret;
1985         }
1986
1987         kthread_destroy_worker(ctlr->kworker);
1988
1989         return 0;
1990 }
1991
1992 static int __spi_queued_transfer(struct spi_device *spi,
1993                                  struct spi_message *msg,
1994                                  bool need_pump)
1995 {
1996         struct spi_controller *ctlr = spi->controller;
1997         unsigned long flags;
1998
1999         spin_lock_irqsave(&ctlr->queue_lock, flags);
2000
2001         if (!ctlr->running) {
2002                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2003                 return -ESHUTDOWN;
2004         }
2005         msg->actual_length = 0;
2006         msg->status = -EINPROGRESS;
2007
2008         list_add_tail(&msg->queue, &ctlr->queue);
2009         if (!ctlr->busy && need_pump)
2010                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2011
2012         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2013         return 0;
2014 }
2015
2016 /**
2017  * spi_queued_transfer - transfer function for queued transfers
2018  * @spi: spi device which is requesting transfer
2019  * @msg: spi message which is to be handled; it is queued to the driver queue
2020  *
2021  * Return: zero on success, else a negative error code.
2022  */
2023 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2024 {
2025         return __spi_queued_transfer(spi, msg, true);
2026 }
2027
2028 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2029 {
2030         int ret;
2031
2032         ctlr->transfer = spi_queued_transfer;
2033         if (!ctlr->transfer_one_message)
2034                 ctlr->transfer_one_message = spi_transfer_one_message;
2035
2036         /* Initialize and start queue */
2037         ret = spi_init_queue(ctlr);
2038         if (ret) {
2039                 dev_err(&ctlr->dev, "problem initializing queue\n");
2040                 goto err_init_queue;
2041         }
2042         ctlr->queued = true;
2043         ret = spi_start_queue(ctlr);
2044         if (ret) {
2045                 dev_err(&ctlr->dev, "problem starting queue\n");
2046                 goto err_start_queue;
2047         }
2048
2049         return 0;
2050
2051 err_start_queue:
2052         spi_destroy_queue(ctlr);
2053 err_init_queue:
2054         return ret;
2055 }
2056
2057 /**
2058  * spi_flush_queue - Send all pending messages in the queue from the caller's
2059  *                   context
2060  * @ctlr: controller to process queue for
2061  *
2062  * This should be used when one wants to ensure all pending messages have been
2063  * sent before doing something. It is used by the spi-mem code to make sure SPI
2064  * memory operations do not preempt regular SPI transfers that have been queued
2065  * before the spi-mem operation.
2066  */
2067 void spi_flush_queue(struct spi_controller *ctlr)
2068 {
2069         if (ctlr->transfer == spi_queued_transfer)
2070                 __spi_pump_messages(ctlr, false);
2071 }
2072
2073 /*-------------------------------------------------------------------------*/
2074
2075 #if defined(CONFIG_OF)
2076 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2077                            struct device_node *nc)
2078 {
2079         u32 value;
2080         int rc;
2081
2082         /* Mode (clock phase/polarity/etc.) */
2083         if (of_property_read_bool(nc, "spi-cpha"))
2084                 spi->mode |= SPI_CPHA;
2085         if (of_property_read_bool(nc, "spi-cpol"))
2086                 spi->mode |= SPI_CPOL;
2087         if (of_property_read_bool(nc, "spi-3wire"))
2088                 spi->mode |= SPI_3WIRE;
2089         if (of_property_read_bool(nc, "spi-lsb-first"))
2090                 spi->mode |= SPI_LSB_FIRST;
2091         if (of_property_read_bool(nc, "spi-cs-high"))
2092                 spi->mode |= SPI_CS_HIGH;
2093
2094         /* Device DUAL/QUAD mode */
2095         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2096                 switch (value) {
2097                 case 0:
2098                         spi->mode |= SPI_NO_TX;
2099                         break;
2100                 case 1:
2101                         break;
2102                 case 2:
2103                         spi->mode |= SPI_TX_DUAL;
2104                         break;
2105                 case 4:
2106                         spi->mode |= SPI_TX_QUAD;
2107                         break;
2108                 case 8:
2109                         spi->mode |= SPI_TX_OCTAL;
2110                         break;
2111                 default:
2112                         dev_warn(&ctlr->dev,
2113                                 "spi-tx-bus-width %d not supported\n",
2114                                 value);
2115                         break;
2116                 }
2117         }
2118
2119         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2120                 switch (value) {
2121                 case 0:
2122                         spi->mode |= SPI_NO_RX;
2123                         break;
2124                 case 1:
2125                         break;
2126                 case 2:
2127                         spi->mode |= SPI_RX_DUAL;
2128                         break;
2129                 case 4:
2130                         spi->mode |= SPI_RX_QUAD;
2131                         break;
2132                 case 8:
2133                         spi->mode |= SPI_RX_OCTAL;
2134                         break;
2135                 default:
2136                         dev_warn(&ctlr->dev,
2137                                 "spi-rx-bus-width %d not supported\n",
2138                                 value);
2139                         break;
2140                 }
2141         }
2142
2143         if (spi_controller_is_slave(ctlr)) {
2144                 if (!of_node_name_eq(nc, "slave")) {
2145                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2146                                 nc);
2147                         return -EINVAL;
2148                 }
2149                 return 0;
2150         }
2151
2152         /* Device address */
2153         rc = of_property_read_u32(nc, "reg", &value);
2154         if (rc) {
2155                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2156                         nc, rc);
2157                 return rc;
2158         }
2159         spi->chip_select = value;
2160
2161         /* Device speed */
2162         if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2163                 spi->max_speed_hz = value;
2164
2165         return 0;
2166 }
2167
2168 static struct spi_device *
2169 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2170 {
2171         struct spi_device *spi;
2172         int rc;
2173
2174         /* Alloc an spi_device */
2175         spi = spi_alloc_device(ctlr);
2176         if (!spi) {
2177                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2178                 rc = -ENOMEM;
2179                 goto err_out;
2180         }
2181
2182         /* Select device driver */
2183         rc = of_modalias_node(nc, spi->modalias,
2184                                 sizeof(spi->modalias));
2185         if (rc < 0) {
2186                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2187                 goto err_out;
2188         }
2189
2190         rc = of_spi_parse_dt(ctlr, spi, nc);
2191         if (rc)
2192                 goto err_out;
2193
2194         /* Store a pointer to the node in the device structure */
2195         of_node_get(nc);
2196         spi->dev.of_node = nc;
2197         spi->dev.fwnode = of_fwnode_handle(nc);
2198
2199         /* Register the new device */
2200         rc = spi_add_device(spi);
2201         if (rc) {
2202                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2203                 goto err_of_node_put;
2204         }
2205
2206         return spi;
2207
2208 err_of_node_put:
2209         of_node_put(nc);
2210 err_out:
2211         spi_dev_put(spi);
2212         return ERR_PTR(rc);
2213 }
2214
2215 /**
2216  * of_register_spi_devices() - Register child devices onto the SPI bus
2217  * @ctlr:       Pointer to spi_controller device
2218  *
2219  * Registers an spi_device for each child node of the controller node which
2220  * represents a valid SPI slave.
2221  */
2222 static void of_register_spi_devices(struct spi_controller *ctlr)
2223 {
2224         struct spi_device *spi;
2225         struct device_node *nc;
2226
2227         if (!ctlr->dev.of_node)
2228                 return;
2229
2230         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2231                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2232                         continue;
2233                 spi = of_register_spi_device(ctlr, nc);
2234                 if (IS_ERR(spi)) {
2235                         dev_warn(&ctlr->dev,
2236                                  "Failed to create SPI device for %pOF\n", nc);
2237                         of_node_clear_flag(nc, OF_POPULATED);
2238                 }
2239         }
2240 }
2241 #else
2242 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2243 #endif
2244
2245 /**
2246  * spi_new_ancillary_device() - Register ancillary SPI device
2247  * @spi:         Pointer to the main SPI device registering the ancillary device
2248  * @chip_select: Chip Select of the ancillary device
2249  *
2250  * Register an ancillary SPI device; for example some chips have a chip-select
2251  * for normal device usage and another one for setup/firmware upload.
2252  *
2253  * This may only be called from the main SPI device's probe routine.
2254  *
2255  * Return: a pointer to the new device, or ERR_PTR() on failure
2256  */
2257 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2258                                              u8 chip_select)
2259 {
2260         struct spi_device *ancillary;
2261         int rc = 0;
2262
2263         /* Alloc an spi_device */
2264         ancillary = spi_alloc_device(spi->controller);
2265         if (!ancillary) {
2266                 rc = -ENOMEM;
2267                 goto err_out;
2268         }
2269
2270         strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2271
2272         /* Use provided chip-select for ancillary device */
2273         ancillary->chip_select = chip_select;
2274
2275         /* Take over SPI mode/speed from SPI main device */
2276         ancillary->max_speed_hz = spi->max_speed_hz;
2277         ancillary->mode = spi->mode;
2278
2279         /* Register the new device */
2280         rc = spi_add_device_locked(ancillary);
2281         if (rc) {
2282                 dev_err(&spi->dev, "failed to register ancillary device\n");
2283                 goto err_out;
2284         }
2285
2286         return ancillary;
2287
2288 err_out:
2289         spi_dev_put(ancillary);
2290         return ERR_PTR(rc);
2291 }
2292 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
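
/*
 * Example (illustrative sketch only, not part of the original source):
 * a client driver whose chip exposes a second chip select, e.g. for
 * firmware upload, can register it from its probe().  The chip-select
 * value 1 is hypothetical.
 */
#if 0
static int foo_probe(struct spi_device *spi)
{
	struct spi_device *fw_spi;

	fw_spi = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(fw_spi))
		return PTR_ERR(fw_spi);

	/* Use fw_spi for setup/firmware transfers and spi for normal I/O */
	return 0;
}
#endif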
2293
2294 #ifdef CONFIG_ACPI
2295 struct acpi_spi_lookup {
2296         struct spi_controller   *ctlr;
2297         u32                     max_speed_hz;
2298         u32                     mode;
2299         int                     irq;
2300         u8                      bits_per_word;
2301         u8                      chip_select;
2302         int                     n;
2303         int                     index;
2304 };
2305
2306 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2307 {
2308         struct acpi_resource_spi_serialbus *sb;
2309         int *count = data;
2310
2311         if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2312                 return 1;
2313
2314         sb = &ares->data.spi_serial_bus;
2315         if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2316                 return 1;
2317
2318         *count = *count + 1;
2319
2320         return 1;
2321 }
2322
2323 /**
2324  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2325  * @adev:       ACPI device
2326  *
2327  * Returns the number of SpiSerialBus resources in the ACPI device's
2328  * resource list, or a negative error code.
2329  */
2330 int acpi_spi_count_resources(struct acpi_device *adev)
2331 {
2332         LIST_HEAD(r);
2333         int count = 0;
2334         int ret;
2335
2336         ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2337         if (ret < 0)
2338                 return ret;
2339
2340         acpi_dev_free_resource_list(&r);
2341
2342         return count;
2343 }
2344 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
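
/*
 * Example (illustrative sketch only, not part of the original source):
 * a glue driver can pair acpi_spi_count_resources() with
 * acpi_spi_device_alloc() (below) to instantiate one spi_device per
 * SpiSerialBus resource of an ACPI node.
 */
#if 0
static int foo_instantiate_spi_resources(struct acpi_device *adev)
{
	struct spi_device *spi;
	int i, count;

	count = acpi_spi_count_resources(adev);
	if (count <= 0)
		return count;

	for (i = 0; i < count; i++) {
		spi = acpi_spi_device_alloc(NULL, adev, i);
		if (IS_ERR(spi))
			return PTR_ERR(spi);

		if (spi_add_device(spi)) {
			spi_dev_put(spi);
			return -ENODEV;
		}
	}

	return 0;
}
#endif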
2345
2346 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2347                                             struct acpi_spi_lookup *lookup)
2348 {
2349         const union acpi_object *obj;
2350
2351         if (!x86_apple_machine)
2352                 return;
2353
2354         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2355             && obj->buffer.length >= 4)
2356                 lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2357
2358         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2359             && obj->buffer.length == 8)
2360                 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2361
2362         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2363             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2364                 lookup->mode |= SPI_LSB_FIRST;
2365
2366         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2367             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2368                 lookup->mode |= SPI_CPOL;
2369
2370         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2371             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2372                 lookup->mode |= SPI_CPHA;
2373 }
2374
2375 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2376
2377 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2378 {
2379         struct acpi_spi_lookup *lookup = data;
2380         struct spi_controller *ctlr = lookup->ctlr;
2381
2382         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2383                 struct acpi_resource_spi_serialbus *sb;
2384                 acpi_handle parent_handle;
2385                 acpi_status status;
2386
2387                 sb = &ares->data.spi_serial_bus;
2388                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2389
2390                         if (lookup->index != -1 && lookup->n++ != lookup->index)
2391                                 return 1;
2392
2393                         if (lookup->index == -1 && !ctlr)
2394                                 return -ENODEV;
2395
2396                         status = acpi_get_handle(NULL,
2397                                                  sb->resource_source.string_ptr,
2398                                                  &parent_handle);
2399
2400                         if (ACPI_FAILURE(status))
2401                                 return -ENODEV;
2402
2403                         if (ctlr) {
2404                                 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2405                                         return -ENODEV;
2406                         } else {
2407                                 struct acpi_device *adev;
2408
2409                                 if (acpi_bus_get_device(parent_handle, &adev))
2410                                         return -ENODEV;
2411
2412                                 ctlr = acpi_spi_find_controller_by_adev(adev);
2413                                 if (!ctlr)
2414                                         return -ENODEV;
2415
2416                                 lookup->ctlr = ctlr;
2417                         }
2418
2419                         /*
2420                          * ACPI DeviceSelection numbering is handled by the
2421                          * host controller driver in Windows and can vary
2422                          * from driver to driver. In Linux we always expect
2423                          * 0 .. max - 1 so we need to ask the driver to
2424                          * translate between the two schemes.
2425                          */
2426                         if (ctlr->fw_translate_cs) {
2427                                 int cs = ctlr->fw_translate_cs(ctlr,
2428                                                 sb->device_selection);
2429                                 if (cs < 0)
2430                                         return cs;
2431                                 lookup->chip_select = cs;
2432                         } else {
2433                                 lookup->chip_select = sb->device_selection;
2434                         }
2435
2436                         lookup->max_speed_hz = sb->connection_speed;
2437                         lookup->bits_per_word = sb->data_bit_length;
2438
2439                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2440                                 lookup->mode |= SPI_CPHA;
2441                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2442                                 lookup->mode |= SPI_CPOL;
2443                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2444                                 lookup->mode |= SPI_CS_HIGH;
2445                 }
2446         } else if (lookup->irq < 0) {
2447                 struct resource r;
2448
2449                 if (acpi_dev_resource_interrupt(ares, 0, &r))
2450                         lookup->irq = r.start;
2451         }
2452
2453         /* Always tell the ACPI core to skip this resource */
2454         return 1;
2455 }
2456
2457 /**
2458  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2459  * @ctlr: controller to which the spi device belongs
2460  * @adev: ACPI Device for the spi device
2461  * @index: Index of the spi resource inside the ACPI Node
2462  *
2463  * This should be used to allocate a new spi device from an ACPI Node.
2464  * The caller is responsible for calling spi_add_device to register the spi device.
2465  *
2466  * If @ctlr is set to NULL, the controller for the spi device will be looked
2467  * up using the resource_source of the SpiSerialBus resource.
2468  * If @index is set to -1, the index is not used.
2469  * Note: if @index is -1, @ctlr must be set.
2470  *
2471  * Return: a pointer to the new device, or ERR_PTR on error.
2472  */
2473 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2474                                          struct acpi_device *adev,
2475                                          int index)
2476 {
2477         acpi_handle parent_handle = NULL;
2478         struct list_head resource_list;
2479         struct acpi_spi_lookup lookup = {};
2480         struct spi_device *spi;
2481         int ret;
2482
2483         if (!ctlr && index == -1)
2484                 return ERR_PTR(-EINVAL);
2485
2486         lookup.ctlr             = ctlr;
2487         lookup.irq              = -1;
2488         lookup.index            = index;
2489         lookup.n                = 0;
2490
2491         INIT_LIST_HEAD(&resource_list);
2492         ret = acpi_dev_get_resources(adev, &resource_list,
2493                                      acpi_spi_add_resource, &lookup);
2494         acpi_dev_free_resource_list(&resource_list);
2495
2496         if (ret < 0)
2497                 /* found SPI in _CRS but it points to another controller */
2498                 return ERR_PTR(-ENODEV);
2499
2500         if (!lookup.max_speed_hz &&
2501             ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2502             ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2503                 /* Apple does not use _CRS but nested devices for SPI slaves */
2504                 acpi_spi_parse_apple_properties(adev, &lookup);
2505         }
2506
2507         if (!lookup.max_speed_hz)
2508                 return ERR_PTR(-ENODEV);
2509
2510         spi = spi_alloc_device(lookup.ctlr);
2511         if (!spi) {
2512                 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2513                         dev_name(&adev->dev));
2514                 return ERR_PTR(-ENOMEM);
2515         }
2516
2517         ACPI_COMPANION_SET(&spi->dev, adev);
2518         spi->max_speed_hz       = lookup.max_speed_hz;
2519         spi->mode               |= lookup.mode;
2520         spi->irq                = lookup.irq;
2521         spi->bits_per_word      = lookup.bits_per_word;
2522         spi->chip_select        = lookup.chip_select;
2523
2524         return spi;
2525 }
2526 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2527
2528 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2529                                             struct acpi_device *adev)
2530 {
2531         struct spi_device *spi;
2532
2533         if (acpi_bus_get_status(adev) || !adev->status.present ||
2534             acpi_device_enumerated(adev))
2535                 return AE_OK;
2536
2537         spi = acpi_spi_device_alloc(ctlr, adev, -1);
2538         if (IS_ERR(spi)) {
2539                 if (PTR_ERR(spi) == -ENOMEM)
2540                         return AE_NO_MEMORY;
2541                 else
2542                         return AE_OK;
2543         }
2544
2545         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2546                           sizeof(spi->modalias));
2547
2548         if (spi->irq < 0)
2549                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2550
2551         acpi_device_set_enumerated(adev);
2552
2553         adev->power.flags.ignore_parent = true;
2554         if (spi_add_device(spi)) {
2555                 adev->power.flags.ignore_parent = false;
2556                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2557                         dev_name(&adev->dev));
2558                 spi_dev_put(spi);
2559         }
2560
2561         return AE_OK;
2562 }
2563
2564 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2565                                        void *data, void **return_value)
2566 {
2567         struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2568         struct spi_controller *ctlr = data;
2569
2570         if (!adev)
2571                 return AE_OK;
2572
2573         return acpi_register_spi_device(ctlr, adev);
2574 }
2575
2576 #define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2577
2578 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2579 {
2580         acpi_status status;
2581         acpi_handle handle;
2582
2583         handle = ACPI_HANDLE(ctlr->dev.parent);
2584         if (!handle)
2585                 return;
2586
2587         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2588                                      SPI_ACPI_ENUMERATE_MAX_DEPTH,
2589                                      acpi_spi_add_device, NULL, ctlr, NULL);
2590         if (ACPI_FAILURE(status))
2591                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2592 }
2593 #else
2594 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2595 #endif /* CONFIG_ACPI */
2596
2597 static void spi_controller_release(struct device *dev)
2598 {
2599         struct spi_controller *ctlr;
2600
2601         ctlr = container_of(dev, struct spi_controller, dev);
2602         kfree(ctlr);
2603 }
2604
2605 static struct class spi_master_class = {
2606         .name           = "spi_master",
2607         .owner          = THIS_MODULE,
2608         .dev_release    = spi_controller_release,
2609         .dev_groups     = spi_master_groups,
2610 };
2611
2612 #ifdef CONFIG_SPI_SLAVE
2613 /**
2614  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2615  *                   controller
2616  * @spi: device used for the current transfer
2617  */
2618 int spi_slave_abort(struct spi_device *spi)
2619 {
2620         struct spi_controller *ctlr = spi->controller;
2621
2622         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2623                 return ctlr->slave_abort(ctlr);
2624
2625         return -ENOTSUPP;
2626 }
2627 EXPORT_SYMBOL_GPL(spi_slave_abort);
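
/*
 * Example (illustrative sketch only, not part of the original source):
 * a slave protocol driver can abort a request that the remote master
 * never serviced, e.g. from a timeout handler.
 */
#if 0
static void foo_slave_request_timeout(struct spi_device *spi)
{
	if (spi_slave_abort(spi))
		dev_warn(&spi->dev, "could not abort pending transfer\n");
}
#endif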
2628
2629 static int match_true(struct device *dev, void *data)
2630 {
2631         return 1;
2632 }
2633
2634 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2635                           char *buf)
2636 {
2637         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2638                                                    dev);
2639         struct device *child;
2640
2641         child = device_find_child(&ctlr->dev, NULL, match_true);
2642         return sprintf(buf, "%s\n",
2643                        child ? to_spi_device(child)->modalias : NULL);
2644 }
2645
2646 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2647                            const char *buf, size_t count)
2648 {
2649         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2650                                                    dev);
2651         struct spi_device *spi;
2652         struct device *child;
2653         char name[32];
2654         int rc;
2655
2656         rc = sscanf(buf, "%31s", name);
2657         if (rc != 1 || !name[0])
2658                 return -EINVAL;
2659
2660         child = device_find_child(&ctlr->dev, NULL, match_true);
2661         if (child) {
2662                 /* Remove registered slave */
2663                 device_unregister(child);
2664                 put_device(child);
2665         }
2666
2667         if (strcmp(name, "(null)")) {
2668                 /* Register new slave */
2669                 spi = spi_alloc_device(ctlr);
2670                 if (!spi)
2671                         return -ENOMEM;
2672
2673                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2674
2675                 rc = spi_add_device(spi);
2676                 if (rc) {
2677                         spi_dev_put(spi);
2678                         return rc;
2679                 }
2680         }
2681
2682         return count;
2683 }
2684
2685 static DEVICE_ATTR_RW(slave);
2686
2687 static struct attribute *spi_slave_attrs[] = {
2688         &dev_attr_slave.attr,
2689         NULL,
2690 };
2691
2692 static const struct attribute_group spi_slave_group = {
2693         .attrs = spi_slave_attrs,
2694 };
2695
2696 static const struct attribute_group *spi_slave_groups[] = {
2697         &spi_controller_statistics_group,
2698         &spi_slave_group,
2699         NULL,
2700 };
2701
2702 static struct class spi_slave_class = {
2703         .name           = "spi_slave",
2704         .owner          = THIS_MODULE,
2705         .dev_release    = spi_controller_release,
2706         .dev_groups     = spi_slave_groups,
2707 };
2708 #else
2709 extern struct class spi_slave_class;    /* dummy */
2710 #endif
2711
2712 /**
2713  * __spi_alloc_controller - allocate an SPI master or slave controller
2714  * @dev: the controller, possibly using the platform_bus
2715  * @size: how much zeroed driver-private data to allocate; the pointer to this
2716  *      memory is in the driver_data field of the returned device, accessible
2717  *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2718  *      drivers granting DMA access to portions of their private data need to
2719  *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2720  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2721  *      slave (true) controller
2722  * Context: can sleep
2723  *
2724  * This call is used only by SPI controller drivers, which are the
2725  * only ones directly touching chip registers.  It's how they allocate
2726  * an spi_controller structure, prior to calling spi_register_controller().
2727  *
2728  * This must be called from context that can sleep.
2729  *
2730  * The caller is responsible for assigning the bus number and initializing the
2731  * controller's methods before calling spi_register_controller(); and (after
2732  * errors adding the device) calling spi_controller_put() to prevent a memory
2733  * leak.
2734  *
2735  * Return: the SPI controller structure on success, else NULL.
2736  */
2737 struct spi_controller *__spi_alloc_controller(struct device *dev,
2738                                               unsigned int size, bool slave)
2739 {
2740         struct spi_controller   *ctlr;
2741         size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2742
2743         if (!dev)
2744                 return NULL;
2745
2746         ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2747         if (!ctlr)
2748                 return NULL;
2749
2750         device_initialize(&ctlr->dev);
2751         INIT_LIST_HEAD(&ctlr->queue);
2752         spin_lock_init(&ctlr->queue_lock);
2753         spin_lock_init(&ctlr->bus_lock_spinlock);
2754         mutex_init(&ctlr->bus_lock_mutex);
2755         mutex_init(&ctlr->io_mutex);
2756         mutex_init(&ctlr->add_lock);
2757         ctlr->bus_num = -1;
2758         ctlr->num_chipselect = 1;
2759         ctlr->slave = slave;
2760         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2761                 ctlr->dev.class = &spi_slave_class;
2762         else
2763                 ctlr->dev.class = &spi_master_class;
2764         ctlr->dev.parent = dev;
2765         pm_suspend_ignore_children(&ctlr->dev, true);
2766         spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2767
2768         return ctlr;
2769 }
2770 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2771
2772 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2773 {
2774         spi_controller_put(*(struct spi_controller **)ctlr);
2775 }
2776
2777 /**
2778  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2779  * @dev: physical device of SPI controller
2780  * @size: how much zeroed driver-private data to allocate
2781  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2782  * Context: can sleep
2783  *
2784  * Allocate an SPI controller and automatically release a reference on it
2785  * when @dev is unbound from its driver.  Drivers are thus relieved from
2786  * having to call spi_controller_put().
2787  *
2788  * The arguments to this function are identical to __spi_alloc_controller().
2789  *
2790  * Return: the SPI controller structure on success, else NULL.
2791  */
2792 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2793                                                    unsigned int size,
2794                                                    bool slave)
2795 {
2796         struct spi_controller **ptr, *ctlr;
2797
2798         ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2799                            GFP_KERNEL);
2800         if (!ptr)
2801                 return NULL;
2802
2803         ctlr = __spi_alloc_controller(dev, size, slave);
2804         if (ctlr) {
2805                 ctlr->devm_allocated = true;
2806                 *ptr = ctlr;
2807                 devres_add(dev, ptr);
2808         } else {
2809                 devres_free(ptr);
2810         }
2811
2812         return ctlr;
2813 }
2814 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
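
/*
 * Example (illustrative sketch only, not part of the original source):
 * a controller driver usually reaches this helper through the
 * devm_spi_alloc_master() wrapper, allocating its private data in the
 * same call, then fills in its methods before registering.  The foo_*
 * names and the chip-select count are hypothetical.
 */
#if 0
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_spi *fs;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*fs));
	if (!ctlr)
		return -ENOMEM;

	fs = spi_controller_get_devdata(ctlr);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->num_chipselect = 4;
	ctlr->transfer_one = foo_spi_transfer_one;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
#endif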
2815
2816 /**
2817  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2818  * @ctlr: The SPI master to grab GPIO descriptors for
2819  */
2820 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2821 {
2822         int nb, i;
2823         struct gpio_desc **cs;
2824         struct device *dev = &ctlr->dev;
2825         unsigned long native_cs_mask = 0;
2826         unsigned int num_cs_gpios = 0;
2827
2828         nb = gpiod_count(dev, "cs");
2829         if (nb < 0) {
2830                 /* No GPIOs at all is fine, else return the error */
2831                 if (nb == -ENOENT)
2832                         return 0;
2833                 return nb;
2834         }
2835
2836         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2837
2838         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2839                           GFP_KERNEL);
2840         if (!cs)
2841                 return -ENOMEM;
2842         ctlr->cs_gpiods = cs;
2843
2844         for (i = 0; i < nb; i++) {
2845                 /*
2846                  * Most chipselects are active low; the inverted
2847                  * semantics are handled by special quirks in gpiolib,
2848                  * so initializing them to GPIOD_OUT_LOW here means
2849                  * "unasserted", which in most cases will drive the
2850                  * physical line high.
2851                  */
2852                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2853                                                       GPIOD_OUT_LOW);
2854                 if (IS_ERR(cs[i]))
2855                         return PTR_ERR(cs[i]);
2856
2857                 if (cs[i]) {
2858                         /*
2859                          * If we find a CS GPIO, name it after the device and
2860                          * chip select line.
2861                          */
2862                         char *gpioname;
2863
2864                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2865                                                   dev_name(dev), i);
2866                         if (!gpioname)
2867                                 return -ENOMEM;
2868                         gpiod_set_consumer_name(cs[i], gpioname);
2869                         num_cs_gpios++;
2870                         continue;
2871                 }
2872
2873                 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2874                         dev_err(dev, "Invalid native chip select %d\n", i);
2875                         return -EINVAL;
2876                 }
2877                 native_cs_mask |= BIT(i);
2878         }
2879
2880         ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2881
2882         if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2883             ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2884                 dev_err(dev, "No unused native chip select available\n");
2885                 return -EINVAL;
2886         }
2887
2888         return 0;
2889 }
2890
2891 static int spi_controller_check_ops(struct spi_controller *ctlr)
2892 {
2893         /*
2894          * The controller may implement only the high-level SPI-memory like
2895          * operations if it does not support regular SPI transfers, and this is a
2896          * valid use case.
2897          * If ->mem_ops is NULL, we request that at least one of the
2898          * ->transfer_xxx() methods be implemented.
2899          */
2900         if (ctlr->mem_ops) {
2901                 if (!ctlr->mem_ops->exec_op)
2902                         return -EINVAL;
2903         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2904                    !ctlr->transfer_one_message) {
2905                 return -EINVAL;
2906         }
2907
2908         return 0;
2909 }
2910
2911 /**
2912  * spi_register_controller - register SPI master or slave controller
2913  * @ctlr: initialized master, originally from spi_alloc_master() or
2914  *      spi_alloc_slave()
2915  * Context: can sleep
2916  *
2917  * SPI controllers connect to their drivers using some non-SPI bus,
2918  * such as the platform bus.  The final stage of probe() in that code
2919  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2920  *
2921  * SPI controllers use board-specific (often SoC-specific) bus numbers,
2922  * and board-specific addressing for SPI devices combines those numbers
2923  * with chip select numbers.  Since SPI does not directly support dynamic
2924  * device identification, boards need configuration tables telling which
2925  * chip is at which address.
2926  *
2927  * This must be called from context that can sleep.  It returns zero on
2928  * success, else a negative error code (dropping the controller's refcount).
2929  * After a successful return, the caller is responsible for calling
2930  * spi_unregister_controller().
2931  *
2932  * Return: zero on success, else a negative error code.
2933  */
2934 int spi_register_controller(struct spi_controller *ctlr)
2935 {
2936         struct device           *dev = ctlr->dev.parent;
2937         struct boardinfo        *bi;
2938         int                     status;
2939         int                     id, first_dynamic;
2940
2941         if (!dev)
2942                 return -ENODEV;
2943
2944         /*
2945          * Make sure all necessary hooks are implemented before registering
2946          * the SPI controller.
2947          */
2948         status = spi_controller_check_ops(ctlr);
2949         if (status)
2950                 return status;
2951
2952         if (ctlr->bus_num >= 0) {
2953                 /* devices with a fixed bus number must check in with that number */
2954                 mutex_lock(&board_lock);
2955                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2956                         ctlr->bus_num + 1, GFP_KERNEL);
2957                 mutex_unlock(&board_lock);
2958                 if (WARN(id < 0, "couldn't get idr"))
2959                         return id == -ENOSPC ? -EBUSY : id;
2960                 ctlr->bus_num = id;
2961         } else if (ctlr->dev.of_node) {
2962                 /* allocate dynamic bus number using Linux idr */
2963                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2964                 if (id >= 0) {
2965                         ctlr->bus_num = id;
2966                         mutex_lock(&board_lock);
2967                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2968                                        ctlr->bus_num + 1, GFP_KERNEL);
2969                         mutex_unlock(&board_lock);
2970                         if (WARN(id < 0, "couldn't get idr"))
2971                                 return id == -ENOSPC ? -EBUSY : id;
2972                 }
2973         }
2974         if (ctlr->bus_num < 0) {
2975                 first_dynamic = of_alias_get_highest_id("spi");
2976                 if (first_dynamic < 0)
2977                         first_dynamic = 0;
2978                 else
2979                         first_dynamic++;
2980
2981                 mutex_lock(&board_lock);
2982                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2983                                0, GFP_KERNEL);
2984                 mutex_unlock(&board_lock);
2985                 if (WARN(id < 0, "couldn't get idr"))
2986                         return id;
2987                 ctlr->bus_num = id;
2988         }
2989         ctlr->bus_lock_flag = 0;
2990         init_completion(&ctlr->xfer_completion);
2991         if (!ctlr->max_dma_len)
2992                 ctlr->max_dma_len = INT_MAX;
2993
2994         /*
2995          * Register the device, then userspace will see it.
2996          * Registration fails if the bus ID is in use.
2997          */
2998         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2999
3000         if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3001                 status = spi_get_gpio_descs(ctlr);
3002                 if (status)
3003                         goto free_bus_id;
3004                 /*
3005                  * A controller using GPIO descriptors always
3006                  * supports SPI_CS_HIGH if need be.
3007                  */
3008                 ctlr->mode_bits |= SPI_CS_HIGH;
3009         }
3010
3011         /*
3012          * Even if it's just one always-selected device, there must
3013          * be at least one chipselect.
3014          */
3015         if (!ctlr->num_chipselect) {
3016                 status = -EINVAL;
3017                 goto free_bus_id;
3018         }
3019
3020         /* setting last_cs to -1 means no chip selected */
3021         ctlr->last_cs = -1;
3022
3023         status = device_add(&ctlr->dev);
3024         if (status < 0)
3025                 goto free_bus_id;
3026         dev_dbg(dev, "registered %s %s\n",
3027                         spi_controller_is_slave(ctlr) ? "slave" : "master",
3028                         dev_name(&ctlr->dev));
3029
3030         /*
3031          * If we're using a queued driver, start the queue. Note that we don't
3032          * need the queueing logic if the driver only supports high-level
3033          * memory operations.
3034          */
3035         if (ctlr->transfer) {
3036                 dev_info(dev, "controller is unqueued, this is deprecated\n");
3037         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3038                 status = spi_controller_initialize_queue(ctlr);
3039                 if (status) {
3040                         device_del(&ctlr->dev);
3041                         goto free_bus_id;
3042                 }
3043         }
3044         /* add statistics */
3045         spin_lock_init(&ctlr->statistics.lock);
3046
3047         mutex_lock(&board_lock);
3048         list_add_tail(&ctlr->list, &spi_controller_list);
3049         list_for_each_entry(bi, &board_list, list)
3050                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3051         mutex_unlock(&board_lock);
3052
3053         /* Register devices from the device tree and ACPI */
3054         of_register_spi_devices(ctlr);
3055         acpi_register_spi_devices(ctlr);
3056         return status;
3057
3058 free_bus_id:
3059         mutex_lock(&board_lock);
3060         idr_remove(&spi_master_idr, ctlr->bus_num);
3061         mutex_unlock(&board_lock);
3062         return status;
3063 }
3064 EXPORT_SYMBOL_GPL(spi_register_controller);
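
/*
 * Illustrative sketch (assumption): a non-managed registration pairs
 * spi_register_controller() in probe with spi_unregister_controller() in
 * remove, as the comment above requires of the caller. The controller is
 * devm-allocated here to keep the error path simple. foo_* names are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		int ret;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, 0);
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->bus_num = -1;			// request a dynamic bus number
 *		ctlr->num_chipselect = 2;
 *		ctlr->transfer_one = foo_transfer_one;	// hypothetical hook
 *
 *		ret = spi_register_controller(ctlr);
 *		if (ret)
 *			return ret;
 *
 *		platform_set_drvdata(pdev, ctlr);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		spi_unregister_controller(platform_get_drvdata(pdev));
 *		return 0;
 *	}
 */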
3065
3066 static void devm_spi_unregister(void *ctlr)
3067 {
3068         spi_unregister_controller(ctlr);
3069 }
3070
3071 /**
3072  * devm_spi_register_controller - register managed SPI master or slave
3073  *      controller
3074  * @dev:    device managing SPI controller
3075  * @ctlr: initialized controller, originally from spi_alloc_master() or
3076  *      spi_alloc_slave()
3077  * Context: can sleep
3078  *
3079  * Register an SPI controller as with spi_register_controller(); the
3080  * controller will automatically be unregistered and freed when @dev is unbound.
3081  *
3082  * Return: zero on success, else a negative error code.
3083  */
3084 int devm_spi_register_controller(struct device *dev,
3085                                  struct spi_controller *ctlr)
3086 {
3087         int ret;
3088
3089         ret = spi_register_controller(ctlr);
3090         if (ret)
3091                 return ret;
3092
3093         return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
3094 }
3095 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3096
3097 static int __unregister(struct device *dev, void *null)
3098 {
3099         spi_unregister_device(to_spi_device(dev));
3100         return 0;
3101 }
3102
3103 /**
3104  * spi_unregister_controller - unregister SPI master or slave controller
3105  * @ctlr: the controller being unregistered
3106  * Context: can sleep
3107  *
3108  * This call is used only by SPI controller drivers, which are the
3109  * only ones directly touching chip registers.
3110  *
3111  * This must be called from context that can sleep.
3112  *
3113  * Note that this function also drops a reference to the controller.
3114  */
3115 void spi_unregister_controller(struct spi_controller *ctlr)
3116 {
3117         struct spi_controller *found;
3118         int id = ctlr->bus_num;
3119
3120         /* Prevent addition of new devices, unregister existing ones */
3121         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3122                 mutex_lock(&ctlr->add_lock);
3123
3124         device_for_each_child(&ctlr->dev, NULL, __unregister);
3125
3126         /* First make sure that this controller was ever added */
3127         mutex_lock(&board_lock);
3128         found = idr_find(&spi_master_idr, id);
3129         mutex_unlock(&board_lock);
3130         if (ctlr->queued) {
3131                 if (spi_destroy_queue(ctlr))
3132                         dev_err(&ctlr->dev, "queue remove failed\n");
3133         }
3134         mutex_lock(&board_lock);
3135         list_del(&ctlr->list);
3136         mutex_unlock(&board_lock);
3137
3138         device_del(&ctlr->dev);
3139
3140         /* free bus id */
3141         mutex_lock(&board_lock);
3142         if (found == ctlr)
3143                 idr_remove(&spi_master_idr, id);
3144         mutex_unlock(&board_lock);
3145
3146         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3147                 mutex_unlock(&ctlr->add_lock);
3148
3149         /* Release the last reference on the controller if its driver
3150          * has not yet been converted to devm_spi_alloc_master/slave().
3151          */
3152         if (!ctlr->devm_allocated)
3153                 put_device(&ctlr->dev);
3154 }
3155 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3156
3157 int spi_controller_suspend(struct spi_controller *ctlr)
3158 {
3159         int ret;
3160
3161         /* Basically no-ops for non-queued controllers */
3162         if (!ctlr->queued)
3163                 return 0;
3164
3165         ret = spi_stop_queue(ctlr);
3166         if (ret)
3167                 dev_err(&ctlr->dev, "queue stop failed\n");
3168
3169         return ret;
3170 }
3171 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3172
3173 int spi_controller_resume(struct spi_controller *ctlr)
3174 {
3175         int ret;
3176
3177         if (!ctlr->queued)
3178                 return 0;
3179
3180         ret = spi_start_queue(ctlr);
3181         if (ret)
3182                 dev_err(&ctlr->dev, "queue restart failed\n");
3183
3184         return ret;
3185 }
3186 EXPORT_SYMBOL_GPL(spi_controller_resume);
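
/*
 * Illustrative sketch (assumption): system sleep handlers of a hypothetical
 * controller driver typically wrap the two helpers above. This assumes probe
 * stored the controller with platform_set_drvdata(); foo_* names and the
 * SIMPLE_DEV_PM_OPS wiring are illustrative only.
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);	// stops the message queue
 *	}
 *
 *	static int __maybe_unused foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);	// restarts the message queue
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */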
3187
3188 /*-------------------------------------------------------------------------*/
3189
3190 /* Core methods for spi_message alterations */
3191
3192 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3193                                             struct spi_message *msg,
3194                                             void *res)
3195 {
3196         struct spi_replaced_transfers *rxfer = res;
3197         size_t i;
3198
3199         /* call extra callback if requested */
3200         if (rxfer->release)
3201                 rxfer->release(ctlr, msg, res);
3202
3203         /* insert replaced transfers back into the message */
3204         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3205
3206         /* remove the formerly inserted entries */
3207         for (i = 0; i < rxfer->inserted; i++)
3208                 list_del(&rxfer->inserted_transfers[i].transfer_list);
3209 }
3210
3211 /**
3212  * spi_replace_transfers - replace transfers with several transfers
3213  *                         and register change with spi_message.resources
3214  * @msg:           the spi_message we work upon
3215  * @xfer_first:    the first spi_transfer we want to replace
3216  * @remove:        number of transfers to remove
3217  * @insert:        the number of transfers we want to insert instead
3218  * @release:       extra release code necessary in some circumstances
3219  * @extradatasize: extra data to allocate (with alignment guarantees
3220  *                 of struct @spi_transfer)
3221  * @gfp:           gfp flags
3222  *
3223  * Return: pointer to @spi_replaced_transfers,
3224  *         ERR_PTR(...) in case of errors.
3225  */
3226 static struct spi_replaced_transfers *spi_replace_transfers(
3227         struct spi_message *msg,
3228         struct spi_transfer *xfer_first,
3229         size_t remove,
3230         size_t insert,
3231         spi_replaced_release_t release,
3232         size_t extradatasize,
3233         gfp_t gfp)
3234 {
3235         struct spi_replaced_transfers *rxfer;
3236         struct spi_transfer *xfer;
3237         size_t i;
3238
3239         /* allocate the structure using spi_res */
3240         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3241                               struct_size(rxfer, inserted_transfers, insert)
3242                               + extradatasize,
3243                               gfp);
3244         if (!rxfer)
3245                 return ERR_PTR(-ENOMEM);
3246
3247         /* the release code to invoke before running the generic release */
3248         rxfer->release = release;
3249
3250         /* assign extradata */
3251         if (extradatasize)
3252                 rxfer->extradata =
3253                         &rxfer->inserted_transfers[insert];
3254
3255         /* init the replaced_transfers list */
3256         INIT_LIST_HEAD(&rxfer->replaced_transfers);
3257
3258         /*
3259          * Assign the list_entry after which we should reinsert
3260          * the @replaced_transfers - it may be spi_message.transfers!
3261          */
3262         rxfer->replaced_after = xfer_first->transfer_list.prev;
3263
3264         /* remove the requested number of transfers */
3265         for (i = 0; i < remove; i++) {
3266                 /*
3267                  * If the entry after replaced_after is msg->transfers,
3268                  * then we have been requested to remove more transfers
3269                  * than are in the list.
3270                  */
3271                 if (rxfer->replaced_after->next == &msg->transfers) {
3272                         dev_err(&msg->spi->dev,
3273                                 "requested to remove more spi_transfers than are available\n");
3274                         /* insert replaced transfers back into the message */
3275                         list_splice(&rxfer->replaced_transfers,
3276                                     rxfer->replaced_after);
3277
3278                         /* free the spi_replace_transfer structure */
3279                         spi_res_free(rxfer);
3280
3281                         /* and return with an error */
3282                         return ERR_PTR(-EINVAL);
3283                 }
3284
3285                 /*
3286                  * Remove the entry after replaced_after from list of
3287                  * transfers and add it to list of replaced_transfers.
3288                  */
3289                 list_move_tail(rxfer->replaced_after->next,
3290                                &rxfer->replaced_transfers);
3291         }
3292
3293         /*
3294          * Create copies of the given xfer with identical settings
3295          * based on the first transfer that gets removed.
3296          */
3297         for (i = 0; i < insert; i++) {
3298                 /* we need to run in reverse order */
3299                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3300
3301                 /* copy all spi_transfer data */
3302                 memcpy(xfer, xfer_first, sizeof(*xfer));
3303
3304                 /* add to list */
3305                 list_add(&xfer->transfer_list, rxfer->replaced_after);
3306
3307                 /* clear cs_change and delay for all but the last */
3308                 if (i) {
3309                         xfer->cs_change = false;
3310                         xfer->delay.value = 0;
3311                 }
3312         }
3313
3314         /* set up inserted */
3315         rxfer->inserted = insert;
3316
3317         /* and register it with spi_res/spi_message */
3318         spi_res_add(msg, rxfer);
3319
3320         return rxfer;
3321 }
3322
3323 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3324                                         struct spi_message *msg,
3325                                         struct spi_transfer **xferp,
3326                                         size_t maxsize,
3327                                         gfp_t gfp)
3328 {
3329         struct spi_transfer *xfer = *xferp, *xfers;
3330         struct spi_replaced_transfers *srt;
3331         size_t offset;
3332         size_t count, i;
3333
3334         /* calculate how many we have to replace */
3335         count = DIV_ROUND_UP(xfer->len, maxsize);
3336
3337         /* create replacement */
3338         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3339         if (IS_ERR(srt))
3340                 return PTR_ERR(srt);
3341         xfers = srt->inserted_transfers;
3342
3343         /*
3344          * Now handle each of those newly inserted spi_transfers.
3345          * Note that the replacement spi_transfers are all preset
3346          * to the same values as *xferp, so tx_buf, rx_buf and len
3347          * are all identical (as well as most others),
3348          * so we just have to fix up len and the pointers.
3349          *
3350          * This also includes support for the deprecated
3351          * spi_message.is_dma_mapped interface.
3352          */
3353
3354         /*
3355          * The first transfer just needs the length modified, so we
3356          * run it outside the loop.
3357          */
3358         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3359
3360         /* all the others need rx_buf/tx_buf also set */
3361         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3362                 /* update rx_buf, tx_buf and dma */
3363                 if (xfers[i].rx_buf)
3364                         xfers[i].rx_buf += offset;
3365                 if (xfers[i].rx_dma)
3366                         xfers[i].rx_dma += offset;
3367                 if (xfers[i].tx_buf)
3368                         xfers[i].tx_buf += offset;
3369                 if (xfers[i].tx_dma)
3370                         xfers[i].tx_dma += offset;
3371
3372                 /* update length */
3373                 xfers[i].len = min(maxsize, xfers[i].len - offset);
3374         }
3375
3376         /*
3377          * We set *xferp to the last entry we have inserted,
3378          * so that we skip those already-split transfers.
3379          */
3380         *xferp = &xfers[count - 1];
3381
3382         /* increment statistics counters */
3383         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3384                                        transfers_split_maxsize);
3385         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3386                                        transfers_split_maxsize);
3387
3388         return 0;
3389 }
3390
3391 /**
3392  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3393  *                               when an individual transfer exceeds a
3394  *                               certain size
3395  * @ctlr:    the @spi_controller for this transfer
3396  * @msg:   the @spi_message to transform
3397  * @maxsize:  the maximum size of an individual transfer; larger transfers get split
3398  * @gfp: GFP allocation flags
3399  *
3400  * Return: status of transformation
3401  */
3402 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3403                                 struct spi_message *msg,
3404                                 size_t maxsize,
3405                                 gfp_t gfp)
3406 {
3407         struct spi_transfer *xfer;
3408         int ret;
3409
3410         /*
3411          * Iterate over the transfer_list,
3412          * but note that xfer is advanced to the last transfer inserted
3413          * to avoid checking sizes again unnecessarily (also xfer does
3414          * potentially belong to a different list by the time the
3415          * replacement has happened).
3416          */
3417         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3418                 if (xfer->len > maxsize) {
3419                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3420                                                            maxsize, gfp);
3421                         if (ret)
3422                                 return ret;
3423                 }
3424         }
3425
3426         return 0;
3427 }
3428 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
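
/*
 * Illustrative sketch (assumption): a controller driver whose hardware can
 * only move a bounded number of bytes per transfer can call the helper above
 * from its ->prepare_message() hook so oversized transfers arrive pre-split.
 * FOO_MAX_XFER_LEN and foo_prepare_message() are hypothetical names.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 *
 *	// in probe: ctlr->prepare_message = foo_prepare_message;
 */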
3429
3430 /*-------------------------------------------------------------------------*/
3431
3432 /* Core methods for SPI controller protocol drivers.  Some of the
3433  * other core methods are currently defined as inline functions.
3434  */
3435
3436 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3437                                         u8 bits_per_word)
3438 {
3439         if (ctlr->bits_per_word_mask) {
3440                 /* Only 32 bits fit in the mask */
3441                 if (bits_per_word > 32)
3442                         return -EINVAL;
3443                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3444                         return -EINVAL;
3445         }
3446
3447         return 0;
3448 }
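
/*
 * Illustrative sketch (assumption): controller drivers describe the word
 * sizes their hardware accepts via ctlr->bits_per_word_mask, which the check
 * above consults. SPI_BPW_MASK() and SPI_BPW_RANGE_MASK() come from
 * <linux/spi/spi.h>; the values below are made up.
 *
 *	// only 8- and 16-bit words:
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 *	// or any word size from 4 to 32 bits:
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 */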
3449
3450 /**
3451  * spi_setup - setup SPI mode and clock rate
3452  * @spi: the device whose settings are being modified
3453  * Context: can sleep, and no requests are queued to the device
3454  *
3455  * SPI protocol drivers may need to update the transfer mode if the
3456  * device doesn't work with its default.  They may likewise need
3457  * to update clock rates or word sizes from initial values.  This function
3458  * changes those settings, and must be called from a context that can sleep.
3459  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3460  * effect the next time the device is selected and data is transferred to
3461  * or from it.  When this function returns, the spi device is deselected.
3462  *
3463  * Note that this call will fail if the protocol driver specifies an option
3464  * that the underlying controller or its driver does not support.  For
3465  * example, not all hardware supports wire transfers using nine bit words,
3466  * LSB-first wire encoding, or active-high chipselects.
3467  *
3468  * Return: zero on success, else a negative error code.
3469  */
3470 int spi_setup(struct spi_device *spi)
3471 {
3472         unsigned        bad_bits, ugly_bits;
3473         int             status;
3474
3475         /*
3476          * Check mode to make sure that no two of DUAL, QUAD and NO_MOSI/MISO
3477          * are set at the same time.
3478          */
3479         if ((hweight_long(spi->mode &
3480                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3481             (hweight_long(spi->mode &
3482                 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3483                 dev_err(&spi->dev,
3484                 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3485                 return -EINVAL;
3486         }
3487         /* In SPI_3WIRE mode, DUAL and QUAD are forbidden */
3488         if ((spi->mode & SPI_3WIRE) && (spi->mode &
3489                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3490                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3491                 return -EINVAL;
3492         /*
3493          * Help drivers fail *cleanly* when they need options
3494          * that aren't supported with their current controller.
3495          * SPI_CS_WORD has a fallback software implementation,
3496          * so it is ignored here.
3497          */
3498         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3499                                  SPI_NO_TX | SPI_NO_RX);
3500         ugly_bits = bad_bits &
3501                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3502                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3503         if (ugly_bits) {
3504                 dev_warn(&spi->dev,
3505                          "setup: ignoring unsupported mode bits %x\n",
3506                          ugly_bits);
3507                 spi->mode &= ~ugly_bits;
3508                 bad_bits &= ~ugly_bits;
3509         }
3510         if (bad_bits) {
3511                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3512                         bad_bits);
3513                 return -EINVAL;
3514         }
3515
3516         if (!spi->bits_per_word)
3517                 spi->bits_per_word = 8;
3518
3519         status = __spi_validate_bits_per_word(spi->controller,
3520                                               spi->bits_per_word);
3521         if (status)
3522                 return status;
3523
3524         if (spi->controller->max_speed_hz &&
3525             (!spi->max_speed_hz ||
3526              spi->max_speed_hz > spi->controller->max_speed_hz))
3527                 spi->max_speed_hz = spi->controller->max_speed_hz;
3528
3529         mutex_lock(&spi->controller->io_mutex);
3530
3531         if (spi->controller->setup) {
3532                 status = spi->controller->setup(spi);
3533                 if (status) {
3534                         mutex_unlock(&spi->controller->io_mutex);
3535                         dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3536                                 status);
3537                         return status;
3538                 }
3539         }
3540
3541         if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3542                 status = pm_runtime_get_sync(spi->controller->dev.parent);
3543                 if (status < 0) {
3544                         mutex_unlock(&spi->controller->io_mutex);
3545                         pm_runtime_put_noidle(spi->controller->dev.parent);
3546                         dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3547                                 status);
3548                         return status;
3549                 }
3550
3551                 /*
3552                  * We do not want to return a positive value from pm_runtime_get;
3553                  * there are many instances of devices calling spi_setup() and
3554                  * checking for a non-zero return value instead of a negative
3555                  * return value.
3556                  */
3557                 status = 0;
3558
3559                 spi_set_cs(spi, false, true);
3560                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3561                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3562         } else {
3563                 spi_set_cs(spi, false, true);
3564         }
3565
3566         mutex_unlock(&spi->controller->io_mutex);
3567
3568         if (spi->rt && !spi->controller->rt) {
3569                 spi->controller->rt = true;
3570                 spi_set_thread_rt(spi->controller);
3571         }
3572
3573         trace_spi_setup(spi, status);
3574
3575         dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3576                         spi->mode & SPI_MODE_X_MASK,
3577                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3578                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3579                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3580                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
3581                         spi->bits_per_word, spi->max_speed_hz,
3582                         status);
3583
3584         return status;
3585 }
3586 EXPORT_SYMBOL_GPL(spi_setup);
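
/*
 * Illustrative sketch (assumption): a protocol driver typically adjusts the
 * per-device settings in its probe routine and then calls spi_setup() to
 * validate them against the controller. foo_spi_probe() and the particular
 * values are hypothetical.
 *
 *	static int foo_spi_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;		// CPOL=1/CPHA=1 for this hypothetical chip
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;	// cap this device at 1 MHz
 *
 *		return spi_setup(spi);
 *	}
 */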
3587
3588 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3589                                        struct spi_device *spi)
3590 {
3591         int delay1, delay2;
3592
3593         delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3594         if (delay1 < 0)
3595                 return delay1;
3596
3597         delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3598         if (delay2 < 0)
3599                 return delay2;
3600
3601         if (delay1 < delay2)
3602                 memcpy(&xfer->word_delay, &spi->word_delay,
3603                        sizeof(xfer->word_delay));
3604
3605         return 0;
3606 }
3607
3608 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3609 {
3610         struct spi_controller *ctlr = spi->controller;
3611         struct spi_transfer *xfer;
3612         int w_size;
3613
3614         if (list_empty(&message->transfers))
3615                 return -EINVAL;
3616
3617         /*
3618          * If an SPI controller does not support toggling the CS line on each
3619          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3620          * for the CS line, we can emulate the CS-per-word hardware function by
3621          * splitting transfers into one-word transfers and ensuring that
3622          * cs_change is set for each transfer.
3623          */
3624         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3625                                           spi->cs_gpiod)) {
3626                 size_t maxsize;
3627                 int ret;
3628
3629                 maxsize = (spi->bits_per_word + 7) / 8;
3630
3631                 /* spi_split_transfers_maxsize() requires message->spi */
3632                 message->spi = spi;
3633
3634                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3635                                                   GFP_KERNEL);
3636                 if (ret)
3637                         return ret;
3638
3639                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3640                         /* don't change cs_change on the last entry in the list */
3641                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3642                                 break;
3643                         xfer->cs_change = 1;
3644                 }
3645         }
3646
3647         /*
3648          * Half-duplex links include original MicroWire, and ones with
3649          * only one data pin like SPI_3WIRE (switches direction) or where
3650          * either MOSI or MISO is missing.  They can also be caused by
3651          * software limitations.
3652          */
3653         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3654             (spi->mode & SPI_3WIRE)) {
3655                 unsigned flags = ctlr->flags;
3656
3657                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3658                         if (xfer->rx_buf && xfer->tx_buf)
3659                                 return -EINVAL;
3660                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3661                                 return -EINVAL;
3662                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3663                                 return -EINVAL;
3664                 }
3665         }
3666
3667         /*
3668          * Set transfer bits_per_word and max speed to the spi device
3669          * defaults if they are not set for this transfer.
3670          * Set transfer tx_nbits and rx_nbits to the single-transfer
3671          * default (SPI_NBITS_SINGLE) if they are not set.
3672          * Ensure transfer word_delay is at least as long as that
3673          * required by the device itself.
3674          */
3675         message->frame_length = 0;
3676         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3677                 xfer->effective_speed_hz = 0;
3678                 message->frame_length += xfer->len;
3679                 if (!xfer->bits_per_word)
3680                         xfer->bits_per_word = spi->bits_per_word;
3681
3682                 if (!xfer->speed_hz)
3683                         xfer->speed_hz = spi->max_speed_hz;
3684
3685                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3686                         xfer->speed_hz = ctlr->max_speed_hz;
3687
3688                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3689                         return -EINVAL;
3690
3691                 /*
3692                  * SPI transfer length should be a multiple of the SPI word size,
3693                  * where the word size is rounded up to a power-of-two number of bytes.
3694                  */
3695                 if (xfer->bits_per_word <= 8)
3696                         w_size = 1;
3697                 else if (xfer->bits_per_word <= 16)
3698                         w_size = 2;
3699                 else
3700                         w_size = 4;
3701
3702                 /* No partial transfers accepted */
3703                 if (xfer->len % w_size)
3704                         return -EINVAL;
3705
3706                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3707                     xfer->speed_hz < ctlr->min_speed_hz)
3708                         return -EINVAL;
3709
3710                 if (xfer->tx_buf && !xfer->tx_nbits)
3711                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3712                 if (xfer->rx_buf && !xfer->rx_nbits)
3713                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3714                 /*
3715                  * Check transfer tx/rx_nbits:
3716                  * 1. check the value matches one of single, dual and quad
3717                  * 2. check tx/rx_nbits match the mode in spi_device
3718                  */
3719                 if (xfer->tx_buf) {
3720                         if (spi->mode & SPI_NO_TX)
3721                                 return -EINVAL;
3722                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3723                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3724                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3725                                 return -EINVAL;
3726                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3727                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3728                                 return -EINVAL;
3729                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3730                                 !(spi->mode & SPI_TX_QUAD))
3731                                 return -EINVAL;
3732                 }
3733                 /* check transfer rx_nbits */
3734                 if (xfer->rx_buf) {
3735                         if (spi->mode & SPI_NO_RX)
3736                                 return -EINVAL;
3737                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3738                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3739                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3740                                 return -EINVAL;
3741                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3742                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3743                                 return -EINVAL;
3744                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3745                                 !(spi->mode & SPI_RX_QUAD))
3746                                 return -EINVAL;
3747                 }
3748
3749                 if (_spi_xfer_word_delay_update(xfer, spi))
3750                         return -EINVAL;
3751         }
3752
3753         message->status = -EINPROGRESS;
3754
3755         return 0;
3756 }
3757
3758 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3759 {
3760         struct spi_controller *ctlr = spi->controller;
3761         struct spi_transfer *xfer;
3762
3763         /*
3764          * Some controllers do not support doing regular SPI transfers. Return
3765          * ENOTSUPP when this is the case.
3766          */
3767         if (!ctlr->transfer)
3768                 return -ENOTSUPP;
3769
3770         message->spi = spi;
3771
3772         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3773         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3774
3775         trace_spi_message_submit(message);
3776
3777         if (!ctlr->ptp_sts_supported) {
3778                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3779                         xfer->ptp_sts_word_pre = 0;
3780                         ptp_read_system_prets(xfer->ptp_sts);
3781                 }
3782         }
3783
3784         return ctlr->transfer(spi, message);
3785 }
3786
3787 /**
3788  * spi_async - asynchronous SPI transfer
3789  * @spi: device with which data will be exchanged
3790  * @message: describes the data transfers, including completion callback
3791  * Context: any (irqs may be blocked, etc)
3792  *
3793  * This call may be used in interrupt context and other contexts which can't sleep,
3794  * as well as from task contexts which can sleep.
3795  *
3796  * The completion callback is invoked in a context which can't sleep.
3797  * Before that invocation, the value of message->status is undefined.
3798  * When the callback is issued, message->status holds either zero (to
3799  * indicate complete success) or a negative error code.  After that
3800  * callback returns, the driver which issued the transfer request may
3801  * deallocate the associated memory; it's no longer in use by any SPI
3802  * core or controller driver code.
3803  *
3804  * Note that although all messages to a spi_device are handled in
3805  * FIFO order, messages may go to different devices in other orders.
3806  * Some device might be higher priority, or have various "hard" access
3807  * time requirements, for example.
3808  *
3809  * On detection of any fault during the transfer, processing of
3810  * the entire message is aborted, and the device is deselected.
3811  * Until returning from the associated message completion callback,
3812  * no other spi_message queued to that device will be processed.
3813  * (This rule applies equally to all the synchronous transfer calls,
3814  * which are wrappers around this core asynchronous primitive.)
3815  *
3816  * Return: zero on success, else a negative error code.
3817  */
3818 int spi_async(struct spi_device *spi, struct spi_message *message)
3819 {
3820         struct spi_controller *ctlr = spi->controller;
3821         int ret;
3822         unsigned long flags;
3823
3824         ret = __spi_validate(spi, message);
3825         if (ret != 0)
3826                 return ret;
3827
3828         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3829
3830         if (ctlr->bus_lock_flag)
3831                 ret = -EBUSY;
3832         else
3833                 ret = __spi_async(spi, message);
3834
3835         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3836
3837         return ret;
3838 }
3839 EXPORT_SYMBOL_GPL(spi_async);
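
/*
 * Illustrative sketch (assumption): an asynchronous submission must keep the
 * message, its transfers and their (DMA-safe) buffers alive until the
 * completion callback has run; the callback itself runs in a context that
 * cannot sleep. struct foo_req and foo_complete() are hypothetical.
 *
 *	struct foo_req {
 *		struct spi_message msg;
 *		struct spi_transfer xfer;
 *		struct completion done;
 *	};
 *
 *	static void foo_complete(void *arg)
 *	{
 *		complete(arg);		// may not sleep here
 *	}
 *
 *	// submission, e.g. from an interrupt handler, after filling req->xfer:
 *	init_completion(&req->done);
 *	spi_message_init_with_transfers(&req->msg, &req->xfer, 1);
 *	req->msg.complete = foo_complete;
 *	req->msg.context = &req->done;
 *	ret = spi_async(spi, &req->msg);
 */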
3840
3841 /**
3842  * spi_async_locked - version of spi_async with exclusive bus usage
3843  * @spi: device with which data will be exchanged
3844  * @message: describes the data transfers, including completion callback
3845  * Context: any (irqs may be blocked, etc)
3846  *
3847  * This call may be used in interrupt context and other contexts which can't sleep,
3848  * as well as from task contexts which can sleep.
3849  *
3850  * The completion callback is invoked in a context which can't sleep.
3851  * Before that invocation, the value of message->status is undefined.
3852  * When the callback is issued, message->status holds either zero (to
3853  * indicate complete success) or a negative error code.  After that
3854  * callback returns, the driver which issued the transfer request may
3855  * deallocate the associated memory; it's no longer in use by any SPI
3856  * core or controller driver code.
3857  *
3858  * Note that although all messages to a spi_device are handled in
3859  * FIFO order, messages may go to different devices in other orders.
3860  * Some device might be higher priority, or have various "hard" access
3861  * time requirements, for example.
3862  *
3863  * On detection of any fault during the transfer, processing of
3864  * the entire message is aborted, and the device is deselected.
3865  * Until returning from the associated message completion callback,
3866  * no other spi_message queued to that device will be processed.
3867  * (This rule applies equally to all the synchronous transfer calls,
3868  * which are wrappers around this core asynchronous primitive.)
3869  *
3870  * Return: zero on success, else a negative error code.
3871  */
3872 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3873 {
3874         struct spi_controller *ctlr = spi->controller;
3875         int ret;
3876         unsigned long flags;
3877
3878         ret = __spi_validate(spi, message);
3879         if (ret != 0)
3880                 return ret;
3881
3882         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3883
3884         ret = __spi_async(spi, message);
3885
3886         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3887
3888         return ret;
3889
3890 }
3891
3892 /*-------------------------------------------------------------------------*/
3893
3894 /*
3895  * Utility methods for SPI protocol drivers, layered on
3896  * top of the core.  Some other utility methods are defined as
3897  * inline functions.
3898  */
3899
3900 static void spi_complete(void *arg)
3901 {
3902         complete(arg);
3903 }
3904
3905 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3906 {
3907         DECLARE_COMPLETION_ONSTACK(done);
3908         int status;
3909         struct spi_controller *ctlr = spi->controller;
3910         unsigned long flags;
3911
3912         status = __spi_validate(spi, message);
3913         if (status != 0)
3914                 return status;
3915
3916         message->complete = spi_complete;
3917         message->context = &done;
3918         message->spi = spi;
3919
3920         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3921         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3922
3923         /*
3924          * If we're not using the legacy transfer method then we will
3925          * try to transfer in the calling context, so special-case it here.
3926          * This code would be less tricky if we could remove the
3927          * support for driver-implemented message queues.
3928          */
3929         if (ctlr->transfer == spi_queued_transfer) {
3930                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3931
3932                 trace_spi_message_submit(message);
3933
3934                 status = __spi_queued_transfer(spi, message, false);
3935
3936                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3937         } else {
3938                 status = spi_async_locked(spi, message);
3939         }
3940
3941         if (status == 0) {
3942                 /* Push out the messages in the calling context if we can */
3943                 if (ctlr->transfer == spi_queued_transfer) {
3944                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3945                                                        spi_sync_immediate);
3946                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3947                                                        spi_sync_immediate);
3948                         __spi_pump_messages(ctlr, false);
3949                 }
3950
3951                 wait_for_completion(&done);
3952                 status = message->status;
3953         }
3954         message->context = NULL;
3955         return status;
3956 }
3957
3958 /**
3959  * spi_sync - blocking/synchronous SPI data transfers
3960  * @spi: device with which data will be exchanged
3961  * @message: describes the data transfers
3962  * Context: can sleep
3963  *
3964  * This call may only be used from a context that may sleep.  The sleep
3965  * is non-interruptible, and has no timeout.  Low-overhead controller
3966  * drivers may DMA directly into and out of the message buffers.
3967  *
3968  * Note that the SPI device's chip select is active during the message,
3969  * and then is normally disabled between messages.  Drivers for some
3970  * frequently-used devices may want to minimize costs of selecting a chip,
3971  * by leaving it selected in anticipation that the next message will go
3972  * to the same chip.  (That may increase power usage.)
3973  *
3974  * Also, the caller is guaranteeing that the memory associated with the
3975  * message will not be freed before this call returns.
3976  *
3977  * Return: zero on success, else a negative error code.
3978  */
3979 int spi_sync(struct spi_device *spi, struct spi_message *message)
3980 {
3981         int ret;
3982
3983         mutex_lock(&spi->controller->bus_lock_mutex);
3984         ret = __spi_sync(spi, message);
3985         mutex_unlock(&spi->controller->bus_lock_mutex);
3986
3987         return ret;
3988 }
3989 EXPORT_SYMBOL_GPL(spi_sync);
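
/*
 * Illustrative sketch (assumption): a typical synchronous exchange builds a
 * command + response message from two transfers and blocks until it is done.
 * cmd and resp are assumed to be kmalloc'd, DMA-safe buffers owned by the
 * caller; they must stay valid until spi_sync() returns.
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = cmd,  .len = cmd_len,  },
 *		{ .rx_buf = resp, .len = resp_len, },
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */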
3990
3991 /**
3992  * spi_sync_locked - version of spi_sync with exclusive bus usage
3993  * @spi: device with which data will be exchanged
3994  * @message: describes the data transfers
3995  * Context: can sleep
3996  *
3997  * This call may only be used from a context that may sleep.  The sleep
3998  * is non-interruptible, and has no timeout.  Low-overhead controller
3999  * drivers may DMA directly into and out of the message buffers.
4000  *
4001  * This call should be used by drivers that require exclusive access to the
4002  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4003  * be released by a spi_bus_unlock call when the exclusive access is over.
4004  *
4005  * Return: zero on success, else a negative error code.
4006  */
4007 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4008 {
4009         return __spi_sync(spi, message);
4010 }
4011 EXPORT_SYMBOL_GPL(spi_sync_locked);
4012
4013 /**
4014  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4015  * @ctlr: SPI bus master that should be locked for exclusive bus access
4016  * Context: can sleep
4017  *
4018  * This call may only be used from a context that may sleep.  The sleep
4019  * is non-interruptible, and has no timeout.
4020  *
4021  * This call should be used by drivers that require exclusive access to the
4022  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4023  * exclusive access is over. Data transfer must be done by spi_sync_locked
4024  * and spi_async_locked calls when the SPI bus lock is held.
4025  *
4026  * Return: always zero.
4027  */
4028 int spi_bus_lock(struct spi_controller *ctlr)
4029 {
4030         unsigned long flags;
4031
4032         mutex_lock(&ctlr->bus_lock_mutex);
4033
4034         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4035         ctlr->bus_lock_flag = 1;
4036         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4037
4038         /* mutex remains locked until spi_bus_unlock is called */
4039
4040         return 0;
4041 }
4042 EXPORT_SYMBOL_GPL(spi_bus_lock);
4043
4044 /**
4045  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4046  * @ctlr: SPI bus master that was locked for exclusive bus access
4047  * Context: can sleep
4048  *
4049  * This call may only be used from a context that may sleep.  The sleep
4050  * is non-interruptible, and has no timeout.
4051  *
4052  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4053  * call.
4054  *
4055  * Return: always zero.
4056  */
4057 int spi_bus_unlock(struct spi_controller *ctlr)
4058 {
4059         ctlr->bus_lock_flag = 0;
4060
4061         mutex_unlock(&ctlr->bus_lock_mutex);
4062
4063         return 0;
4064 }
4065 EXPORT_SYMBOL_GPL(spi_bus_unlock);
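
/*
 * Illustrative sketch (assumption): a driver that must keep two messages
 * back-to-back on the wire, with no other device's traffic in between,
 * brackets them with the bus lock above and uses the _locked transfer
 * variants while holding it. setup_msg and data_msg are hypothetical,
 * already-initialized spi_messages.
 *
 *	spi_bus_lock(spi->controller);
 *
 *	ret = spi_sync_locked(spi, &setup_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &data_msg);
 *
 *	spi_bus_unlock(spi->controller);
 */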
4066
4067 /* portable code must never pass more than 32 bytes */
4068 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
4069
4070 static u8       *buf;
4071
4072 /**
4073  * spi_write_then_read - SPI synchronous write followed by read
4074  * @spi: device with which data will be exchanged
4075  * @txbuf: data to be written (need not be dma-safe)
4076  * @n_tx: size of txbuf, in bytes
4077  * @rxbuf: buffer into which data will be read (need not be dma-safe)
4078  * @n_rx: size of rxbuf, in bytes
4079  * Context: can sleep
4080  *
4081  * This performs a half duplex MicroWire style transaction with the
4082  * device, sending txbuf and then reading rxbuf.  The return value
4083  * is zero for success, else a negative errno status code.
4084  * This call may only be used from a context that may sleep.
4085  *
4086  * Parameters to this routine are always copied using a small buffer.
4087  * Performance-sensitive or bulk transfer code should instead use
4088  * spi_{async,sync}() calls with dma-safe buffers.
4089  *
4090  * Return: zero on success, else a negative error code.
4091  */
4092 int spi_write_then_read(struct spi_device *spi,
4093                 const void *txbuf, unsigned n_tx,
4094                 void *rxbuf, unsigned n_rx)
4095 {
4096         static DEFINE_MUTEX(lock);
4097
4098         int                     status;
4099         struct spi_message      message;
4100         struct spi_transfer     x[2];
4101         u8                      *local_buf;
4102
4103         /*
4104          * Use preallocated DMA-safe buffer if we can. We can't avoid
4105          * copying here (as a pure convenience thing), but we can
4106          * keep heap costs out of the hot path unless someone else is
4107          * using the preallocated buffer or the transfer is too large.
4108          */
4109         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4110                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4111                                     GFP_KERNEL | GFP_DMA);
4112                 if (!local_buf)
4113                         return -ENOMEM;
4114         } else {
4115                 local_buf = buf;
4116         }
4117
4118         spi_message_init(&message);
4119         memset(x, 0, sizeof(x));
4120         if (n_tx) {
4121                 x[0].len = n_tx;
4122                 spi_message_add_tail(&x[0], &message);
4123         }
4124         if (n_rx) {
4125                 x[1].len = n_rx;
4126                 spi_message_add_tail(&x[1], &message);
4127         }
4128
4129         memcpy(local_buf, txbuf, n_tx);
4130         x[0].tx_buf = local_buf;
4131         x[1].rx_buf = local_buf + n_tx;
4132
4133         /* do the i/o */
4134         status = spi_sync(spi, &message);
4135         if (status == 0)
4136                 memcpy(rxbuf, x[1].rx_buf, n_rx);
4137
4138         if (x[0].tx_buf == buf)
4139                 mutex_unlock(&lock);
4140         else
4141                 kfree(local_buf);
4142
4143         return status;
4144 }
4145 EXPORT_SYMBOL_GPL(spi_write_then_read);
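
/*
 * Illustrative sketch (assumption): reading a single register is a common use
 * of the helper above; because the data is copied into the preallocated
 * DMA-safe buffer, small stack variables are fine here. FOO_REG_CHIPID is a
 * hypothetical register address. For the 8-bit-in/8-bit-out case the
 * spi_w8r8() wrapper offers the same thing in one call.
 *
 *	u8 reg = FOO_REG_CHIPID;
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &reg, 1, &val, 1);
 *	if (ret == 0)
 *		dev_info(&spi->dev, "chip id: %#x\n", val);
 */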
4146
4147 /*-------------------------------------------------------------------------*/
4148
4149 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4150 /* must call put_device() when done with the returned spi_device */
4151 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4152 {
4153         struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4154
4155         return dev ? to_spi_device(dev) : NULL;
4156 }
4157
4158 /* the spi controllers are not using spi_bus, so we find them another way */
4159 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4160 {
4161         struct device *dev;
4162
4163         dev = class_find_device_by_of_node(&spi_master_class, node);
4164         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4165                 dev = class_find_device_by_of_node(&spi_slave_class, node);
4166         if (!dev)
4167                 return NULL;
4168
4169         /* reference got in class_find_device */
4170         return container_of(dev, struct spi_controller, dev);
4171 }
4172
4173 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4174                          void *arg)
4175 {
4176         struct of_reconfig_data *rd = arg;
4177         struct spi_controller *ctlr;
4178         struct spi_device *spi;
4179
4180         switch (of_reconfig_get_state_change(action, arg)) {
4181         case OF_RECONFIG_CHANGE_ADD:
4182                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4183                 if (ctlr == NULL)
4184                         return NOTIFY_OK;       /* not for us */
4185
4186                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4187                         put_device(&ctlr->dev);
4188                         return NOTIFY_OK;
4189                 }
4190
4191                 spi = of_register_spi_device(ctlr, rd->dn);
4192                 put_device(&ctlr->dev);
4193
4194                 if (IS_ERR(spi)) {
4195                         pr_err("%s: failed to create for '%pOF'\n",
4196                                         __func__, rd->dn);
4197                         of_node_clear_flag(rd->dn, OF_POPULATED);
4198                         return notifier_from_errno(PTR_ERR(spi));
4199                 }
4200                 break;
4201
4202         case OF_RECONFIG_CHANGE_REMOVE:
4203                 /* already depopulated? */
4204                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4205                         return NOTIFY_OK;
4206
4207                 /* find our device by node */
4208                 spi = of_find_spi_device_by_node(rd->dn);
4209                 if (spi == NULL)
4210                         return NOTIFY_OK;       /* no? not meant for us */
4211
4212                 /* unregister takes one ref away */
4213                 spi_unregister_device(spi);
4214
4215                 /* and drop the reference taken by the find */
4216                 put_device(&spi->dev);
4217                 break;
4218         }
4219
4220         return NOTIFY_OK;
4221 }
4222
4223 static struct notifier_block spi_of_notifier = {
4224         .notifier_call = of_spi_notify,
4225 };
4226 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4227 extern struct notifier_block spi_of_notifier;
4228 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4229
4230 #if IS_ENABLED(CONFIG_ACPI)
4231 static int spi_acpi_controller_match(struct device *dev, const void *data)
4232 {
4233         return ACPI_COMPANION(dev->parent) == data;
4234 }
4235
4236 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4237 {
4238         struct device *dev;
4239
4240         dev = class_find_device(&spi_master_class, NULL, adev,
4241                                 spi_acpi_controller_match);
4242         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4243                 dev = class_find_device(&spi_slave_class, NULL, adev,
4244                                         spi_acpi_controller_match);
4245         if (!dev)
4246                 return NULL;
4247
4248         return container_of(dev, struct spi_controller, dev);
4249 }
4250
4251 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4252 {
4253         struct device *dev;
4254
4255         dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4256         return to_spi_device(dev);
4257 }
4258
4259 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4260                            void *arg)
4261 {
4262         struct acpi_device *adev = arg;
4263         struct spi_controller *ctlr;
4264         struct spi_device *spi;
4265
4266         switch (value) {
4267         case ACPI_RECONFIG_DEVICE_ADD:
4268                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4269                 if (!ctlr)
4270                         break;
4271
4272                 acpi_register_spi_device(ctlr, adev);
4273                 put_device(&ctlr->dev);
4274                 break;
4275         case ACPI_RECONFIG_DEVICE_REMOVE:
4276                 if (!acpi_device_enumerated(adev))
4277                         break;
4278
4279                 spi = acpi_spi_find_device_by_adev(adev);
4280                 if (!spi)
4281                         break;
4282
4283                 spi_unregister_device(spi);
4284                 put_device(&spi->dev);
4285                 break;
4286         }
4287
4288         return NOTIFY_OK;
4289 }
4290
4291 static struct notifier_block spi_acpi_notifier = {
4292         .notifier_call = acpi_spi_notify,
4293 };
4294 #else
4295 extern struct notifier_block spi_acpi_notifier;
4296 #endif
4297
4298 static int __init spi_init(void)
4299 {
4300         int     status;
4301
4302         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4303         if (!buf) {
4304                 status = -ENOMEM;
4305                 goto err0;
4306         }
4307
4308         status = bus_register(&spi_bus_type);
4309         if (status < 0)
4310                 goto err1;
4311
4312         status = class_register(&spi_master_class);
4313         if (status < 0)
4314                 goto err2;
4315
4316         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4317                 status = class_register(&spi_slave_class);
4318                 if (status < 0)
4319                         goto err3;
4320         }
4321
4322         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4323                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4324         if (IS_ENABLED(CONFIG_ACPI))
4325                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4326
4327         return 0;
4328
4329 err3:
4330         class_unregister(&spi_master_class);
4331 err2:
4332         bus_unregister(&spi_bus_type);
4333 err1:
4334         kfree(buf);
4335         buf = NULL;
4336 err0:
4337         return status;
4338 }
4339
4340 /*
4341  * A board_info is normally registered in arch_initcall(),
4342  * but even essential drivers wait till later.
4343  *
4344  * REVISIT only boardinfo really needs static linking. The rest (device and
4345  * driver registration) _could_ be dynamically linked (modular) ... Costs
4346  * include needing to have boardinfo data structures be much more public.
4347  */
4348 postcore_initcall(spi_init);