1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/cache.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mutex.h>
25 #include <linux/of_device.h>
26 #include <linux/of_irq.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/slab.h>
29 #include <linux/mod_devicetable.h>
30 #include <linux/spi/spi.h>
31 #include <linux/of_gpio.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_domain.h>
34 #include <linux/export.h>
35 #include <linux/sched/rt.h>
36 #include <linux/delay.h>
37 #include <linux/kthread.h>
38 #include <linux/ioport.h>
39 #include <linux/acpi.h>
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/spi.h>
43
44 static void spidev_release(struct device *dev)
45 {
46         struct spi_device       *spi = to_spi_device(dev);
47
48         /* spi masters may clean up for released devices */
49         if (spi->master->cleanup)
50                 spi->master->cleanup(spi);
51
52         spi_master_put(spi->master);
53         kfree(spi);
54 }
55
56 static ssize_t
57 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
58 {
59         const struct spi_device *spi = to_spi_device(dev);
60         int len;
61
62         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
63         if (len != -ENODEV)
64                 return len;
65
66         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
67 }
68 static DEVICE_ATTR_RO(modalias);
69
70 #define SPI_STATISTICS_ATTRS(field, file)                               \
71 static ssize_t spi_master_##field##_show(struct device *dev,            \
72                                          struct device_attribute *attr, \
73                                          char *buf)                     \
74 {                                                                       \
75         struct spi_master *master = container_of(dev,                   \
76                                                  struct spi_master, dev); \
77         return spi_statistics_##field##_show(&master->statistics, buf); \
78 }                                                                       \
79 static struct device_attribute dev_attr_spi_master_##field = {          \
80         .attr = { .name = file, .mode = S_IRUGO },                      \
81         .show = spi_master_##field##_show,                              \
82 };                                                                      \
83 static ssize_t spi_device_##field##_show(struct device *dev,            \
84                                          struct device_attribute *attr, \
85                                         char *buf)                      \
86 {                                                                       \
87         struct spi_device *spi = to_spi_device(dev);                    \
88         return spi_statistics_##field##_show(&spi->statistics, buf);    \
89 }                                                                       \
90 static struct device_attribute dev_attr_spi_device_##field = {          \
91         .attr = { .name = file, .mode = S_IRUGO },                      \
92         .show = spi_device_##field##_show,                              \
93 }
94
95 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
96 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
97                                             char *buf)                  \
98 {                                                                       \
99         unsigned long flags;                                            \
100         ssize_t len;                                                    \
101         spin_lock_irqsave(&stat->lock, flags);                          \
102         len = sprintf(buf, format_string, stat->field);                 \
103         spin_unlock_irqrestore(&stat->lock, flags);                     \
104         return len;                                                     \
105 }                                                                       \
106 SPI_STATISTICS_ATTRS(name, file)
107
108 #define SPI_STATISTICS_SHOW(field, format_string)                       \
109         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
110                                  field, format_string)
111
112 SPI_STATISTICS_SHOW(messages, "%lu");
113 SPI_STATISTICS_SHOW(transfers, "%lu");
114 SPI_STATISTICS_SHOW(errors, "%lu");
115 SPI_STATISTICS_SHOW(timedout, "%lu");
116
117 SPI_STATISTICS_SHOW(spi_sync, "%lu");
118 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
119 SPI_STATISTICS_SHOW(spi_async, "%lu");
120
121 SPI_STATISTICS_SHOW(bytes, "%llu");
122 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
123 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
124
125 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
126         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
127                                  "transfer_bytes_histo_" number,        \
128                                  transfer_bytes_histo[index],  "%lu")
129 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
130 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
146
147 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
148
149 static struct attribute *spi_dev_attrs[] = {
150         &dev_attr_modalias.attr,
151         NULL,
152 };
153
154 static const struct attribute_group spi_dev_group = {
155         .attrs  = spi_dev_attrs,
156 };
157
158 static struct attribute *spi_device_statistics_attrs[] = {
159         &dev_attr_spi_device_messages.attr,
160         &dev_attr_spi_device_transfers.attr,
161         &dev_attr_spi_device_errors.attr,
162         &dev_attr_spi_device_timedout.attr,
163         &dev_attr_spi_device_spi_sync.attr,
164         &dev_attr_spi_device_spi_sync_immediate.attr,
165         &dev_attr_spi_device_spi_async.attr,
166         &dev_attr_spi_device_bytes.attr,
167         &dev_attr_spi_device_bytes_rx.attr,
168         &dev_attr_spi_device_bytes_tx.attr,
169         &dev_attr_spi_device_transfer_bytes_histo0.attr,
170         &dev_attr_spi_device_transfer_bytes_histo1.attr,
171         &dev_attr_spi_device_transfer_bytes_histo2.attr,
172         &dev_attr_spi_device_transfer_bytes_histo3.attr,
173         &dev_attr_spi_device_transfer_bytes_histo4.attr,
174         &dev_attr_spi_device_transfer_bytes_histo5.attr,
175         &dev_attr_spi_device_transfer_bytes_histo6.attr,
176         &dev_attr_spi_device_transfer_bytes_histo7.attr,
177         &dev_attr_spi_device_transfer_bytes_histo8.attr,
178         &dev_attr_spi_device_transfer_bytes_histo9.attr,
179         &dev_attr_spi_device_transfer_bytes_histo10.attr,
180         &dev_attr_spi_device_transfer_bytes_histo11.attr,
181         &dev_attr_spi_device_transfer_bytes_histo12.attr,
182         &dev_attr_spi_device_transfer_bytes_histo13.attr,
183         &dev_attr_spi_device_transfer_bytes_histo14.attr,
184         &dev_attr_spi_device_transfer_bytes_histo15.attr,
185         &dev_attr_spi_device_transfer_bytes_histo16.attr,
186         &dev_attr_spi_device_transfers_split_maxsize.attr,
187         NULL,
188 };
189
190 static const struct attribute_group spi_device_statistics_group = {
191         .name  = "statistics",
192         .attrs  = spi_device_statistics_attrs,
193 };
194
195 static const struct attribute_group *spi_dev_groups[] = {
196         &spi_dev_group,
197         &spi_device_statistics_group,
198         NULL,
199 };
200
201 static struct attribute *spi_master_statistics_attrs[] = {
202         &dev_attr_spi_master_messages.attr,
203         &dev_attr_spi_master_transfers.attr,
204         &dev_attr_spi_master_errors.attr,
205         &dev_attr_spi_master_timedout.attr,
206         &dev_attr_spi_master_spi_sync.attr,
207         &dev_attr_spi_master_spi_sync_immediate.attr,
208         &dev_attr_spi_master_spi_async.attr,
209         &dev_attr_spi_master_bytes.attr,
210         &dev_attr_spi_master_bytes_rx.attr,
211         &dev_attr_spi_master_bytes_tx.attr,
212         &dev_attr_spi_master_transfer_bytes_histo0.attr,
213         &dev_attr_spi_master_transfer_bytes_histo1.attr,
214         &dev_attr_spi_master_transfer_bytes_histo2.attr,
215         &dev_attr_spi_master_transfer_bytes_histo3.attr,
216         &dev_attr_spi_master_transfer_bytes_histo4.attr,
217         &dev_attr_spi_master_transfer_bytes_histo5.attr,
218         &dev_attr_spi_master_transfer_bytes_histo6.attr,
219         &dev_attr_spi_master_transfer_bytes_histo7.attr,
220         &dev_attr_spi_master_transfer_bytes_histo8.attr,
221         &dev_attr_spi_master_transfer_bytes_histo9.attr,
222         &dev_attr_spi_master_transfer_bytes_histo10.attr,
223         &dev_attr_spi_master_transfer_bytes_histo11.attr,
224         &dev_attr_spi_master_transfer_bytes_histo12.attr,
225         &dev_attr_spi_master_transfer_bytes_histo13.attr,
226         &dev_attr_spi_master_transfer_bytes_histo14.attr,
227         &dev_attr_spi_master_transfer_bytes_histo15.attr,
228         &dev_attr_spi_master_transfer_bytes_histo16.attr,
229         &dev_attr_spi_master_transfers_split_maxsize.attr,
230         NULL,
231 };
232
233 static const struct attribute_group spi_master_statistics_group = {
234         .name  = "statistics",
235         .attrs  = spi_master_statistics_attrs,
236 };
237
238 static const struct attribute_group *spi_master_groups[] = {
239         &spi_master_statistics_group,
240         NULL,
241 };
242
243 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
244                                        struct spi_transfer *xfer,
245                                        struct spi_master *master)
246 {
247         unsigned long flags;
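        /*
         * Bucket by power of two: fls(len) - 1 is floor(log2(len)), so a
         * transfer of 2^n .. 2^(n+1)-1 bytes lands in histogram slot n
         * (e.g. len == 5 -> fls() == 3 -> slot 2, the "4-7" bucket),
         * clamped to the last slot for very large transfers.
         */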
248         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
249
250         if (l2len < 0)
251                 l2len = 0;
252
253         spin_lock_irqsave(&stats->lock, flags);
254
255         stats->transfers++;
256         stats->transfer_bytes_histo[l2len]++;
257
258         stats->bytes += xfer->len;
259         if ((xfer->tx_buf) &&
260             (xfer->tx_buf != master->dummy_tx))
261                 stats->bytes_tx += xfer->len;
262         if ((xfer->rx_buf) &&
263             (xfer->rx_buf != master->dummy_rx))
264                 stats->bytes_rx += xfer->len;
265
266         spin_unlock_irqrestore(&stats->lock, flags);
267 }
268 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
269
270 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
271  * and the sysfs version makes coldplug work too.
272  */
273
274 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
275                                                 const struct spi_device *sdev)
276 {
277         while (id->name[0]) {
278                 if (!strcmp(sdev->modalias, id->name))
279                         return id;
280                 id++;
281         }
282         return NULL;
283 }
284
285 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
286 {
287         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
288
289         return spi_match_id(sdrv->id_table, sdev);
290 }
291 EXPORT_SYMBOL_GPL(spi_get_device_id);
292
293 static int spi_match_device(struct device *dev, struct device_driver *drv)
294 {
295         const struct spi_device *spi = to_spi_device(dev);
296         const struct spi_driver *sdrv = to_spi_driver(drv);
297
298         /* Attempt an OF style match */
299         if (of_driver_match_device(dev, drv))
300                 return 1;
301
302         /* Then try ACPI */
303         if (acpi_driver_match_device(dev, drv))
304                 return 1;
305
306         if (sdrv->id_table)
307                 return !!spi_match_id(sdrv->id_table, spi);
308
309         return strcmp(spi->modalias, drv->name) == 0;
310 }
311
312 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
313 {
314         const struct spi_device         *spi = to_spi_device(dev);
315         int rc;
316
317         rc = acpi_device_uevent_modalias(dev, env);
318         if (rc != -ENODEV)
319                 return rc;
320
321         add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
322         return 0;
323 }
324
325 struct bus_type spi_bus_type = {
326         .name           = "spi",
327         .dev_groups     = spi_dev_groups,
328         .match          = spi_match_device,
329         .uevent         = spi_uevent,
330 };
331 EXPORT_SYMBOL_GPL(spi_bus_type);
332
333
334 static int spi_drv_probe(struct device *dev)
335 {
336         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
337         struct spi_device               *spi = to_spi_device(dev);
338         int ret;
339
340         ret = of_clk_set_defaults(dev->of_node, false);
341         if (ret)
342                 return ret;
343
344         if (dev->of_node) {
345                 spi->irq = of_irq_get(dev->of_node, 0);
346                 if (spi->irq == -EPROBE_DEFER)
347                         return -EPROBE_DEFER;
348                 if (spi->irq < 0)
349                         spi->irq = 0;
350         }
351
352         ret = dev_pm_domain_attach(dev, true);
353         if (ret != -EPROBE_DEFER) {
354                 ret = sdrv->probe(spi);
355                 if (ret)
356                         dev_pm_domain_detach(dev, true);
357         }
358
359         return ret;
360 }
361
362 static int spi_drv_remove(struct device *dev)
363 {
364         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
365         int ret;
366
367         ret = sdrv->remove(to_spi_device(dev));
368         dev_pm_domain_detach(dev, true);
369
370         return ret;
371 }
372
373 static void spi_drv_shutdown(struct device *dev)
374 {
375         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
376
377         sdrv->shutdown(to_spi_device(dev));
378 }
379
380 /**
381  * __spi_register_driver - register a SPI driver
382  * @owner: owner module of the driver to register
383  * @sdrv: the driver to register
384  * Context: can sleep
385  *
386  * Return: zero on success, else a negative error code.
387  */
388 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
389 {
390         sdrv->driver.owner = owner;
391         sdrv->driver.bus = &spi_bus_type;
392         if (sdrv->probe)
393                 sdrv->driver.probe = spi_drv_probe;
394         if (sdrv->remove)
395                 sdrv->driver.remove = spi_drv_remove;
396         if (sdrv->shutdown)
397                 sdrv->driver.shutdown = spi_drv_shutdown;
398         return driver_register(&sdrv->driver);
399 }
400 EXPORT_SYMBOL_GPL(__spi_register_driver);
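
/*
 * Illustrative sketch (hypothetical "acme" chip): client drivers normally
 * do not call __spi_register_driver() directly, but go through the
 * spi_register_driver()/module_spi_driver() helpers, which pass
 * THIS_MODULE in as @owner:
 *
 *	static int acme_probe(struct spi_device *spi)  { ... }
 *	static int acme_remove(struct spi_device *spi) { ... }
 *
 *	static struct spi_driver acme_driver = {
 *		.driver	= { .name = "acme" },
 *		.probe	= acme_probe,
 *		.remove	= acme_remove,
 *	};
 *	module_spi_driver(acme_driver);
 */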
401
402 /*-------------------------------------------------------------------------*/
403
404 /* SPI devices should normally not be created by SPI device drivers; that
405  * would make them board-specific.  Similarly with SPI master drivers.
406  * Device registration normally goes into board-specific init code, like
407  * arch/.../mach.../board-YYY.c, with other readonly (flashable) information about mainboard devices.
408  */
409
410 struct boardinfo {
411         struct list_head        list;
412         struct spi_board_info   board_info;
413 };
414
415 static LIST_HEAD(board_list);
416 static LIST_HEAD(spi_master_list);
417
418 /*
419  * Used to protect add/del operations on the board_info list and the
420  * spi_master list, and their matching process.
421  */
422 static DEFINE_MUTEX(board_lock);
423
424 /**
425  * spi_alloc_device - Allocate a new SPI device
426  * @master: Controller to which device is connected
427  * Context: can sleep
428  *
429  * Allows a driver to allocate and initialize a spi_device without
430  * registering it immediately.  This allows a driver to directly
431  * fill the spi_device with device parameters before calling
432  * spi_add_device() on it.
433  *
434  * Caller is responsible to call spi_add_device() on the returned
435  * spi_device structure to add it to the SPI master.  If the caller
436  * needs to discard the spi_device without adding it, then it should
437  * call spi_dev_put() on it.
438  *
439  * Return: a pointer to the new device, or NULL.
440  */
441 struct spi_device *spi_alloc_device(struct spi_master *master)
442 {
443         struct spi_device       *spi;
444
445         if (!spi_master_get(master))
446                 return NULL;
447
448         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
449         if (!spi) {
450                 spi_master_put(master);
451                 return NULL;
452         }
453
454         spi->master = master;
455         spi->dev.parent = &master->dev;
456         spi->dev.bus = &spi_bus_type;
457         spi->dev.release = spidev_release;
458         spi->cs_gpio = -ENOENT;
459
460         spin_lock_init(&spi->statistics.lock);
461
462         device_initialize(&spi->dev);
463         return spi;
464 }
465 EXPORT_SYMBOL_GPL(spi_alloc_device);
466
467 static void spi_dev_set_name(struct spi_device *spi)
468 {
469         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
470
471         if (adev) {
472                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
473                 return;
474         }
475
476         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
477                      spi->chip_select);
478 }
479
480 static int spi_dev_check(struct device *dev, void *data)
481 {
482         struct spi_device *spi = to_spi_device(dev);
483         struct spi_device *new_spi = data;
484
485         if (spi->master == new_spi->master &&
486             spi->chip_select == new_spi->chip_select)
487                 return -EBUSY;
488         return 0;
489 }
490
491 /**
492  * spi_add_device - Add spi_device allocated with spi_alloc_device
493  * @spi: spi_device to register
494  *
495  * Companion function to spi_alloc_device.  Devices allocated with
496  * spi_alloc_device can be added onto the spi bus with this function.
497  *
498  * Return: 0 on success; negative errno on failure
499  */
500 int spi_add_device(struct spi_device *spi)
501 {
502         static DEFINE_MUTEX(spi_add_lock);
503         struct spi_master *master = spi->master;
504         struct device *dev = master->dev.parent;
505         int status;
506
507         /* Chipselects are numbered 0..max; validate. */
508         if (spi->chip_select >= master->num_chipselect) {
509                 dev_err(dev, "cs%d >= max %d\n",
510                         spi->chip_select,
511                         master->num_chipselect);
512                 return -EINVAL;
513         }
514
515         /* Set the bus ID string */
516         spi_dev_set_name(spi);
517
518         /* We need to make sure there's no other device with this
519          * chipselect **BEFORE** we call setup(), else we'll trash
520          * its configuration.  Lock against concurrent add() calls.
521          */
522         mutex_lock(&spi_add_lock);
523
524         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
525         if (status) {
526                 dev_err(dev, "chipselect %d already in use\n",
527                                 spi->chip_select);
528                 goto done;
529         }
530
531         if (master->cs_gpios)
532                 spi->cs_gpio = master->cs_gpios[spi->chip_select];
533
534         /* Drivers may modify this initial i/o setup, but will
535          * normally rely on the device being set up.  Devices
536          * using SPI_CS_HIGH can't coexist well otherwise...
537          */
538         status = spi_setup(spi);
539         if (status < 0) {
540                 dev_err(dev, "can't setup %s, status %d\n",
541                                 dev_name(&spi->dev), status);
542                 goto done;
543         }
544
545         /* Device may be bound to an active driver when this returns */
546         status = device_add(&spi->dev);
547         if (status < 0)
548                 dev_err(dev, "can't add %s, status %d\n",
549                                 dev_name(&spi->dev), status);
550         else
551                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
552
553 done:
554         mutex_unlock(&spi_add_lock);
555         return status;
556 }
557 EXPORT_SYMBOL_GPL(spi_add_device);
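
/*
 * Illustrative two-step instantiation (hypothetical values): allocate,
 * fill in the device parameters, then add; drop the reference if adding
 * fails or is skipped:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select  = 1;
 *	spi->max_speed_hz = 10000000;
 *	spi->mode         = SPI_MODE_0;
 *	strlcpy(spi->modalias, "acme", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		...
 *	}
 */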
558
559 /**
560  * spi_new_device - instantiate one new SPI device
561  * @master: Controller to which device is connected
562  * @chip: Describes the SPI device
563  * Context: can sleep
564  *
565  * On typical mainboards, this is purely internal, and it's not needed
566  * after board init creates the hard-wired devices.  Some development
567  * platforms may not be able to use spi_register_board_info though, and
568  * this is exported so that for example a USB or parport based adapter
569  * driver could add devices (which it would learn about out-of-band).
570  *
571  * Return: the new device, or NULL.
572  */
573 struct spi_device *spi_new_device(struct spi_master *master,
574                                   struct spi_board_info *chip)
575 {
576         struct spi_device       *proxy;
577         int                     status;
578
579         /* NOTE:  caller did any chip->bus_num checks necessary.
580          *
581          * Also, unless we change the return value convention to use
582          * error-or-pointer (not NULL-or-pointer), troubleshootability
583          * suggests syslogged diagnostics are best here (ugh).
584          */
585
586         proxy = spi_alloc_device(master);
587         if (!proxy)
588                 return NULL;
589
590         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
591
592         proxy->chip_select = chip->chip_select;
593         proxy->max_speed_hz = chip->max_speed_hz;
594         proxy->mode = chip->mode;
595         proxy->irq = chip->irq;
596         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
597         proxy->dev.platform_data = (void *) chip->platform_data;
598         proxy->controller_data = chip->controller_data;
599         proxy->controller_state = NULL;
600
601         status = spi_add_device(proxy);
602         if (status < 0) {
603                 spi_dev_put(proxy);
604                 return NULL;
605         }
606
607         return proxy;
608 }
609 EXPORT_SYMBOL_GPL(spi_new_device);
610
611 /**
612  * spi_unregister_device - unregister a single SPI device
613  * @spi: spi_device to unregister
614  *
615  * Start making the passed SPI device vanish. Normally this would be handled
616  * by spi_unregister_master().
617  */
618 void spi_unregister_device(struct spi_device *spi)
619 {
620         if (!spi)
621                 return;
622
623         if (spi->dev.of_node)
624                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
625         if (ACPI_COMPANION(&spi->dev))
626                 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
627         device_unregister(&spi->dev);
628 }
629 EXPORT_SYMBOL_GPL(spi_unregister_device);
630
631 static void spi_match_master_to_boardinfo(struct spi_master *master,
632                                 struct spi_board_info *bi)
633 {
634         struct spi_device *dev;
635
636         if (master->bus_num != bi->bus_num)
637                 return;
638
639         dev = spi_new_device(master, bi);
640         if (!dev)
641                 dev_err(master->dev.parent, "can't create new device for %s\n",
642                         bi->modalias);
643 }
644
645 /**
646  * spi_register_board_info - register SPI devices for a given board
647  * @info: array of chip descriptors
648  * @n: how many descriptors are provided
649  * Context: can sleep
650  *
651  * Board-specific early init code calls this (probably during arch_initcall)
652  * with segments of the SPI device table.  Any device nodes are created later,
653  * after the relevant parent SPI controller (bus_num) is defined.  We keep
654  * this table of devices forever, so that reloading a controller driver will
655  * not make Linux forget about these hard-wired devices.
656  *
657  * Other code can also call this, e.g. a particular add-on board might provide
658  * SPI devices through its expansion connector, so code initializing that board
659  * would naturally declare its SPI devices.
660  *
661  * The board info passed can safely be __initdata ... but be careful of
662  * any embedded pointers (platform_data, etc), they're copied as-is.
663  *
664  * Return: zero on success, else a negative error code.
665  */
666 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
667 {
668         struct boardinfo *bi;
669         int i;
670
671         if (!n)
672                 return -EINVAL;
673
674         bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
675         if (!bi)
676                 return -ENOMEM;
677
678         for (i = 0; i < n; i++, bi++, info++) {
679                 struct spi_master *master;
680
681                 memcpy(&bi->board_info, info, sizeof(*info));
682                 mutex_lock(&board_lock);
683                 list_add_tail(&bi->list, &board_list);
684                 list_for_each_entry(master, &spi_master_list, list)
685                         spi_match_master_to_boardinfo(master, &bi->board_info);
686                 mutex_unlock(&board_lock);
687         }
688
689         return 0;
690 }
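
/*
 * Illustrative board table (hypothetical device): a board file would
 * typically declare something like the following and call
 * spi_register_board_info() from its init code; as noted above the
 * table itself may be __initdata:
 *
 *	static struct spi_board_info acme_board_info[] __initdata = {
 *		{
 *			.modalias	= "acme",
 *			.max_speed_hz	= 10000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(acme_board_info,
 *				ARRAY_SIZE(acme_board_info));
 */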
691
692 /*-------------------------------------------------------------------------*/
693
694 static void spi_set_cs(struct spi_device *spi, bool enable)
695 {
696         if (spi->mode & SPI_CS_HIGH)
697                 enable = !enable;
698
699         if (gpio_is_valid(spi->cs_gpio))
700                 gpio_set_value(spi->cs_gpio, !enable);
701         else if (spi->master->set_cs)
702                 spi->master->set_cs(spi, !enable);
703 }
704
705 #ifdef CONFIG_HAS_DMA
706 static int spi_map_buf(struct spi_master *master, struct device *dev,
707                        struct sg_table *sgt, void *buf, size_t len,
708                        enum dma_data_direction dir)
709 {
710         const bool vmalloced_buf = is_vmalloc_addr(buf);
711         unsigned int max_seg_size = dma_get_max_seg_size(dev);
712         int desc_len;
713         int sgs;
714         struct page *vm_page;
715         void *sg_buf;
716         size_t min;
717         int i, ret;
718
719         if (vmalloced_buf) {
720                 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
721                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
722         } else if (virt_addr_valid(buf)) {
723                 desc_len = min_t(int, max_seg_size, master->max_dma_len);
724                 sgs = DIV_ROUND_UP(len, desc_len);
725         } else {
726                 return -EINVAL;
727         }
728
729         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
730         if (ret != 0)
731                 return ret;
732
733         for (i = 0; i < sgs; i++) {
734
735                 if (vmalloced_buf) {
736                         min = min_t(size_t,
737                                     len, desc_len - offset_in_page(buf));
738                         vm_page = vmalloc_to_page(buf);
739                         if (!vm_page) {
740                                 sg_free_table(sgt);
741                                 return -ENOMEM;
742                         }
743                         sg_set_page(&sgt->sgl[i], vm_page,
744                                     min, offset_in_page(buf));
745                 } else {
746                         min = min_t(size_t, len, desc_len);
747                         sg_buf = buf;
748                         sg_set_buf(&sgt->sgl[i], sg_buf, min);
749                 }
750
751                 buf += min;
752                 len -= min;
753         }
754
755         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
756         if (!ret)
757                 ret = -ENOMEM;
758         if (ret < 0) {
759                 sg_free_table(sgt);
760                 return ret;
761         }
762
763         sgt->nents = ret;
764
765         return 0;
766 }
767
768 static void spi_unmap_buf(struct spi_master *master, struct device *dev,
769                           struct sg_table *sgt, enum dma_data_direction dir)
770 {
771         if (sgt->orig_nents) {
772                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
773                 sg_free_table(sgt);
774         }
775 }
776
777 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
778 {
779         struct device *tx_dev, *rx_dev;
780         struct spi_transfer *xfer;
781         int ret;
782
783         if (!master->can_dma)
784                 return 0;
785
786         if (master->dma_tx)
787                 tx_dev = master->dma_tx->device->dev;
788         else
789                 tx_dev = &master->dev;
790
791         if (master->dma_rx)
792                 rx_dev = master->dma_rx->device->dev;
793         else
794                 rx_dev = &master->dev;
795
796         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
797                 if (!master->can_dma(master, msg->spi, xfer))
798                         continue;
799
800                 if (xfer->tx_buf != NULL) {
801                         ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
802                                           (void *)xfer->tx_buf, xfer->len,
803                                           DMA_TO_DEVICE);
804                         if (ret != 0)
805                                 return ret;
806                 }
807
808                 if (xfer->rx_buf != NULL) {
809                         ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
810                                           xfer->rx_buf, xfer->len,
811                                           DMA_FROM_DEVICE);
812                         if (ret != 0) {
813                                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
814                                               DMA_TO_DEVICE);
815                                 return ret;
816                         }
817                 }
818         }
819
820         master->cur_msg_mapped = true;
821
822         return 0;
823 }
824
825 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
826 {
827         struct spi_transfer *xfer;
828         struct device *tx_dev, *rx_dev;
829
830         if (!master->cur_msg_mapped || !master->can_dma)
831                 return 0;
832
833         if (master->dma_tx)
834                 tx_dev = master->dma_tx->device->dev;
835         else
836                 tx_dev = &master->dev;
837
838         if (master->dma_rx)
839                 rx_dev = master->dma_rx->device->dev;
840         else
841                 rx_dev = &master->dev;
842
843         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
844                 if (!master->can_dma(master, msg->spi, xfer))
845                         continue;
846
847                 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
848                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
849         }
850
851         return 0;
852 }
853 #else /* !CONFIG_HAS_DMA */
854 static inline int __spi_map_msg(struct spi_master *master,
855                                 struct spi_message *msg)
856 {
857         return 0;
858 }
859
860 static inline int __spi_unmap_msg(struct spi_master *master,
861                                   struct spi_message *msg)
862 {
863         return 0;
864 }
865 #endif /* !CONFIG_HAS_DMA */
866
867 static inline int spi_unmap_msg(struct spi_master *master,
868                                 struct spi_message *msg)
869 {
870         struct spi_transfer *xfer;
871
872         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
873                 /*
874                  * Restore tx_buf and rx_buf to NULL if they were replaced
875                  * by the core's dummy buffers in spi_map_msg().
876                  */
877                 if (xfer->tx_buf == master->dummy_tx)
878                         xfer->tx_buf = NULL;
879                 if (xfer->rx_buf == master->dummy_rx)
880                         xfer->rx_buf = NULL;
881         }
882
883         return __spi_unmap_msg(master, msg);
884 }
885
886 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
887 {
888         struct spi_transfer *xfer;
889         void *tmp;
890         unsigned int max_tx, max_rx;
891
892         if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
893                 max_tx = 0;
894                 max_rx = 0;
895
896                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
897                         if ((master->flags & SPI_MASTER_MUST_TX) &&
898                             !xfer->tx_buf)
899                                 max_tx = max(xfer->len, max_tx);
900                         if ((master->flags & SPI_MASTER_MUST_RX) &&
901                             !xfer->rx_buf)
902                                 max_rx = max(xfer->len, max_rx);
903                 }
904
905                 if (max_tx) {
906                         tmp = krealloc(master->dummy_tx, max_tx,
907                                        GFP_KERNEL | GFP_DMA);
908                         if (!tmp)
909                                 return -ENOMEM;
910                         master->dummy_tx = tmp;
911                         memset(tmp, 0, max_tx);
912                 }
913
914                 if (max_rx) {
915                         tmp = krealloc(master->dummy_rx, max_rx,
916                                        GFP_KERNEL | GFP_DMA);
917                         if (!tmp)
918                                 return -ENOMEM;
919                         master->dummy_rx = tmp;
920                 }
921
922                 if (max_tx || max_rx) {
923                         list_for_each_entry(xfer, &msg->transfers,
924                                             transfer_list) {
925                                 if (!xfer->tx_buf)
926                                         xfer->tx_buf = master->dummy_tx;
927                                 if (!xfer->rx_buf)
928                                         xfer->rx_buf = master->dummy_rx;
929                         }
930                 }
931         }
932
933         return __spi_map_msg(master, msg);
934 }
935
936 /*
937  * spi_transfer_one_message - Default implementation of transfer_one_message()
938  *
939  * This is a standard implementation of transfer_one_message() for
940  * drivers which implement a transfer_one() operation.  It provides
941  * standard handling of delays and chip select management.
942  */
943 static int spi_transfer_one_message(struct spi_master *master,
944                                     struct spi_message *msg)
945 {
946         struct spi_transfer *xfer;
947         bool keep_cs = false;
948         int ret = 0;
949         unsigned long ms = 1;
950         struct spi_statistics *statm = &master->statistics;
951         struct spi_statistics *stats = &msg->spi->statistics;
952
953         spi_set_cs(msg->spi, true);
954
955         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
956         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
957
958         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
959                 trace_spi_transfer_start(msg, xfer);
960
961                 spi_statistics_add_transfer_stats(statm, xfer, master);
962                 spi_statistics_add_transfer_stats(stats, xfer, master);
963
964                 if (xfer->tx_buf || xfer->rx_buf) {
965                         reinit_completion(&master->xfer_completion);
966
967                         ret = master->transfer_one(master, msg->spi, xfer);
968                         if (ret < 0) {
969                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
970                                                                errors);
971                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
972                                                                errors);
973                                 dev_err(&msg->spi->dev,
974                                         "SPI transfer failed: %d\n", ret);
975                                 goto out;
976                         }
977
978                         if (ret > 0) {
979                                 ret = 0;
980                                 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
981                                 ms += ms + 100; /* some tolerance */
982
983                                 ms = wait_for_completion_timeout(&master->xfer_completion,
984                                                                  msecs_to_jiffies(ms));
985                         }
986
987                         if (ms == 0) {
988                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
989                                                                timedout);
990                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
991                                                                timedout);
992                                 dev_err(&msg->spi->dev,
993                                         "SPI transfer timed out\n");
994                                 msg->status = -ETIMEDOUT;
995                         }
996                 } else {
997                         if (xfer->len)
998                                 dev_err(&msg->spi->dev,
999                                         "Bufferless transfer has length %u\n",
1000                                         xfer->len);
1001                 }
1002
1003                 trace_spi_transfer_stop(msg, xfer);
1004
1005                 if (msg->status != -EINPROGRESS)
1006                         goto out;
1007
1008                 if (xfer->delay_usecs)
1009                         udelay(xfer->delay_usecs);
1010
1011                 if (xfer->cs_change) {
1012                         if (list_is_last(&xfer->transfer_list,
1013                                          &msg->transfers)) {
1014                                 keep_cs = true;
1015                         } else {
1016                                 spi_set_cs(msg->spi, false);
1017                                 udelay(10);
1018                                 spi_set_cs(msg->spi, true);
1019                         }
1020                 }
1021
1022                 msg->actual_length += xfer->len;
1023         }
1024
1025 out:
1026         if (ret != 0 || !keep_cs)
1027                 spi_set_cs(msg->spi, false);
1028
1029         if (msg->status == -EINPROGRESS)
1030                 msg->status = ret;
1031
1032         if (msg->status && master->handle_err)
1033                 master->handle_err(master, msg);
1034
1035         spi_res_release(master, msg);
1036
1037         spi_finalize_current_message(master);
1038
1039         return ret;
1040 }
1041
1042 /**
1043  * spi_finalize_current_transfer - report completion of a transfer
1044  * @master: the master reporting completion
1045  *
1046  * Called by SPI drivers using the core transfer_one_message()
1047  * implementation to notify it that the current interrupt driven
1048  * transfer has finished and the next one may be scheduled.
1049  */
1050 void spi_finalize_current_transfer(struct spi_master *master)
1051 {
1052         complete(&master->xfer_completion);
1053 }
1054 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
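
/*
 * Illustrative controller-driver usage (hypothetical names): a driver
 * relying on the default transfer_one_message() sets ->transfer_one(),
 * returns a positive value when the transfer completes asynchronously,
 * and signals completion from its interrupt handler:
 *
 *	static int acme_transfer_one(struct spi_master *master,
 *				     struct spi_device *spi,
 *				     struct spi_transfer *xfer)
 *	{
 *		acme_start_dma(xfer);	// hypothetical helper
 *		return 1;		// still in flight, core will wait
 *	}
 *
 *	static irqreturn_t acme_irq(int irq, void *data)
 *	{
 *		struct spi_master *master = data;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */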
1055
1056 /**
1057  * __spi_pump_messages - function which processes spi message queue
1058  * @master: master to process queue for
1059  * @in_kthread: true if we are in the context of the message pump thread
1060  * @bus_locked: true if the bus mutex is held when calling this function
1061  *
1062  * This function checks if there is any spi message in the queue that
1063  * needs processing and, if so, calls out to the driver to initialize hardware
1064  * and transfer each message.
1065  *
1066  * Note that it is called both from the kthread itself and also from
1067  * inside spi_sync(); the queue extraction handling at the top of the
1068  * function should deal with this safely.
1069  */
1070 static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
1071                                 bool bus_locked)
1072 {
1073         unsigned long flags;
1074         bool was_busy = false;
1075         int ret;
1076
1077         /* Lock queue */
1078         spin_lock_irqsave(&master->queue_lock, flags);
1079
1080         /* Make sure we are not already running a message */
1081         if (master->cur_msg) {
1082                 spin_unlock_irqrestore(&master->queue_lock, flags);
1083                 return;
1084         }
1085
1086         /* If another context is idling the device then defer */
1087         if (master->idling) {
1088                 queue_kthread_work(&master->kworker, &master->pump_messages);
1089                 spin_unlock_irqrestore(&master->queue_lock, flags);
1090                 return;
1091         }
1092
1093         /* Check if the queue is idle */
1094         if (list_empty(&master->queue) || !master->running) {
1095                 if (!master->busy) {
1096                         spin_unlock_irqrestore(&master->queue_lock, flags);
1097                         return;
1098                 }
1099
1100                 /* Only do teardown in the thread */
1101                 if (!in_kthread) {
1102                         queue_kthread_work(&master->kworker,
1103                                            &master->pump_messages);
1104                         spin_unlock_irqrestore(&master->queue_lock, flags);
1105                         return;
1106                 }
1107
1108                 master->busy = false;
1109                 master->idling = true;
1110                 spin_unlock_irqrestore(&master->queue_lock, flags);
1111
1112                 kfree(master->dummy_rx);
1113                 master->dummy_rx = NULL;
1114                 kfree(master->dummy_tx);
1115                 master->dummy_tx = NULL;
1116                 if (master->unprepare_transfer_hardware &&
1117                     master->unprepare_transfer_hardware(master))
1118                         dev_err(&master->dev,
1119                                 "failed to unprepare transfer hardware\n");
1120                 if (master->auto_runtime_pm) {
1121                         pm_runtime_mark_last_busy(master->dev.parent);
1122                         pm_runtime_put_autosuspend(master->dev.parent);
1123                 }
1124                 trace_spi_master_idle(master);
1125
1126                 spin_lock_irqsave(&master->queue_lock, flags);
1127                 master->idling = false;
1128                 spin_unlock_irqrestore(&master->queue_lock, flags);
1129                 return;
1130         }
1131
1132         /* Extract head of queue */
1133         master->cur_msg =
1134                 list_first_entry(&master->queue, struct spi_message, queue);
1135
1136         list_del_init(&master->cur_msg->queue);
1137         if (master->busy)
1138                 was_busy = true;
1139         else
1140                 master->busy = true;
1141         spin_unlock_irqrestore(&master->queue_lock, flags);
1142
1143         if (!was_busy && master->auto_runtime_pm) {
1144                 ret = pm_runtime_get_sync(master->dev.parent);
1145                 if (ret < 0) {
1146                         dev_err(&master->dev, "Failed to power device: %d\n",
1147                                 ret);
1148                         return;
1149                 }
1150         }
1151
1152         if (!was_busy)
1153                 trace_spi_master_busy(master);
1154
1155         if (!was_busy && master->prepare_transfer_hardware) {
1156                 ret = master->prepare_transfer_hardware(master);
1157                 if (ret) {
1158                         dev_err(&master->dev,
1159                                 "failed to prepare transfer hardware\n");
1160
1161                         if (master->auto_runtime_pm)
1162                                 pm_runtime_put(master->dev.parent);
1163                         return;
1164                 }
1165         }
1166
1167         if (!bus_locked)
1168                 mutex_lock(&master->bus_lock_mutex);
1169
1170         trace_spi_message_start(master->cur_msg);
1171
1172         if (master->prepare_message) {
1173                 ret = master->prepare_message(master, master->cur_msg);
1174                 if (ret) {
1175                         dev_err(&master->dev,
1176                                 "failed to prepare message: %d\n", ret);
1177                         master->cur_msg->status = ret;
1178                         spi_finalize_current_message(master);
1179                         goto out;
1180                 }
1181                 master->cur_msg_prepared = true;
1182         }
1183
1184         ret = spi_map_msg(master, master->cur_msg);
1185         if (ret) {
1186                 master->cur_msg->status = ret;
1187                 spi_finalize_current_message(master);
1188                 goto out;
1189         }
1190
1191         ret = master->transfer_one_message(master, master->cur_msg);
1192         if (ret) {
1193                 dev_err(&master->dev,
1194                         "failed to transfer one message from queue\n");
1195                 goto out;
1196         }
1197
1198 out:
1199         if (!bus_locked)
1200                 mutex_unlock(&master->bus_lock_mutex);
1201
1202         /* Prod the scheduler in case transfer_one() was busy waiting */
1203         if (!ret)
1204                 cond_resched();
1205 }
1206
1207 /**
1208  * spi_pump_messages - kthread work function which processes spi message queue
1209  * @work: pointer to kthread work struct contained in the master struct
1210  */
1211 static void spi_pump_messages(struct kthread_work *work)
1212 {
1213         struct spi_master *master =
1214                 container_of(work, struct spi_master, pump_messages);
1215
1216         __spi_pump_messages(master, true, master->bus_lock_flag);
1217 }
1218
1219 static int spi_init_queue(struct spi_master *master)
1220 {
1221         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1222
1223         master->running = false;
1224         master->busy = false;
1225
1226         init_kthread_worker(&master->kworker);
1227         master->kworker_task = kthread_run(kthread_worker_fn,
1228                                            &master->kworker, "%s",
1229                                            dev_name(&master->dev));
1230         if (IS_ERR(master->kworker_task)) {
1231                 dev_err(&master->dev, "failed to create message pump task\n");
1232                 return PTR_ERR(master->kworker_task);
1233         }
1234         init_kthread_work(&master->pump_messages, spi_pump_messages);
1235
1236         /*
1237          * Master config will indicate if this controller should run the
1238          * message pump with high (realtime) priority to reduce the transfer
1239          * latency on the bus by minimising the delay between a transfer
1240          * request and the scheduling of the message pump thread. Without this
1241          * setting the message pump thread will remain at default priority.
1242          */
1243         if (master->rt) {
1244                 dev_info(&master->dev,
1245                         "will run message pump with realtime priority\n");
1246                 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1247         }
1248
1249         return 0;
1250 }
1251
1252 /**
1253  * spi_get_next_queued_message() - called by driver to check for queued
1254  * messages
1255  * @master: the master to check for queued messages
1256  *
1257  * If there are more messages in the queue, the next message is returned from
1258  * this call.
1259  *
1260  * Return: the next message in the queue, else NULL if the queue is empty.
1261  */
1262 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1263 {
1264         struct spi_message *next;
1265         unsigned long flags;
1266
1267         /* get a pointer to the next message, if any */
1268         spin_lock_irqsave(&master->queue_lock, flags);
1269         next = list_first_entry_or_null(&master->queue, struct spi_message,
1270                                         queue);
1271         spin_unlock_irqrestore(&master->queue_lock, flags);
1272
1273         return next;
1274 }
1275 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1276
1277 /**
1278  * spi_finalize_current_message() - the current message is complete
1279  * @master: the master to return the message to
1280  *
1281  * Called by the driver to notify the core that the message in the front of the
1282  * queue is complete and can be removed from the queue.
1283  */
1284 void spi_finalize_current_message(struct spi_master *master)
1285 {
1286         struct spi_message *mesg;
1287         unsigned long flags;
1288         int ret;
1289
1290         spin_lock_irqsave(&master->queue_lock, flags);
1291         mesg = master->cur_msg;
1292         spin_unlock_irqrestore(&master->queue_lock, flags);
1293
1294         spi_unmap_msg(master, mesg);
1295
1296         if (master->cur_msg_prepared && master->unprepare_message) {
1297                 ret = master->unprepare_message(master, mesg);
1298                 if (ret) {
1299                         dev_err(&master->dev,
1300                                 "failed to unprepare message: %d\n", ret);
1301                 }
1302         }
1303
1304         spin_lock_irqsave(&master->queue_lock, flags);
1305         master->cur_msg = NULL;
1306         master->cur_msg_prepared = false;
1307         queue_kthread_work(&master->kworker, &master->pump_messages);
1308         spin_unlock_irqrestore(&master->queue_lock, flags);
1309
1310         trace_spi_message_done(mesg);
1311
1312         mesg->state = NULL;
1313         if (mesg->complete)
1314                 mesg->complete(mesg->context);
1315 }
1316 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1317
1318 static int spi_start_queue(struct spi_master *master)
1319 {
1320         unsigned long flags;
1321
1322         spin_lock_irqsave(&master->queue_lock, flags);
1323
1324         if (master->running || master->busy) {
1325                 spin_unlock_irqrestore(&master->queue_lock, flags);
1326                 return -EBUSY;
1327         }
1328
1329         master->running = true;
1330         master->cur_msg = NULL;
1331         spin_unlock_irqrestore(&master->queue_lock, flags);
1332
1333         queue_kthread_work(&master->kworker, &master->pump_messages);
1334
1335         return 0;
1336 }
1337
1338 static int spi_stop_queue(struct spi_master *master)
1339 {
1340         unsigned long flags;
1341         unsigned limit = 500;
1342         int ret = 0;
1343
1344         spin_lock_irqsave(&master->queue_lock, flags);
1345
1346         /*
1347          * This is a bit lame, but is optimized for the common execution path.
1348          * A wait_queue on the master->busy could be used, but then the common
1349          * execution path (pump_messages) would be required to call wake_up or
1350          * friends on every SPI message. Do this instead.
1351          */
1352         while ((!list_empty(&master->queue) || master->busy) && limit--) {
1353                 spin_unlock_irqrestore(&master->queue_lock, flags);
1354                 usleep_range(10000, 11000);
1355                 spin_lock_irqsave(&master->queue_lock, flags);
1356         }
1357
1358         if (!list_empty(&master->queue) || master->busy)
1359                 ret = -EBUSY;
1360         else
1361                 master->running = false;
1362
1363         spin_unlock_irqrestore(&master->queue_lock, flags);
1364
1365         if (ret) {
1366                 dev_warn(&master->dev,
1367                          "could not stop message queue\n");
1368                 return ret;
1369         }
1370         return ret;
1371 }
1372
1373 static int spi_destroy_queue(struct spi_master *master)
1374 {
1375         int ret;
1376
1377         ret = spi_stop_queue(master);
1378
1379         /*
1380          * flush_kthread_worker will block until all work is done.
1381          * If the reason that stop_queue timed out is that the work will never
1382          * finish, then it does no good to call flush/stop thread, so
1383          * return anyway.
1384          */
1385         if (ret) {
1386                 dev_err(&master->dev, "problem destroying queue\n");
1387                 return ret;
1388         }
1389
1390         flush_kthread_worker(&master->kworker);
1391         kthread_stop(master->kworker_task);
1392
1393         return 0;
1394 }
1395
1396 static int __spi_queued_transfer(struct spi_device *spi,
1397                                  struct spi_message *msg,
1398                                  bool need_pump)
1399 {
1400         struct spi_master *master = spi->master;
1401         unsigned long flags;
1402
1403         spin_lock_irqsave(&master->queue_lock, flags);
1404
1405         if (!master->running) {
1406                 spin_unlock_irqrestore(&master->queue_lock, flags);
1407                 return -ESHUTDOWN;
1408         }
1409         msg->actual_length = 0;
1410         msg->status = -EINPROGRESS;
1411
1412         list_add_tail(&msg->queue, &master->queue);
1413         if (!master->busy && need_pump)
1414                 queue_kthread_work(&master->kworker, &master->pump_messages);
1415
1416         spin_unlock_irqrestore(&master->queue_lock, flags);
1417         return 0;
1418 }
1419
1420 /**
1421  * spi_queued_transfer - transfer function for queued transfers
1422  * @spi: spi device which is requesting transfer
1423  * @msg: spi message to be handled and queued onto the driver queue
1424  *
1425  * Return: zero on success, else a negative error code.
1426  */
1427 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1428 {
1429         return __spi_queued_transfer(spi, msg, true);
1430 }
1431
1432 static int spi_master_initialize_queue(struct spi_master *master)
1433 {
1434         int ret;
1435
1436         master->transfer = spi_queued_transfer;
1437         if (!master->transfer_one_message)
1438                 master->transfer_one_message = spi_transfer_one_message;
1439
1440         /* Initialize and start queue */
1441         ret = spi_init_queue(master);
1442         if (ret) {
1443                 dev_err(&master->dev, "problem initializing queue\n");
1444                 goto err_init_queue;
1445         }
1446         master->queued = true;
1447         ret = spi_start_queue(master);
1448         if (ret) {
1449                 dev_err(&master->dev, "problem starting queue\n");
1450                 goto err_start_queue;
1451         }
1452
1453         return 0;
1454
1455 err_start_queue:
1456         spi_destroy_queue(master);
1457 err_init_queue:
1458         return ret;
1459 }
1460
1461 /*-------------------------------------------------------------------------*/
1462
1463 #if defined(CONFIG_OF)
1464 static struct spi_device *
1465 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1466 {
1467         struct spi_device *spi;
1468         int rc;
1469         u32 value;
1470
1471         /* Alloc an spi_device */
1472         spi = spi_alloc_device(master);
1473         if (!spi) {
1474                 dev_err(&master->dev, "spi_device alloc error for %s\n",
1475                         nc->full_name);
1476                 rc = -ENOMEM;
1477                 goto err_out;
1478         }
1479
1480         /* Select device driver */
1481         rc = of_modalias_node(nc, spi->modalias,
1482                                 sizeof(spi->modalias));
1483         if (rc < 0) {
1484                 dev_err(&master->dev, "cannot find modalias for %s\n",
1485                         nc->full_name);
1486                 goto err_out;
1487         }
1488
1489         /* Device address */
1490         rc = of_property_read_u32(nc, "reg", &value);
1491         if (rc) {
1492                 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1493                         nc->full_name, rc);
1494                 goto err_out;
1495         }
1496         spi->chip_select = value;
1497
1498         /* Mode (clock phase/polarity/etc.) */
1499         if (of_find_property(nc, "spi-cpha", NULL))
1500                 spi->mode |= SPI_CPHA;
1501         if (of_find_property(nc, "spi-cpol", NULL))
1502                 spi->mode |= SPI_CPOL;
1503         if (of_find_property(nc, "spi-cs-high", NULL))
1504                 spi->mode |= SPI_CS_HIGH;
1505         if (of_find_property(nc, "spi-3wire", NULL))
1506                 spi->mode |= SPI_3WIRE;
1507         if (of_find_property(nc, "spi-lsb-first", NULL))
1508                 spi->mode |= SPI_LSB_FIRST;
1509
1510         /* Device DUAL/QUAD mode */
1511         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1512                 switch (value) {
1513                 case 1:
1514                         break;
1515                 case 2:
1516                         spi->mode |= SPI_TX_DUAL;
1517                         break;
1518                 case 4:
1519                         spi->mode |= SPI_TX_QUAD;
1520                         break;
1521                 default:
1522                         dev_warn(&master->dev,
1523                                 "spi-tx-bus-width %d not supported\n",
1524                                 value);
1525                         break;
1526                 }
1527         }
1528
1529         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1530                 switch (value) {
1531                 case 1:
1532                         break;
1533                 case 2:
1534                         spi->mode |= SPI_RX_DUAL;
1535                         break;
1536                 case 4:
1537                         spi->mode |= SPI_RX_QUAD;
1538                         break;
1539                 default:
1540                         dev_warn(&master->dev,
1541                                 "spi-rx-bus-width %d not supported\n",
1542                                 value);
1543                         break;
1544                 }
1545         }
1546
1547         /* Device speed */
1548         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1549         if (rc) {
1550                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1551                         nc->full_name, rc);
1552                 goto err_out;
1553         }
1554         spi->max_speed_hz = value;
1555
1556         /* Store a pointer to the node in the device structure */
1557         of_node_get(nc);
1558         spi->dev.of_node = nc;
1559
1560         /* Register the new device */
1561         rc = spi_add_device(spi);
1562         if (rc) {
1563                 dev_err(&master->dev, "spi_device register error %s\n",
1564                         nc->full_name);
1565                 goto err_out;
1566         }
1567
1568         return spi;
1569
1570 err_out:
1571         spi_dev_put(spi);
1572         return ERR_PTR(rc);
1573 }
1574
1575 /**
1576  * of_register_spi_devices() - Register child devices onto the SPI bus
1577  * @master:     Pointer to spi_master device
1578  *
1579  * Registers an spi_device for each child node of the master node which has
1580  * a 'reg' property.
1581  */
1582 static void of_register_spi_devices(struct spi_master *master)
1583 {
1584         struct spi_device *spi;
1585         struct device_node *nc;
1586
1587         if (!master->dev.of_node)
1588                 return;
1589
1590         for_each_available_child_of_node(master->dev.of_node, nc) {
1591                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1592                         continue;
1593                 spi = of_register_spi_device(master, nc);
1594                 if (IS_ERR(spi))
1595                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1596                                 nc->full_name);
1597         }
1598 }
1599 #else
1600 static void of_register_spi_devices(struct spi_master *master) { }
1601 #endif
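
/*
 * For reference, a hypothetical child node carrying the properties parsed
 * above might look like this (devicetree fragment shown as a comment;
 * "vendor,foo-sensor" and the values are made-up):
 *
 *	foo-sensor@0 {
 *		compatible = "vendor,foo-sensor";
 *		reg = <0>;
 *		spi-max-frequency = <1000000>;
 *		spi-cpha;
 *		spi-cpol;
 *	};
 *
 * "reg" becomes the chip select, "spi-max-frequency" sets max_speed_hz, and
 * the optional flag properties map to SPI_CPHA, SPI_CPOL, SPI_CS_HIGH, etc.
 */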
1602
1603 #ifdef CONFIG_ACPI
1604 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1605 {
1606         struct spi_device *spi = data;
1607         struct spi_master *master = spi->master;
1608
1609         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1610                 struct acpi_resource_spi_serialbus *sb;
1611
1612                 sb = &ares->data.spi_serial_bus;
1613                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1614                         /*
1615                          * ACPI DeviceSelection numbering is handled by the
1616                          * host controller driver in Windows and can vary
1617                          * from driver to driver. In Linux we always expect
1618                          * 0 .. max - 1 so we need to ask the driver to
1619                          * translate between the two schemes.
1620                          */
1621                         if (master->fw_translate_cs) {
1622                                 int cs = master->fw_translate_cs(master,
1623                                                 sb->device_selection);
1624                                 if (cs < 0)
1625                                         return cs;
1626                                 spi->chip_select = cs;
1627                         } else {
1628                                 spi->chip_select = sb->device_selection;
1629                         }
1630
1631                         spi->max_speed_hz = sb->connection_speed;
1632
1633                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1634                                 spi->mode |= SPI_CPHA;
1635                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1636                                 spi->mode |= SPI_CPOL;
1637                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1638                                 spi->mode |= SPI_CS_HIGH;
1639                 }
1640         } else if (spi->irq < 0) {
1641                 struct resource r;
1642
1643                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1644                         spi->irq = r.start;
1645         }
1646
1647         /* Always tell the ACPI core to skip this resource */
1648         return 1;
1649 }
1650
1651 static acpi_status acpi_register_spi_device(struct spi_master *master,
1652                                             struct acpi_device *adev)
1653 {
1654         struct list_head resource_list;
1655         struct spi_device *spi;
1656         int ret;
1657
1658         if (acpi_bus_get_status(adev) || !adev->status.present ||
1659             acpi_device_enumerated(adev))
1660                 return AE_OK;
1661
1662         spi = spi_alloc_device(master);
1663         if (!spi) {
1664                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1665                         dev_name(&adev->dev));
1666                 return AE_NO_MEMORY;
1667         }
1668
1669         ACPI_COMPANION_SET(&spi->dev, adev);
1670         spi->irq = -1;
1671
1672         INIT_LIST_HEAD(&resource_list);
1673         ret = acpi_dev_get_resources(adev, &resource_list,
1674                                      acpi_spi_add_resource, spi);
1675         acpi_dev_free_resource_list(&resource_list);
1676
1677         if (ret < 0 || !spi->max_speed_hz) {
1678                 spi_dev_put(spi);
1679                 return AE_OK;
1680         }
1681
1682         if (spi->irq < 0)
1683                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1684
1685         acpi_device_set_enumerated(adev);
1686
1687         adev->power.flags.ignore_parent = true;
1688         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1689         if (spi_add_device(spi)) {
1690                 adev->power.flags.ignore_parent = false;
1691                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1692                         dev_name(&adev->dev));
1693                 spi_dev_put(spi);
1694         }
1695
1696         return AE_OK;
1697 }
1698
1699 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1700                                        void *data, void **return_value)
1701 {
1702         struct spi_master *master = data;
1703         struct acpi_device *adev;
1704
1705         if (acpi_bus_get_device(handle, &adev))
1706                 return AE_OK;
1707
1708         return acpi_register_spi_device(master, adev);
1709 }
1710
1711 static void acpi_register_spi_devices(struct spi_master *master)
1712 {
1713         acpi_status status;
1714         acpi_handle handle;
1715
1716         handle = ACPI_HANDLE(master->dev.parent);
1717         if (!handle)
1718                 return;
1719
1720         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1721                                      acpi_spi_add_device, NULL,
1722                                      master, NULL);
1723         if (ACPI_FAILURE(status))
1724                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1725 }
1726 #else
1727 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1728 #endif /* CONFIG_ACPI */
1729
1730 static void spi_master_release(struct device *dev)
1731 {
1732         struct spi_master *master;
1733
1734         master = container_of(dev, struct spi_master, dev);
1735         kfree(master);
1736 }
1737
1738 static struct class spi_master_class = {
1739         .name           = "spi_master",
1740         .owner          = THIS_MODULE,
1741         .dev_release    = spi_master_release,
1742         .dev_groups     = spi_master_groups,
1743 };
1744
1745
1746 /**
1747  * spi_alloc_master - allocate SPI master controller
1748  * @dev: the controller, possibly using the platform_bus
1749  * @size: how much zeroed driver-private data to allocate; the pointer to this
1750  *      memory is in the driver_data field of the returned device,
1751  *      accessible with spi_master_get_devdata().
1752  * Context: can sleep
1753  *
1754  * This call is used only by SPI master controller drivers, which are the
1755  * only ones directly touching chip registers.  It's how they allocate
1756  * an spi_master structure, prior to calling spi_register_master().
1757  *
1758  * This must be called from context that can sleep.
1759  *
1760  * The caller is responsible for assigning the bus number and initializing
1761  * the master's methods before calling spi_register_master(); and (after errors
1762  * adding the device) calling spi_master_put() to prevent a memory leak.
1763  *
1764  * Return: the SPI master structure on success, else NULL.
1765  */
1766 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1767 {
1768         struct spi_master       *master;
1769
1770         if (!dev)
1771                 return NULL;
1772
1773         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1774         if (!master)
1775                 return NULL;
1776
1777         device_initialize(&master->dev);
1778         master->bus_num = -1;
1779         master->num_chipselect = 1;
1780         master->dev.class = &spi_master_class;
1781         master->dev.parent = dev;
1782         pm_suspend_ignore_children(&master->dev, true);
1783         spi_master_set_devdata(master, &master[1]);
1784
1785         return master;
1786 }
1787 EXPORT_SYMBOL_GPL(spi_alloc_master);
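
/*
 * Example (sketch of a hypothetical controller driver's probe(), not part
 * of this file): the master is allocated together with the driver-private
 * data, and the mandatory fields are filled in before registration.
 * "struct foo_priv", "pdev" and "foo_transfer_one" are assumptions of the
 * sketch.
 *
 *	struct spi_master *master;
 *	struct foo_priv *priv;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *
 *	priv = spi_master_get_devdata(master);
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 *	master->transfer_one = foo_transfer_one;
 */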
1788
1789 #ifdef CONFIG_OF
1790 static int of_spi_register_master(struct spi_master *master)
1791 {
1792         int nb, i, *cs;
1793         struct device_node *np = master->dev.of_node;
1794
1795         if (!np)
1796                 return 0;
1797
1798         nb = of_gpio_named_count(np, "cs-gpios");
1799         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1800
1801         /* Return error only for an incorrectly formed cs-gpios property */
1802         if (nb == 0 || nb == -ENOENT)
1803                 return 0;
1804         else if (nb < 0)
1805                 return nb;
1806
1807         cs = devm_kzalloc(&master->dev,
1808                           sizeof(int) * master->num_chipselect,
1809                           GFP_KERNEL);
1810         master->cs_gpios = cs;
1811
1812         if (!master->cs_gpios)
1813                 return -ENOMEM;
1814
1815         for (i = 0; i < master->num_chipselect; i++)
1816                 cs[i] = -ENOENT;
1817
1818         for (i = 0; i < nb; i++)
1819                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1820
1821         return 0;
1822 }
1823 #else
1824 static int of_spi_register_master(struct spi_master *master)
1825 {
1826         return 0;
1827 }
1828 #endif
1829
1830 /**
1831  * spi_register_master - register SPI master controller
1832  * @master: initialized master, originally from spi_alloc_master()
1833  * Context: can sleep
1834  *
1835  * SPI master controllers connect to their drivers using some non-SPI bus,
1836  * such as the platform bus.  The final stage of probe() in that code
1837  * includes calling spi_register_master() to hook up to this SPI bus glue.
1838  *
1839  * SPI controllers use board specific (often SOC specific) bus numbers,
1840  * and board-specific addressing for SPI devices combines those numbers
1841  * with chip select numbers.  Since SPI does not directly support dynamic
1842  * device identification, boards need configuration tables telling which
1843  * chip is at which address.
1844  *
1845  * This must be called from context that can sleep.  It returns zero on
1846  * success, else a negative error code (dropping the master's refcount).
1847  * After a successful return, the caller is responsible for calling
1848  * spi_unregister_master().
1849  *
1850  * Return: zero on success, else a negative error code.
1851  */
1852 int spi_register_master(struct spi_master *master)
1853 {
1854         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1855         struct device           *dev = master->dev.parent;
1856         struct boardinfo        *bi;
1857         int                     status = -ENODEV;
1858         int                     dynamic = 0;
1859
1860         if (!dev)
1861                 return -ENODEV;
1862
1863         status = of_spi_register_master(master);
1864         if (status)
1865                 return status;
1866
1867         /* even if it's just one always-selected device, there must
1868          * be at least one chipselect
1869          */
1870         if (master->num_chipselect == 0)
1871                 return -EINVAL;
1872
1873         if ((master->bus_num < 0) && master->dev.of_node)
1874                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1875
1876         /* convention:  dynamically assigned bus IDs count down from the max */
1877         if (master->bus_num < 0) {
1878                 /* FIXME switch to an IDR based scheme, something like
1879                  * I2C now uses, so we can't run out of "dynamic" IDs
1880                  */
1881                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1882                 dynamic = 1;
1883         }
1884
1885         INIT_LIST_HEAD(&master->queue);
1886         spin_lock_init(&master->queue_lock);
1887         spin_lock_init(&master->bus_lock_spinlock);
1888         mutex_init(&master->bus_lock_mutex);
1889         master->bus_lock_flag = 0;
1890         init_completion(&master->xfer_completion);
1891         if (!master->max_dma_len)
1892                 master->max_dma_len = INT_MAX;
1893
1894         /* register the device, then userspace will see it.
1895          * registration fails if the bus ID is in use.
1896          */
1897         dev_set_name(&master->dev, "spi%u", master->bus_num);
1898         status = device_add(&master->dev);
1899         if (status < 0)
1900                 goto done;
1901         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1902                         dynamic ? " (dynamic)" : "");
1903
1904         /* If we're using a queued driver, start the queue */
1905         if (master->transfer)
1906                 dev_info(dev, "master is unqueued, this is deprecated\n");
1907         else {
1908                 status = spi_master_initialize_queue(master);
1909                 if (status) {
1910                         device_del(&master->dev);
1911                         goto done;
1912                 }
1913         }
1914         /* add statistics */
1915         spin_lock_init(&master->statistics.lock);
1916
1917         mutex_lock(&board_lock);
1918         list_add_tail(&master->list, &spi_master_list);
1919         list_for_each_entry(bi, &board_list, list)
1920                 spi_match_master_to_boardinfo(master, &bi->board_info);
1921         mutex_unlock(&board_lock);
1922
1923         /* Register devices from the device tree and ACPI */
1924         of_register_spi_devices(master);
1925         acpi_register_spi_devices(master);
1926 done:
1927         return status;
1928 }
1929 EXPORT_SYMBOL_GPL(spi_register_master);
1930
1931 static void devm_spi_unregister(struct device *dev, void *res)
1932 {
1933         spi_unregister_master(*(struct spi_master **)res);
1934 }
1935
1936 /**
1937  * devm_spi_register_master - register managed SPI master controller
1938  * @dev:    device managing SPI master
1939  * @master: initialized master, originally from spi_alloc_master()
1940  * Context: can sleep
1941  *
1942  * Register an SPI master as with spi_register_master(), with the master
1943  * automatically unregistered when @dev is unbound.
1944  *
1945  * Return: zero on success, else a negative error code.
1946  */
1947 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1948 {
1949         struct spi_master **ptr;
1950         int ret;
1951
1952         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1953         if (!ptr)
1954                 return -ENOMEM;
1955
1956         ret = spi_register_master(master);
1957         if (!ret) {
1958                 *ptr = master;
1959                 devres_add(dev, ptr);
1960         } else {
1961                 devres_free(ptr);
1962         }
1963
1964         return ret;
1965 }
1966 EXPORT_SYMBOL_GPL(devm_spi_register_master);
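
/*
 * Example (sketch, continuing the hypothetical probe() above): with the
 * managed variant the driver only needs to drop its own reference on
 * failure; unregistration is handled automatically on unbind.
 *
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret) {
 *		spi_master_put(master);
 *		return ret;
 *	}
 *	return 0;
 */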
1967
1968 static int __unregister(struct device *dev, void *null)
1969 {
1970         spi_unregister_device(to_spi_device(dev));
1971         return 0;
1972 }
1973
1974 /**
1975  * spi_unregister_master - unregister SPI master controller
1976  * @master: the master being unregistered
1977  * Context: can sleep
1978  *
1979  * This call is used only by SPI master controller drivers, which are the
1980  * only ones directly touching chip registers.
1981  *
1982  * This must be called from context that can sleep.
1983  */
1984 void spi_unregister_master(struct spi_master *master)
1985 {
1986         int dummy;
1987
1988         if (master->queued) {
1989                 if (spi_destroy_queue(master))
1990                         dev_err(&master->dev, "queue remove failed\n");
1991         }
1992
1993         mutex_lock(&board_lock);
1994         list_del(&master->list);
1995         mutex_unlock(&board_lock);
1996
1997         dummy = device_for_each_child(&master->dev, NULL, __unregister);
1998         device_unregister(&master->dev);
1999 }
2000 EXPORT_SYMBOL_GPL(spi_unregister_master);
2001
2002 int spi_master_suspend(struct spi_master *master)
2003 {
2004         int ret;
2005
2006         /* Basically no-ops for non-queued masters */
2007         if (!master->queued)
2008                 return 0;
2009
2010         ret = spi_stop_queue(master);
2011         if (ret)
2012                 dev_err(&master->dev, "queue stop failed\n");
2013
2014         return ret;
2015 }
2016 EXPORT_SYMBOL_GPL(spi_master_suspend);
2017
2018 int spi_master_resume(struct spi_master *master)
2019 {
2020         int ret;
2021
2022         if (!master->queued)
2023                 return 0;
2024
2025         ret = spi_start_queue(master);
2026         if (ret)
2027                 dev_err(&master->dev, "queue restart failed\n");
2028
2029         return ret;
2030 }
2031 EXPORT_SYMBOL_GPL(spi_master_resume);
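
/*
 * Example (hypothetical system sleep callbacks, not part of this file):
 * controller drivers typically wrap these helpers in their dev_pm_ops,
 * assuming probe() stored the master with platform_set_drvdata().
 * The "foo_" names are assumptions.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_suspend(master);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_resume(master);
 *	}
 */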
2032
2033 static int __spi_master_match(struct device *dev, const void *data)
2034 {
2035         struct spi_master *m;
2036         const u16 *bus_num = data;
2037
2038         m = container_of(dev, struct spi_master, dev);
2039         return m->bus_num == *bus_num;
2040 }
2041
2042 /**
2043  * spi_busnum_to_master - look up master associated with bus_num
2044  * @bus_num: the master's bus number
2045  * Context: can sleep
2046  *
2047  * This call may be used with devices that are registered after
2048  * arch init time.  It returns a refcounted pointer to the relevant
2049  * spi_master (which the caller must release), or NULL if there is
2050  * no such master registered.
2051  *
2052  * Return: the SPI master structure on success, else NULL.
2053  */
2054 struct spi_master *spi_busnum_to_master(u16 bus_num)
2055 {
2056         struct device           *dev;
2057         struct spi_master       *master = NULL;
2058
2059         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2060                                 __spi_master_match);
2061         if (dev)
2062                 master = container_of(dev, struct spi_master, dev);
2063         /* reference got in class_find_device */
2064         return master;
2065 }
2066 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
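
/*
 * Example (sketch): the reference taken in class_find_device() must be
 * dropped by the caller once it is done with the master:
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		...
 *		spi_master_put(master);
 *	}
 */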
2067
2068 /*-------------------------------------------------------------------------*/
2069
2070 /* Core methods for SPI resource management */
2071
2072 /**
2073  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2074  *                 during the processing of a spi_message while using
2075  *                 spi_transfer_one
2076  * @spi:     the spi device for which we allocate memory
2077  * @release: the release code to execute for this resource
2078  * @size:    size to alloc and return
2079  * @gfp:     GFP allocation flags
2080  *
2081  * Return: the pointer to the allocated data
2082  *
2083  * This may get enhanced in the future to allocate from a memory pool
2084  * of the @spi_device or @spi_master to avoid repeated allocations.
2085  */
2086 void *spi_res_alloc(struct spi_device *spi,
2087                     spi_res_release_t release,
2088                     size_t size, gfp_t gfp)
2089 {
2090         struct spi_res *sres;
2091
2092         sres = kzalloc(sizeof(*sres) + size, gfp);
2093         if (!sres)
2094                 return NULL;
2095
2096         INIT_LIST_HEAD(&sres->entry);
2097         sres->release = release;
2098
2099         return sres->data;
2100 }
2101 EXPORT_SYMBOL_GPL(spi_res_alloc);
2102
2103 /**
2104  * spi_res_free - free an spi resource
2105  * @res: pointer to the custom data of a resource
2106  *
2107  */
2108 void spi_res_free(void *res)
2109 {
2110         struct spi_res *sres = container_of(res, struct spi_res, data);
2111
2112         if (!res)
2113                 return;
2114
2115         WARN_ON(!list_empty(&sres->entry));
2116         kfree(sres);
2117 }
2118 EXPORT_SYMBOL_GPL(spi_res_free);
2119
2120 /**
2121  * spi_res_add - add a spi_res to the spi_message
2122  * @message: the spi message
2123  * @res:     the spi_resource
2124  */
2125 void spi_res_add(struct spi_message *message, void *res)
2126 {
2127         struct spi_res *sres = container_of(res, struct spi_res, data);
2128
2129         WARN_ON(!list_empty(&sres->entry));
2130         list_add_tail(&sres->entry, &message->resources);
2131 }
2132 EXPORT_SYMBOL_GPL(spi_res_add);
2133
2134 /**
2135  * spi_res_release - release all spi resources for this message
2136  * @master:  the @spi_master
2137  * @message: the @spi_message
2138  */
2139 void spi_res_release(struct spi_master *master,
2140                      struct spi_message *message)
2141 {
2142         struct spi_res *res;
2143
2144         while (!list_empty(&message->resources)) {
2145                 res = list_last_entry(&message->resources,
2146                                       struct spi_res, entry);
2147
2148                 if (res->release)
2149                         res->release(master, message, res->data);
2150
2151                 list_del(&res->entry);
2152
2153                 kfree(res);
2154         }
2155 }
2156 EXPORT_SYMBOL_GPL(spi_res_release);
2157
2158 /*-------------------------------------------------------------------------*/
2159
2160 /* Core methods for spi_message alterations */
2161
2162 static void __spi_replace_transfers_release(struct spi_master *master,
2163                                             struct spi_message *msg,
2164                                             void *res)
2165 {
2166         struct spi_replaced_transfers *rxfer = res;
2167         size_t i;
2168
2169         /* call extra callback if requested */
2170         if (rxfer->release)
2171                 rxfer->release(master, msg, res);
2172
2173         /* insert replaced transfers back into the message */
2174         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2175
2176         /* remove the formerly inserted entries */
2177         for (i = 0; i < rxfer->inserted; i++)
2178                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2179 }
2180
2181 /**
2182  * spi_replace_transfers - replace transfers with several transfers
2183  *                         and register change with spi_message.resources
2184  * @msg:           the spi_message we work upon
2185  * @xfer_first:    the first spi_transfer we want to replace
2186  * @remove:        number of transfers to remove
2187  * @insert:        the number of transfers we want to insert instead
2188  * @release:       extra release code necessary in some circumstances
2189  * @extradatasize: extra data to allocate (with alignment guarantees
2190  *                 of struct @spi_transfer)
2191  * @gfp:           gfp flags
2192  *
2193  * Return: pointer to @spi_replaced_transfers,
2194  *         ERR_PTR(...) in case of errors.
2195  */
2196 struct spi_replaced_transfers *spi_replace_transfers(
2197         struct spi_message *msg,
2198         struct spi_transfer *xfer_first,
2199         size_t remove,
2200         size_t insert,
2201         spi_replaced_release_t release,
2202         size_t extradatasize,
2203         gfp_t gfp)
2204 {
2205         struct spi_replaced_transfers *rxfer;
2206         struct spi_transfer *xfer;
2207         size_t i;
2208
2209         /* allocate the structure using spi_res */
2210         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2211                               insert * sizeof(struct spi_transfer)
2212                               + sizeof(struct spi_replaced_transfers)
2213                               + extradatasize,
2214                               gfp);
2215         if (!rxfer)
2216                 return ERR_PTR(-ENOMEM);
2217
2218         /* the release code to invoke before running the generic release */
2219         rxfer->release = release;
2220
2221         /* assign extradata */
2222         if (extradatasize)
2223                 rxfer->extradata =
2224                         &rxfer->inserted_transfers[insert];
2225
2226         /* init the replaced_transfers list */
2227         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2228
2229         /* assign the list_entry after which we should reinsert
2230          * the @replaced_transfers - it may be spi_message.messages!
2231          */
2232         rxfer->replaced_after = xfer_first->transfer_list.prev;
2233
2234         /* remove the requested number of transfers */
2235         for (i = 0; i < remove; i++) {
2236                 /* if the entry after replaced_after is &msg->transfers
2237                  * (the list head), then we have been asked to remove more
2238                  * transfers than are in the list
2239                  */
2240                 if (rxfer->replaced_after->next == &msg->transfers) {
2241                         dev_err(&msg->spi->dev,
2242                                 "requested to remove more spi_transfers than are available\n");
2243                         /* insert replaced transfers back into the message */
2244                         list_splice(&rxfer->replaced_transfers,
2245                                     rxfer->replaced_after);
2246
2247                         /* free the spi_replace_transfer structure */
2248                         spi_res_free(rxfer);
2249
2250                         /* and return with an error */
2251                         return ERR_PTR(-EINVAL);
2252                 }
2253
2254                 /* remove the entry after replaced_after from list of
2255                  * transfers and add it to list of replaced_transfers
2256                  */
2257                 list_move_tail(rxfer->replaced_after->next,
2258                                &rxfer->replaced_transfers);
2259         }
2260
2261         /* create copy of the given xfer with identical settings
2262          * based on the first transfer to get removed
2263          */
2264         for (i = 0; i < insert; i++) {
2265                 /* we need to run in reverse order */
2266                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2267
2268                 /* copy all spi_transfer data */
2269                 memcpy(xfer, xfer_first, sizeof(*xfer));
2270
2271                 /* add to list */
2272                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2273
2274                 /* clear cs_change and delay_usecs for all but the last */
2275                 if (i) {
2276                         xfer->cs_change = false;
2277                         xfer->delay_usecs = 0;
2278                 }
2279         }
2280
2281         /* set up inserted */
2282         rxfer->inserted = insert;
2283
2284         /* and register it with spi_res/spi_message */
2285         spi_res_add(msg, rxfer);
2286
2287         return rxfer;
2288 }
2289 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2290
2291 static int __spi_split_transfer_maxsize(struct spi_master *master,
2292                                         struct spi_message *msg,
2293                                         struct spi_transfer **xferp,
2294                                         size_t maxsize,
2295                                         gfp_t gfp)
2296 {
2297         struct spi_transfer *xfer = *xferp, *xfers;
2298         struct spi_replaced_transfers *srt;
2299         size_t offset;
2300         size_t count, i;
2301
2302         /* warn once about this fact that we are splitting a transfer */
2303         dev_warn_once(&msg->spi->dev,
2304                       "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2305                       xfer->len, maxsize);
2306
2307         /* calculate how many we have to replace */
2308         count = DIV_ROUND_UP(xfer->len, maxsize);
2309
2310         /* create replacement */
2311         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2312         if (IS_ERR(srt))
2313                 return PTR_ERR(srt);
2314         xfers = srt->inserted_transfers;
2315
2316         /* now handle each of those newly inserted spi_transfers.
2317          * Note that the replacement spi_transfers are all preset
2318          * to the same values as *xferp, so tx_buf, rx_buf and len
2319          * are all identical (as well as most others),
2320          * so we just have to fix up len and the pointers.
2321          *
2322          * This also includes support for the deprecated
2323          * spi_message.is_dma_mapped interface.
2324          */
2325
2326         /* the first transfer just needs the length modified, so we
2327          * run it outside the loop
2328          */
2329         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2330
2331         /* all the others need rx_buf/tx_buf also set */
2332         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2333                 /* update rx_buf, tx_buf and dma */
2334                 if (xfers[i].rx_buf)
2335                         xfers[i].rx_buf += offset;
2336                 if (xfers[i].rx_dma)
2337                         xfers[i].rx_dma += offset;
2338                 if (xfers[i].tx_buf)
2339                         xfers[i].tx_buf += offset;
2340                 if (xfers[i].tx_dma)
2341                         xfers[i].tx_dma += offset;
2342
2343                 /* update length */
2344                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2345         }
2346
2347         /* we set up xferp to the last entry we have inserted,
2348          * so that we skip those already split transfers
2349          */
2350         *xferp = &xfers[count - 1];
2351
2352         /* increment statistics counters */
2353         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2354                                        transfers_split_maxsize);
2355         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2356                                        transfers_split_maxsize);
2357
2358         return 0;
2359 }
2360
2361 /**
2362  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2363  *                               when an individual transfer exceeds a
2364  *                               certain size
2365  * @master:    the @spi_master for this transfer
2366  * @msg:   the @spi_message to transform
2367  * @maxsize:  the maximum size an individual transfer may have before it is split
2368  * @gfp: GFP allocation flags
2369  *
2370  * Return: status of transformation
2371  */
2372 int spi_split_transfers_maxsize(struct spi_master *master,
2373                                 struct spi_message *msg,
2374                                 size_t maxsize,
2375                                 gfp_t gfp)
2376 {
2377         struct spi_transfer *xfer;
2378         int ret;
2379
2380         /* iterate over the transfer_list,
2381          * but note that xfer is advanced to the last transfer inserted
2382          * to avoid checking sizes again unnecessarily (also, xfer may
2383          * belong to a different list by the time the
2384          * replacement has happened).
2385          */
2386         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2387                 if (xfer->len > maxsize) {
2388                         ret = __spi_split_transfer_maxsize(
2389                                 master, msg, &xfer, maxsize, gfp);
2390                         if (ret)
2391                                 return ret;
2392                 }
2393         }
2394
2395         return 0;
2396 }
2397 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
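
/*
 * Example (hypothetical use in a controller driver, not part of this
 * file): a driver whose hardware cannot handle transfers longer than
 * some limit could call this from its prepare_message() hook.
 * "FOO_MAX_XFER_LEN" is an assumption.
 *
 *	static int foo_prepare_message(struct spi_master *master,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg,
 *						   FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 */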
2398
2399 /*-------------------------------------------------------------------------*/
2400
2401 /* Core methods for SPI master protocol drivers.  Some of the
2402  * other core methods are currently defined as inline functions.
2403  */
2404
2405 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2406 {
2407         if (master->bits_per_word_mask) {
2408                 /* Only 32 bits fit in the mask */
2409                 if (bits_per_word > 32)
2410                         return -EINVAL;
2411                 if (!(master->bits_per_word_mask &
2412                                 SPI_BPW_MASK(bits_per_word)))
2413                         return -EINVAL;
2414         }
2415
2416         return 0;
2417 }
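
/*
 * Example (sketch): controller drivers describe the word sizes they can
 * handle with the SPI_BPW_* helpers, which the check above tests against:
 *
 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 * or, for a contiguous range of supported sizes:
 *
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 *
 * Leaving the mask at zero skips this core-level check entirely.
 */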
2418
2419 /**
2420  * spi_setup - setup SPI mode and clock rate
2421  * @spi: the device whose settings are being modified
2422  * Context: can sleep, and no requests are queued to the device
2423  *
2424  * SPI protocol drivers may need to update the transfer mode if the
2425  * device doesn't work with its default.  They may likewise need
2426  * to update clock rates or word sizes from initial values.  This function
2427  * changes those settings, and must be called from a context that can sleep.
2428  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2429  * effect the next time the device is selected and data is transferred to
2430  * or from it.  When this function returns, the spi device is deselected.
2431  *
2432  * Note that this call will fail if the protocol driver specifies an option
2433  * that the underlying controller or its driver does not support.  For
2434  * example, not all hardware supports wire transfers using nine bit words,
2435  * LSB-first wire encoding, or active-high chipselects.
2436  *
2437  * Return: zero on success, else a negative error code.
2438  */
2439 int spi_setup(struct spi_device *spi)
2440 {
2441         unsigned        bad_bits, ugly_bits;
2442         int             status;
2443
2444         /* check mode to prevent DUAL and QUAD being set at the same time
2445          */
2446         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2447                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2448                 dev_err(&spi->dev,
2449                 "setup: can not select dual and quad at the same time\n");
2450                 return -EINVAL;
2451         }
2452         /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2453          */
2454         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2455                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2456                 return -EINVAL;
2457         /* help drivers fail *cleanly* when they need options
2458          * that aren't supported with their current master
2459          */
2460         bad_bits = spi->mode & ~spi->master->mode_bits;
2461         ugly_bits = bad_bits &
2462                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2463         if (ugly_bits) {
2464                 dev_warn(&spi->dev,
2465                          "setup: ignoring unsupported mode bits %x\n",
2466                          ugly_bits);
2467                 spi->mode &= ~ugly_bits;
2468                 bad_bits &= ~ugly_bits;
2469         }
2470         if (bad_bits) {
2471                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2472                         bad_bits);
2473                 return -EINVAL;
2474         }
2475
2476         if (!spi->bits_per_word)
2477                 spi->bits_per_word = 8;
2478
2479         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2480         if (status)
2481                 return status;
2482
2483         if (!spi->max_speed_hz)
2484                 spi->max_speed_hz = spi->master->max_speed_hz;
2485
2486         if (spi->master->setup)
2487                 status = spi->master->setup(spi);
2488
2489         spi_set_cs(spi, false);
2490
2491         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2492                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2493                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2494                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2495                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2496                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2497                         spi->bits_per_word, spi->max_speed_hz,
2498                         status);
2499
2500         return status;
2501 }
2502 EXPORT_SYMBOL_GPL(spi_setup);
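
/*
 * Example (hypothetical protocol driver code, not part of this file):
 * a client driver usually adjusts only the fields it cares about and
 * calls spi_setup() once from its probe(); the values are assumptions.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */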
2503
2504 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2505 {
2506         struct spi_master *master = spi->master;
2507         struct spi_transfer *xfer;
2508         int w_size;
2509
2510         if (list_empty(&message->transfers))
2511                 return -EINVAL;
2512
2513         /* Half-duplex links include original MicroWire, and ones with
2514          * only one data pin like SPI_3WIRE (switches direction) or where
2515          * either MOSI or MISO is missing.  They can also be caused by
2516          * software limitations.
2517          */
2518         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2519                         || (spi->mode & SPI_3WIRE)) {
2520                 unsigned flags = master->flags;
2521
2522                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2523                         if (xfer->rx_buf && xfer->tx_buf)
2524                                 return -EINVAL;
2525                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2526                                 return -EINVAL;
2527                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2528                                 return -EINVAL;
2529                 }
2530         }
2531
2532         /*
2533          * Set transfer bits_per_word and max speed as spi device default if
2534          * it is not set for this transfer.
2535          * Set transfer tx_nbits and rx_nbits as single transfer default
2536          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2537          */
2538         message->frame_length = 0;
2539         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2540                 message->frame_length += xfer->len;
2541                 if (!xfer->bits_per_word)
2542                         xfer->bits_per_word = spi->bits_per_word;
2543
2544                 if (!xfer->speed_hz)
2545                         xfer->speed_hz = spi->max_speed_hz;
2546                 if (!xfer->speed_hz)
2547                         xfer->speed_hz = master->max_speed_hz;
2548
2549                 if (master->max_speed_hz &&
2550                     xfer->speed_hz > master->max_speed_hz)
2551                         xfer->speed_hz = master->max_speed_hz;
2552
2553                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2554                         return -EINVAL;
2555
2556                 /*
2557                  * SPI transfer length should be multiple of SPI word size
2558                  * where SPI word size should be power-of-two multiple
2559                  */
2560                 if (xfer->bits_per_word <= 8)
2561                         w_size = 1;
2562                 else if (xfer->bits_per_word <= 16)
2563                         w_size = 2;
2564                 else
2565                         w_size = 4;
2566
2567                 /* No partial transfers accepted */
2568                 if (xfer->len % w_size)
2569                         return -EINVAL;
2570
2571                 if (xfer->speed_hz && master->min_speed_hz &&
2572                     xfer->speed_hz < master->min_speed_hz)
2573                         return -EINVAL;
2574
2575                 if (xfer->tx_buf && !xfer->tx_nbits)
2576                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2577                 if (xfer->rx_buf && !xfer->rx_nbits)
2578                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2579                 /* check transfer tx/rx_nbits:
2580                  * 1. check the value matches one of single, dual and quad
2581                  * 2. check tx/rx_nbits match the mode in spi_device
2582                  */
2583                 if (xfer->tx_buf) {
2584                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2585                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2586                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2587                                 return -EINVAL;
2588                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2589                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2590                                 return -EINVAL;
2591                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2592                                 !(spi->mode & SPI_TX_QUAD))
2593                                 return -EINVAL;
2594                 }
2595                 /* check transfer rx_nbits */
2596                 if (xfer->rx_buf) {
2597                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2598                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2599                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2600                                 return -EINVAL;
2601                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2602                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2603                                 return -EINVAL;
2604                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2605                                 !(spi->mode & SPI_RX_QUAD))
2606                                 return -EINVAL;
2607                 }
2608         }
2609
2610         message->status = -EINPROGRESS;
2611
2612         return 0;
2613 }
2614
2615 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2616 {
2617         struct spi_master *master = spi->master;
2618
2619         message->spi = spi;
2620
2621         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2622         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2623
2624         trace_spi_message_submit(message);
2625
2626         return master->transfer(spi, message);
2627 }
2628
2629 /**
2630  * spi_async - asynchronous SPI transfer
2631  * @spi: device with which data will be exchanged
2632  * @message: describes the data transfers, including completion callback
2633  * Context: any (irqs may be blocked, etc)
2634  *
2635  * This call may be used in_irq and other contexts which can't sleep,
2636  * as well as from task contexts which can sleep.
2637  *
2638  * The completion callback is invoked in a context which can't sleep.
2639  * Before that invocation, the value of message->status is undefined.
2640  * When the callback is issued, message->status holds either zero (to
2641  * indicate complete success) or a negative error code.  After that
2642  * callback returns, the driver which issued the transfer request may
2643  * deallocate the associated memory; it's no longer in use by any SPI
2644  * core or controller driver code.
2645  *
2646  * Note that although all messages to a spi_device are handled in
2647  * FIFO order, messages may go to different devices in other orders.
2648  * Some device might be higher priority, or have various "hard" access
2649  * time requirements, for example.
2650  *
2651  * On detection of any fault during the transfer, processing of
2652  * the entire message is aborted, and the device is deselected.
2653  * Until returning from the associated message completion callback,
2654  * no other spi_message queued to that device will be processed.
2655  * (This rule applies equally to all the synchronous transfer calls,
2656  * which are wrappers around this core asynchronous primitive.)
2657  *
2658  * Return: zero on success, else a negative error code.
2659  */
2660 int spi_async(struct spi_device *spi, struct spi_message *message)
2661 {
2662         struct spi_master *master = spi->master;
2663         int ret;
2664         unsigned long flags;
2665
2666         ret = __spi_validate(spi, message);
2667         if (ret != 0)
2668                 return ret;
2669
2670         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2671
2672         if (master->bus_lock_flag)
2673                 ret = -EBUSY;
2674         else
2675                 ret = __spi_async(spi, message);
2676
2677         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2678
2679         return ret;
2680 }
2681 EXPORT_SYMBOL_GPL(spi_async);
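
/*
 * Example (sketch of an asynchronous submission, not part of this file):
 * the message, its transfers and buffers must stay valid until the
 * completion callback runs.  The "foo_" names are assumptions.
 *
 *	spi_message_init(&foo->msg);
 *	foo->xfer.tx_buf = foo->tx;
 *	foo->xfer.len = sizeof(foo->tx);
 *	spi_message_add_tail(&foo->xfer, &foo->msg);
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	ret = spi_async(spi, &foo->msg);
 *
 * foo_complete() is then invoked from a context that cannot sleep, with
 * foo->msg.status holding the result.
 */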
2682
2683 /**
2684  * spi_async_locked - version of spi_async with exclusive bus usage
2685  * @spi: device with which data will be exchanged
2686  * @message: describes the data transfers, including completion callback
2687  * Context: any (irqs may be blocked, etc)
2688  *
2689  * This call may be used in_irq and other contexts which can't sleep,
2690  * as well as from task contexts which can sleep.
2691  *
2692  * The completion callback is invoked in a context which can't sleep.
2693  * Before that invocation, the value of message->status is undefined.
2694  * When the callback is issued, message->status holds either zero (to
2695  * indicate complete success) or a negative error code.  After that
2696  * callback returns, the driver which issued the transfer request may
2697  * deallocate the associated memory; it's no longer in use by any SPI
2698  * core or controller driver code.
2699  *
2700  * Note that although all messages to a spi_device are handled in
2701  * FIFO order, messages may go to different devices in other orders.
2702  * Some device might be higher priority, or have various "hard" access
2703  * time requirements, for example.
2704  *
2705  * On detection of any fault during the transfer, processing of
2706  * the entire message is aborted, and the device is deselected.
2707  * Until returning from the associated message completion callback,
2708  * no other spi_message queued to that device will be processed.
2709  * (This rule applies equally to all the synchronous transfer calls,
2710  * which are wrappers around this core asynchronous primitive.)
2711  *
2712  * Return: zero on success, else a negative error code.
2713  */
2714 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2715 {
2716         struct spi_master *master = spi->master;
2717         int ret;
2718         unsigned long flags;
2719
2720         ret = __spi_validate(spi, message);
2721         if (ret != 0)
2722                 return ret;
2723
2724         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2725
2726         ret = __spi_async(spi, message);
2727
2728         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2729
2730         return ret;
2731
2732 }
2733 EXPORT_SYMBOL_GPL(spi_async_locked);
2734
2735
2736 int spi_flash_read(struct spi_device *spi,
2737                    struct spi_flash_read_message *msg)
2738
2739 {
2740         struct spi_master *master = spi->master;
2741         int ret;
2742
2743         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2744              msg->addr_nbits == SPI_NBITS_DUAL) &&
2745             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2746                 return -EINVAL;
2747         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2748              msg->addr_nbits == SPI_NBITS_QUAD) &&
2749             !(spi->mode & SPI_TX_QUAD))
2750                 return -EINVAL;
2751         if (msg->data_nbits == SPI_NBITS_DUAL &&
2752             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2753                 return -EINVAL;
2754         if (msg->data_nbits == SPI_NBITS_QUAD &&
2755             !(spi->mode &  SPI_RX_QUAD))
2756                 return -EINVAL;
2757
2758         if (master->auto_runtime_pm) {
2759                 ret = pm_runtime_get_sync(master->dev.parent);
2760                 if (ret < 0) {
2761                         dev_err(&master->dev, "Failed to power device: %d\n",
2762                                 ret);
2763                         return ret;
2764                 }
2765         }
2766         mutex_lock(&master->bus_lock_mutex);
2767         ret = master->spi_flash_read(spi, msg);
2768         mutex_unlock(&master->bus_lock_mutex);
2769         if (master->auto_runtime_pm)
2770                 pm_runtime_put(master->dev.parent);
2771
2772         return ret;
2773 }
2774 EXPORT_SYMBOL_GPL(spi_flash_read);
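
/*
 * Example (sketch of how a flash driver might fill in the message; the
 * field names beyond the *_nbits ones validated above are recalled from
 * the flash read API and the opcode/address values are assumptions):
 *
 *	struct spi_flash_read_message msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.buf = buf;
 *	msg.from = from;
 *	msg.len = len;
 *	msg.read_opcode = 0x03;
 *	msg.addr_width = 3;
 *	msg.dummy_bytes = 0;
 *	msg.opcode_nbits = SPI_NBITS_SINGLE;
 *	msg.addr_nbits = SPI_NBITS_SINGLE;
 *	msg.data_nbits = SPI_NBITS_SINGLE;
 *	ret = spi_flash_read(spi, &msg);
 */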
2775
2776 /*-------------------------------------------------------------------------*/
2777
2778 /* Utility methods for SPI master protocol drivers, layered on
2779  * top of the core.  Some other utility methods are defined as
2780  * inline functions.
2781  */
2782
2783 static void spi_complete(void *arg)
2784 {
2785         complete(arg);
2786 }
2787
2788 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2789                       int bus_locked)
2790 {
2791         DECLARE_COMPLETION_ONSTACK(done);
2792         int status;
2793         struct spi_master *master = spi->master;
2794         unsigned long flags;
2795
2796         status = __spi_validate(spi, message);
2797         if (status != 0)
2798                 return status;
2799
2800         message->complete = spi_complete;
2801         message->context = &done;
2802         message->spi = spi;
2803
2804         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2805         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2806
2807         if (!bus_locked)
2808                 mutex_lock(&master->bus_lock_mutex);
2809
2810         /* If we're not using the legacy transfer method then we will
2811          * try to transfer in the calling context, so special-case that.
2812          * This code would be less tricky if we could remove the
2813          * support for driver-implemented message queues.
2814          */
2815         if (master->transfer == spi_queued_transfer) {
2816                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2817
2818                 trace_spi_message_submit(message);
2819
2820                 status = __spi_queued_transfer(spi, message, false);
2821
2822                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2823         } else {
2824                 status = spi_async_locked(spi, message);
2825         }
2826
2827         if (!bus_locked)
2828                 mutex_unlock(&master->bus_lock_mutex);
2829
2830         if (status == 0) {
2831                 /* Push out the messages in the calling context if we
2832                  * can.
2833                  */
2834                 if (master->transfer == spi_queued_transfer) {
2835                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2836                                                        spi_sync_immediate);
2837                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2838                                                        spi_sync_immediate);
2839                         __spi_pump_messages(master, false, bus_locked);
2840                 }
2841
2842                 wait_for_completion(&done);
2843                 status = message->status;
2844         }
2845         message->context = NULL;
2846         return status;
2847 }
2848
2849 /**
2850  * spi_sync - blocking/synchronous SPI data transfers
2851  * @spi: device with which data will be exchanged
2852  * @message: describes the data transfers
2853  * Context: can sleep
2854  *
2855  * This call may only be used from a context that may sleep.  The sleep
2856  * is non-interruptible, and has no timeout.  Low-overhead controller
2857  * drivers may DMA directly into and out of the message buffers.
2858  *
2859  * Note that the SPI device's chip select is active during the message,
2860  * and then is normally disabled between messages.  Drivers for some
2861  * frequently-used devices may want to minimize costs of selecting a chip,
2862  * by leaving it selected in anticipation that the next message will go
2863  * to the same chip.  (That may increase power usage.)
2864  *
2865  * Also, the caller is guaranteeing that the memory associated with the
2866  * message will not be freed before this call returns.
2867  *
2868  * Return: zero on success, else a negative error code.
2869  */
2870 int spi_sync(struct spi_device *spi, struct spi_message *message)
2871 {
2872         return __spi_sync(spi, message, spi->master->bus_lock_flag);
2873 }
2874 EXPORT_SYMBOL_GPL(spi_sync);
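
/*
 * Example (hypothetical synchronous command/response exchange, not part
 * of this file): two transfers in a single message keep chip select
 * asserted between the command and the response.  "cmd" and "resp" are
 * assumptions.
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd),  },
 *		{ .rx_buf = resp, .len = sizeof(resp), },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
 *	ret = spi_sync(spi, &m);
 */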
2875
2876 /**
2877  * spi_sync_locked - version of spi_sync with exclusive bus usage
2878  * @spi: device with which data will be exchanged
2879  * @message: describes the data transfers
2880  * Context: can sleep
2881  *
2882  * This call may only be used from a context that may sleep.  The sleep
2883  * is non-interruptible, and has no timeout.  Low-overhead controller
2884  * drivers may DMA directly into and out of the message buffers.
2885  *
2886  * This call should be used by drivers that require exclusive access to the
2887  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2888  * be released by a spi_bus_unlock call when the exclusive access is over.
2889  *
2890  * Return: zero on success, else a negative error code.
2891  */
2892 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2893 {
2894         return __spi_sync(spi, message, 1);
2895 }
2896 EXPORT_SYMBOL_GPL(spi_sync_locked);
2897
2898 /**
2899  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2900  * @master: SPI bus master that should be locked for exclusive bus access
2901  * Context: can sleep
2902  *
2903  * This call may only be used from a context that may sleep.  The sleep
2904  * is non-interruptible, and has no timeout.
2905  *
2906  * This call should be used by drivers that require exclusive access to the
2907  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2908  * exclusive access is over. Data transfer must be done by spi_sync_locked
2909  * and spi_async_locked calls when the SPI bus lock is held.
2910  *
2911  * Return: always zero.
2912  */
2913 int spi_bus_lock(struct spi_master *master)
2914 {
2915         unsigned long flags;
2916
2917         mutex_lock(&master->bus_lock_mutex);
2918
2919         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2920         master->bus_lock_flag = 1;
2921         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2922
2923         /* mutex remains locked until spi_bus_unlock is called */
2924
2925         return 0;
2926 }
2927 EXPORT_SYMBOL_GPL(spi_bus_lock);
2928
2929 /**
2930  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2931  * @master: SPI bus master that was locked for exclusive bus access
2932  * Context: can sleep
2933  *
2934  * This call may only be used from a context that may sleep.  The sleep
2935  * is non-interruptible, and has no timeout.
2936  *
2937  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2938  * call.
2939  *
2940  * Return: always zero.
2941  */
2942 int spi_bus_unlock(struct spi_master *master)
2943 {
2944         master->bus_lock_flag = 0;
2945
2946         mutex_unlock(&master->bus_lock_mutex);
2947
2948         return 0;
2949 }
2950 EXPORT_SYMBOL_GPL(spi_bus_unlock);
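
/*
 * Illustrative sketch only, not used by the core: a driver that must issue
 * several messages back to back, with no other traffic on the bus in
 * between, can pair spi_bus_lock()/spi_bus_unlock() with spi_sync_locked().
 * The function name is hypothetical and the two messages are assumed to
 * have been prepared by the caller.
 */
static int __maybe_unused example_spi_locked_sequence(struct spi_device *spi,
                                                      struct spi_message *first,
                                                      struct spi_message *second)
{
        struct spi_master *master = spi->master;
        int status;

        spi_bus_lock(master);

        status = spi_sync_locked(spi, first);
        if (status == 0)
                status = spi_sync_locked(spi, second);

        spi_bus_unlock(master);

        return status;
}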
2951
2952 /* portable code must never pass more than 32 bytes */
2953 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2954
2955 static u8       *buf;
2956
2957 /**
2958  * spi_write_then_read - SPI synchronous write followed by read
2959  * @spi: device with which data will be exchanged
2960  * @txbuf: data to be written (need not be dma-safe)
2961  * @n_tx: size of txbuf, in bytes
2962  * @rxbuf: buffer into which data will be read (need not be dma-safe)
2963  * @n_rx: size of rxbuf, in bytes
2964  * Context: can sleep
2965  *
2966  * This performs a half duplex MicroWire style transaction with the
2967  * device, sending txbuf and then reading rxbuf.  The return value
2968  * is zero for success, else a negative errno status code.
2969  * This call may only be used from a context that may sleep.
2970  *
2971  * Parameters to this routine are always copied using a small buffer;
2972  * portable code should never use this for more than 32 bytes.
2973  * Performance-sensitive or bulk transfer code should instead use
2974  * spi_{async,sync}() calls with dma-safe buffers.
2975  *
2976  * Return: zero on success, else a negative error code.
2977  */
2978 int spi_write_then_read(struct spi_device *spi,
2979                 const void *txbuf, unsigned n_tx,
2980                 void *rxbuf, unsigned n_rx)
2981 {
2982         static DEFINE_MUTEX(lock);
2983
2984         int                     status;
2985         struct spi_message      message;
2986         struct spi_transfer     x[2];
2987         u8                      *local_buf;
2988
2989         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
2990          * copying here (this helper is a pure convenience), but we can
2991          * keep heap costs out of the hot path unless someone else is
2992          * using the preallocated buffer or the transfer is too large.
2993          */
2994         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2995                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2996                                     GFP_KERNEL | GFP_DMA);
2997                 if (!local_buf)
2998                         return -ENOMEM;
2999         } else {
3000                 local_buf = buf;
3001         }
3002
3003         spi_message_init(&message);
3004         memset(x, 0, sizeof(x));
3005         if (n_tx) {
3006                 x[0].len = n_tx;
3007                 spi_message_add_tail(&x[0], &message);
3008         }
3009         if (n_rx) {
3010                 x[1].len = n_rx;
3011                 spi_message_add_tail(&x[1], &message);
3012         }
3013
3014         memcpy(local_buf, txbuf, n_tx);
3015         x[0].tx_buf = local_buf;
3016         x[1].rx_buf = local_buf + n_tx;
3017
3018         /* do the i/o */
3019         status = spi_sync(spi, &message);
3020         if (status == 0)
3021                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3022
3023         if (x[0].tx_buf == buf)
3024                 mutex_unlock(&lock);
3025         else
3026                 kfree(local_buf);
3027
3028         return status;
3029 }
3030 EXPORT_SYMBOL_GPL(spi_write_then_read);
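
/*
 * Illustrative sketch only, not used by the core: reading one register with
 * the convenience helper above.  The function name and register layout are
 * hypothetical; the buffers may live on the caller's stack because
 * spi_write_then_read() copies them into its own DMA-safe bounce buffer.
 */
static int __maybe_unused example_spi_read_reg(struct spi_device *spi,
                                               u8 reg, u8 *val)
{
        /* one command/address byte out, then one data byte back */
        return spi_write_then_read(spi, &reg, 1, val, 1);
}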
3031
3032 /*-------------------------------------------------------------------------*/
3033
3034 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3035 static int __spi_of_device_match(struct device *dev, void *data)
3036 {
3037         return dev->of_node == data;
3038 }
3039
3040 /* must call put_device() when done with the returned spi_device */
3041 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3042 {
3043         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3044                                                 __spi_of_device_match);
3045         return dev ? to_spi_device(dev) : NULL;
3046 }
3047
3048 static int __spi_of_master_match(struct device *dev, const void *data)
3049 {
3050         return dev->of_node == data;
3051 }
3052
3053 /* the spi masters are not using spi_bus, so we have to find them another way */
3054 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3055 {
3056         struct device *dev;
3057
3058         dev = class_find_device(&spi_master_class, NULL, node,
3059                                 __spi_of_master_match);
3060         if (!dev)
3061                 return NULL;
3062
3063         /* reference obtained by class_find_device */
3064         return container_of(dev, struct spi_master, dev);
3065 }
3066
3067 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3068                          void *arg)
3069 {
3070         struct of_reconfig_data *rd = arg;
3071         struct spi_master *master;
3072         struct spi_device *spi;
3073
3074         switch (of_reconfig_get_state_change(action, arg)) {
3075         case OF_RECONFIG_CHANGE_ADD:
3076                 master = of_find_spi_master_by_node(rd->dn->parent);
3077                 if (master == NULL)
3078                         return NOTIFY_OK;       /* not for us */
3079
3080                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3081                         put_device(&master->dev);
3082                         return NOTIFY_OK;
3083                 }
3084
3085                 spi = of_register_spi_device(master, rd->dn);
3086                 put_device(&master->dev);
3087
3088                 if (IS_ERR(spi)) {
3089                         pr_err("%s: failed to create for '%s'\n",
3090                                         __func__, rd->dn->full_name);
3091                         return notifier_from_errno(PTR_ERR(spi));
3092                 }
3093                 break;
3094
3095         case OF_RECONFIG_CHANGE_REMOVE:
3096                 /* already depopulated? */
3097                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3098                         return NOTIFY_OK;
3099
3100                 /* find our device by node */
3101                 spi = of_find_spi_device_by_node(rd->dn);
3102                 if (spi == NULL)
3103                         return NOTIFY_OK;       /* no? not meant for us */
3104
3105                 /* unregister takes one ref away */
3106                 spi_unregister_device(spi);
3107
3108                 /* and drop the reference taken by the find */
3109                 put_device(&spi->dev);
3110                 break;
3111         }
3112
3113         return NOTIFY_OK;
3114 }
3115
3116 static struct notifier_block spi_of_notifier = {
3117         .notifier_call = of_spi_notify,
3118 };
3119 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3120 extern struct notifier_block spi_of_notifier;
3121 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3122
3123 #if IS_ENABLED(CONFIG_ACPI)
3124 static int spi_acpi_master_match(struct device *dev, const void *data)
3125 {
3126         return ACPI_COMPANION(dev->parent) == data;
3127 }
3128
3129 static int spi_acpi_device_match(struct device *dev, void *data)
3130 {
3131         return ACPI_COMPANION(dev) == data;
3132 }
3133
3134 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
3135 {
3136         struct device *dev;
3137
3138         dev = class_find_device(&spi_master_class, NULL, adev,
3139                                 spi_acpi_master_match);
3140         if (!dev)
3141                 return NULL;
3142
3143         return container_of(dev, struct spi_master, dev);
3144 }
3145
3146 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3147 {
3148         struct device *dev;
3149
3150         dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3151
3152         return dev ? to_spi_device(dev) : NULL;
3153 }
3154
3155 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3156                            void *arg)
3157 {
3158         struct acpi_device *adev = arg;
3159         struct spi_master *master;
3160         struct spi_device *spi;
3161
3162         switch (value) {
3163         case ACPI_RECONFIG_DEVICE_ADD:
3164                 master = acpi_spi_find_master_by_adev(adev->parent);
3165                 if (!master)
3166                         break;
3167
3168                 acpi_register_spi_device(master, adev);
3169                 put_device(&master->dev);
3170                 break;
3171         case ACPI_RECONFIG_DEVICE_REMOVE:
3172                 if (!acpi_device_enumerated(adev))
3173                         break;
3174
3175                 spi = acpi_spi_find_device_by_adev(adev);
3176                 if (!spi)
3177                         break;
3178
3179                 spi_unregister_device(spi);
3180                 put_device(&spi->dev);
3181                 break;
3182         }
3183
3184         return NOTIFY_OK;
3185 }
3186
3187 static struct notifier_block spi_acpi_notifier = {
3188         .notifier_call = acpi_spi_notify,
3189 };
3190 #else
3191 extern struct notifier_block spi_acpi_notifier;
3192 #endif
3193
3194 static int __init spi_init(void)
3195 {
3196         int     status;
3197
3198         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3199         if (!buf) {
3200                 status = -ENOMEM;
3201                 goto err0;
3202         }
3203
3204         status = bus_register(&spi_bus_type);
3205         if (status < 0)
3206                 goto err1;
3207
3208         status = class_register(&spi_master_class);
3209         if (status < 0)
3210                 goto err2;
3211
3212         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3213                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3214         if (IS_ENABLED(CONFIG_ACPI))
3215                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3216
3217         return 0;
3218
3219 err2:
3220         bus_unregister(&spi_bus_type);
3221 err1:
3222         kfree(buf);
3223         buf = NULL;
3224 err0:
3225         return status;
3226 }
3227
3228 /* board_info is normally registered in arch_initcall(),
3229  * but even essential drivers wait till later.
3230  *
3231  * REVISIT: only boardinfo really needs static linking.  The rest (device and
3232  * driver registration) _could_ be dynamically linked (modular) ... the costs
3233  * include needing to make the boardinfo data structures much more public.
3234  */
3235 postcore_initcall(spi_init);
3236