/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_master_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_master *master = container_of(dev,                   \
                                                 struct spi_master, dev); \
        return spi_statistics_##field##_show(&master->statistics, buf); \
}                                                                       \
static struct device_attribute dev_attr_spi_master_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_master_##field##_show,                              \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                        char *buf)                      \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

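/*
 * For example, SPI_STATISTICS_SHOW(messages, "%lu") below generates the
 * show functions plus "messages" device attributes for both the master
 * and the device, which surface in sysfs as .../statistics/messages via
 * the attribute groups defined further down.
 */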
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_messages.attr,
        &dev_attr_spi_master_transfers.attr,
        &dev_attr_spi_master_errors.attr,
        &dev_attr_spi_master_timedout.attr,
        &dev_attr_spi_master_spi_sync.attr,
        &dev_attr_spi_master_spi_sync_immediate.attr,
        &dev_attr_spi_master_spi_async.attr,
        &dev_attr_spi_master_bytes.attr,
        &dev_attr_spi_master_bytes_rx.attr,
        &dev_attr_spi_master_bytes_tx.attr,
        &dev_attr_spi_master_transfer_bytes_histo0.attr,
        &dev_attr_spi_master_transfer_bytes_histo1.attr,
        &dev_attr_spi_master_transfer_bytes_histo2.attr,
        &dev_attr_spi_master_transfer_bytes_histo3.attr,
        &dev_attr_spi_master_transfer_bytes_histo4.attr,
        &dev_attr_spi_master_transfer_bytes_histo5.attr,
        &dev_attr_spi_master_transfer_bytes_histo6.attr,
        &dev_attr_spi_master_transfer_bytes_histo7.attr,
        &dev_attr_spi_master_transfer_bytes_histo8.attr,
        &dev_attr_spi_master_transfer_bytes_histo9.attr,
        &dev_attr_spi_master_transfer_bytes_histo10.attr,
        &dev_attr_spi_master_transfer_bytes_histo11.attr,
        &dev_attr_spi_master_transfer_bytes_histo12.attr,
        &dev_attr_spi_master_transfer_bytes_histo13.attr,
        &dev_attr_spi_master_transfer_bytes_histo14.attr,
        &dev_attr_spi_master_transfer_bytes_histo15.attr,
        &dev_attr_spi_master_transfer_bytes_histo16.attr,
        &dev_attr_spi_master_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_master_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_master_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
        unsigned long flags;
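        /* Power-of-two bucketing: e.g. fls(100) = 7, so a 100-byte
         * transfer is counted in transfer_bytes_histo[6], i.e. the
         * sysfs "transfer_bytes_histo_64-127" file.
         */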
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != master->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != master->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
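
/*
 * Illustrative use (hypothetical driver; names are made up):
 *
 *      static struct spi_driver my_driver = {
 *              .driver = {
 *                      .name   = "my-spi-chip",
 *              },
 *              .probe  = my_probe,
 *              .remove = my_remove,
 *      };
 *      module_spi_driver(my_driver);
 *
 * module_spi_driver() expands to spi_register_driver() and
 * spi_unregister_driver() calls in module init/exit;
 * spi_register_driver() wraps __spi_register_driver() with
 * THIS_MODULE as @owner.
 */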

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device       *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
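
/*
 * Illustrative two-step use of spi_alloc_device()/spi_add_device()
 * (hypothetical adapter driver; names and values are made up):
 *
 *      struct spi_device *spi = spi_alloc_device(master);
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "my-spi-chip", sizeof(spi->modalias));
 *      if (spi_add_device(spi))
 *              spi_dev_put(spi);
 *
 * On spi_add_device() failure, spi_dev_put() discards the device
 * without adding it, as documented above.
 */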

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node)
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
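
/*
 * Illustrative board-file usage (hypothetical names):
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "my-spi-chip",
 *                      .max_speed_hz   = 1000000,
 *                      .bus_num        = 0,
 *                      .chip_select    = 0,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */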

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (gpio_is_valid(spi->cs_gpio))
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
        int desc_len;
        int sgs;
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, master->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf) {
                        min = min_t(size_t,
                                    len, desc_len - offset_in_page(buf));
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
                              struct device *dev, struct sg_table *sgt,
                              void *buf, size_t len,
                              enum dma_data_direction dir)
{
        return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
                                 struct device *dev, struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they
                 * were NULL and were replaced with the dummy buffers by
                 * spi_map_msg().
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, master);
                spi_statistics_add_transfer_stats(stats, xfer, master);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
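                                /*
                                 * Estimate the time on the wire with
                                 * generous tolerance: e.g. 1000 bytes
                                 * at 1 MHz take 8 ms; doubling that
                                 * and adding 100 ms gives a 116 ms
                                 * timeout.
                                 */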
                                ms = 8LL * 1000LL * xfer->len;
                                do_div(ms, xfer->speed_hz);
                                ms += ms + 100; /* some tolerance */

                                if (ms > UINT_MAX)
                                        ms = UINT_MAX;

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               timedout);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               timedout);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_res_release(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
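
/*
 * Typical interrupt-driven pattern (hypothetical driver): its
 * transfer_one() starts the hardware and returns a positive value, so
 * spi_transfer_one_message() above waits on xfer_completion; the
 * driver's interrupt handler then calls spi_finalize_current_transfer()
 * once the transfer has drained, completing that wait.
 */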

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                queue_kthread_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        mutex_lock(&master->io_mutex);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        mutex_unlock(&master->io_mutex);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        mutex_unlock(&master->io_mutex);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        goto out;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                goto out;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                goto out;
        }

out:
        mutex_unlock(&master->io_mutex);

        /* Prod the scheduler in case transfer_one() was busy waiting */
        if (!ret)
                cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        init_kthread_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        init_kthread_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread. Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                        "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        queue_kthread_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * flush_kthread_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        flush_kthread_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued onto the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
        return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
        int ret;

        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;

        /* Initialize and start queue */
        ret = spi_init_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
        master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
                goto err_start_queue;
        }

        return 0;

err_start_queue:
        spi_destroy_queue(master);
err_init_queue:
        return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
1480 static struct spi_device *
1481 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1482 {
1483         struct spi_device *spi;
1484         int rc;
1485         u32 value;
1486
1487         /* Alloc an spi_device */
1488         spi = spi_alloc_device(master);
1489         if (!spi) {
1490                 dev_err(&master->dev, "spi_device alloc error for %s\n",
1491                         nc->full_name);
1492                 rc = -ENOMEM;
1493                 goto err_out;
1494         }
1495
1496         /* Select device driver */
1497         rc = of_modalias_node(nc, spi->modalias,
1498                                 sizeof(spi->modalias));
1499         if (rc < 0) {
1500                 dev_err(&master->dev, "cannot find modalias for %s\n",
1501                         nc->full_name);
1502                 goto err_out;
1503         }
1504
1505         /* Device address */
1506         rc = of_property_read_u32(nc, "reg", &value);
1507         if (rc) {
1508                 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1509                         nc->full_name, rc);
1510                 goto err_out;
1511         }
1512         spi->chip_select = value;
1513
1514         /* Mode (clock phase/polarity/etc.) */
1515         if (of_find_property(nc, "spi-cpha", NULL))
1516                 spi->mode |= SPI_CPHA;
1517         if (of_find_property(nc, "spi-cpol", NULL))
1518                 spi->mode |= SPI_CPOL;
1519         if (of_find_property(nc, "spi-cs-high", NULL))
1520                 spi->mode |= SPI_CS_HIGH;
1521         if (of_find_property(nc, "spi-3wire", NULL))
1522                 spi->mode |= SPI_3WIRE;
1523         if (of_find_property(nc, "spi-lsb-first", NULL))
1524                 spi->mode |= SPI_LSB_FIRST;
1525
1526         /* Device DUAL/QUAD mode */
1527         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1528                 switch (value) {
1529                 case 1:
1530                         break;
1531                 case 2:
1532                         spi->mode |= SPI_TX_DUAL;
1533                         break;
1534                 case 4:
1535                         spi->mode |= SPI_TX_QUAD;
1536                         break;
1537                 default:
1538                         dev_warn(&master->dev,
1539                                 "spi-tx-bus-width %d not supported\n",
1540                                 value);
1541                         break;
1542                 }
1543         }
1544
1545         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1546                 switch (value) {
1547                 case 1:
1548                         break;
1549                 case 2:
1550                         spi->mode |= SPI_RX_DUAL;
1551                         break;
1552                 case 4:
1553                         spi->mode |= SPI_RX_QUAD;
1554                         break;
1555                 default:
1556                         dev_warn(&master->dev,
1557                                 "spi-rx-bus-width %d not supported\n",
1558                                 value);
1559                         break;
1560                 }
1561         }
1562
1563         /* Device speed */
1564         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1565         if (rc) {
1566                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1567                         nc->full_name, rc);
1568                 goto err_out;
1569         }
1570         spi->max_speed_hz = value;
1571
1572         /* Store a pointer to the node in the device structure */
1573         of_node_get(nc);
1574         spi->dev.of_node = nc;
1575
1576         /* Register the new device */
1577         rc = spi_add_device(spi);
1578         if (rc) {
1579                 dev_err(&master->dev, "spi_device register error %s\n",
1580                         nc->full_name);
1581                 goto err_out;
1582         }
1583
1584         return spi;
1585
1586 err_out:
1587         spi_dev_put(spi);
1588         return ERR_PTR(rc);
1589 }
1590
1591 /**
1592  * of_register_spi_devices() - Register child devices onto the SPI bus
1593  * @master:     Pointer to spi_master device
1594  *
1595  * Registers an spi_device for each child node of the master node which has
1596  * a 'reg' property.
1597  */
1598 static void of_register_spi_devices(struct spi_master *master)
1599 {
1600         struct spi_device *spi;
1601         struct device_node *nc;
1602
1603         if (!master->dev.of_node)
1604                 return;
1605
1606         for_each_available_child_of_node(master->dev.of_node, nc) {
1607                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1608                         continue;
1609                 spi = of_register_spi_device(master, nc);
1610                 if (IS_ERR(spi))
1611                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1612                                 nc->full_name);
1613         }
1614 }
1615 #else
1616 static void of_register_spi_devices(struct spi_master *master) { }
1617 #endif
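
/*
 * Example device tree fragment (node and controller names are hypothetical)
 * showing the properties parsed above; "reg" selects the chip select and
 * "spi-max-frequency" is mandatory:
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <20000000>;
 *			spi-cpha;
 *			spi-cpol;
 *			spi-tx-bus-width = <2>;
 *			spi-rx-bus-width = <2>;
 *		};
 *	};
 */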
1618
1619 #ifdef CONFIG_ACPI
1620 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1621 {
1622         struct spi_device *spi = data;
1623         struct spi_master *master = spi->master;
1624
1625         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1626                 struct acpi_resource_spi_serialbus *sb;
1627
1628                 sb = &ares->data.spi_serial_bus;
1629                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1630                         /*
1631                          * ACPI DeviceSelection numbering is handled by the
1632                          * host controller driver in Windows and can vary
1633                          * from driver to driver. In Linux we always expect
1634                          * 0 .. max - 1 so we need to ask the driver to
1635                          * translate between the two schemes.
1636                          */
1637                         if (master->fw_translate_cs) {
1638                                 int cs = master->fw_translate_cs(master,
1639                                                 sb->device_selection);
1640                                 if (cs < 0)
1641                                         return cs;
1642                                 spi->chip_select = cs;
1643                         } else {
1644                                 spi->chip_select = sb->device_selection;
1645                         }
1646
1647                         spi->max_speed_hz = sb->connection_speed;
1648
1649                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1650                                 spi->mode |= SPI_CPHA;
1651                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1652                                 spi->mode |= SPI_CPOL;
1653                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1654                                 spi->mode |= SPI_CS_HIGH;
1655                 }
1656         } else if (spi->irq < 0) {
1657                 struct resource r;
1658
1659                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1660                         spi->irq = r.start;
1661         }
1662
1663         /* Always tell the ACPI core to skip this resource */
1664         return 1;
1665 }
1666
1667 static acpi_status acpi_register_spi_device(struct spi_master *master,
1668                                             struct acpi_device *adev)
1669 {
1670         struct list_head resource_list;
1671         struct spi_device *spi;
1672         int ret;
1673
1674         if (acpi_bus_get_status(adev) || !adev->status.present ||
1675             acpi_device_enumerated(adev))
1676                 return AE_OK;
1677
1678         spi = spi_alloc_device(master);
1679         if (!spi) {
1680                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1681                         dev_name(&adev->dev));
1682                 return AE_NO_MEMORY;
1683         }
1684
1685         ACPI_COMPANION_SET(&spi->dev, adev);
1686         spi->irq = -1;
1687
1688         INIT_LIST_HEAD(&resource_list);
1689         ret = acpi_dev_get_resources(adev, &resource_list,
1690                                      acpi_spi_add_resource, spi);
1691         acpi_dev_free_resource_list(&resource_list);
1692
1693         if (ret < 0 || !spi->max_speed_hz) {
1694                 spi_dev_put(spi);
1695                 return AE_OK;
1696         }
1697
1698         if (spi->irq < 0)
1699                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1700
1701         acpi_device_set_enumerated(adev);
1702
1703         adev->power.flags.ignore_parent = true;
1704         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1705         if (spi_add_device(spi)) {
1706                 adev->power.flags.ignore_parent = false;
1707                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1708                         dev_name(&adev->dev));
1709                 spi_dev_put(spi);
1710         }
1711
1712         return AE_OK;
1713 }
1714
1715 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1716                                        void *data, void **return_value)
1717 {
1718         struct spi_master *master = data;
1719         struct acpi_device *adev;
1720
1721         if (acpi_bus_get_device(handle, &adev))
1722                 return AE_OK;
1723
1724         return acpi_register_spi_device(master, adev);
1725 }
1726
1727 static void acpi_register_spi_devices(struct spi_master *master)
1728 {
1729         acpi_status status;
1730         acpi_handle handle;
1731
1732         handle = ACPI_HANDLE(master->dev.parent);
1733         if (!handle)
1734                 return;
1735
1736         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1737                                      acpi_spi_add_device, NULL,
1738                                      master, NULL);
1739         if (ACPI_FAILURE(status))
1740                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1741 }
1742 #else
1743 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1744 #endif /* CONFIG_ACPI */
1745
1746 static void spi_master_release(struct device *dev)
1747 {
1748         struct spi_master *master;
1749
1750         master = container_of(dev, struct spi_master, dev);
1751         kfree(master);
1752 }
1753
1754 static struct class spi_master_class = {
1755         .name           = "spi_master",
1756         .owner          = THIS_MODULE,
1757         .dev_release    = spi_master_release,
1758         .dev_groups     = spi_master_groups,
1759 };
1760
1761
1762 /**
1763  * spi_alloc_master - allocate SPI master controller
1764  * @dev: the controller, possibly using the platform_bus
1765  * @size: how much zeroed driver-private data to allocate; the pointer to this
1766  *      memory is in the driver_data field of the returned device,
1767  *      accessible with spi_master_get_devdata().
1768  * Context: can sleep
1769  *
1770  * This call is used only by SPI master controller drivers, which are the
1771  * only ones directly touching chip registers.  It's how they allocate
1772  * an spi_master structure, prior to calling spi_register_master().
1773  *
1774  * This must be called from context that can sleep.
1775  *
1776  * The caller is responsible for assigning the bus number and initializing
1777  * the master's methods before calling spi_register_master(); and (after errors
1778  * adding the device) calling spi_master_put() to prevent a memory leak.
1779  *
1780  * Return: the SPI master structure on success, else NULL.
1781  */
1782 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1783 {
1784         struct spi_master       *master;
1785
1786         if (!dev)
1787                 return NULL;
1788
1789         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1790         if (!master)
1791                 return NULL;
1792
1793         device_initialize(&master->dev);
1794         master->bus_num = -1;
1795         master->num_chipselect = 1;
1796         master->dev.class = &spi_master_class;
1797         master->dev.parent = dev;
1798         pm_suspend_ignore_children(&master->dev, true);
1799         spi_master_set_devdata(master, &master[1]);
1800
1801         return master;
1802 }
1803 EXPORT_SYMBOL_GPL(spi_alloc_master);
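
/*
 * A minimal allocation sketch (hypothetical "foo" driver, not an in-tree
 * user): the driver-private struct is carved out of the same allocation
 * and reached through spi_master_get_devdata().
 */
struct foo_priv {
        void __iomem *regs;
};

static struct spi_master *foo_alloc_master(struct device *dev)
{
        struct spi_master *master;
        struct foo_priv *priv;

        master = spi_alloc_master(dev, sizeof(*priv));
        if (!master)
                return NULL;

        priv = spi_master_get_devdata(master);
        priv->regs = NULL;      /* mapped later in probe */
        return master;
}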
1804
1805 #ifdef CONFIG_OF
1806 static int of_spi_register_master(struct spi_master *master)
1807 {
1808         int nb, i, *cs;
1809         struct device_node *np = master->dev.of_node;
1810
1811         if (!np)
1812                 return 0;
1813
1814         nb = of_gpio_named_count(np, "cs-gpios");
1815         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1816
1817         /* Return error only for an incorrectly formed cs-gpios property */
1818         if (nb == 0 || nb == -ENOENT)
1819                 return 0;
1820         else if (nb < 0)
1821                 return nb;
1822
1823         cs = devm_kzalloc(&master->dev,
1824                           sizeof(int) * master->num_chipselect,
1825                           GFP_KERNEL);
1826         master->cs_gpios = cs;
1827
1828         if (!master->cs_gpios)
1829                 return -ENOMEM;
1830
1831         for (i = 0; i < master->num_chipselect; i++)
1832                 cs[i] = -ENOENT;
1833
1834         for (i = 0; i < nb; i++)
1835                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1836
1837         return 0;
1838 }
1839 #else
1840 static int of_spi_register_master(struct spi_master *master)
1841 {
1842         return 0;
1843 }
1844 #endif
1845
1846 /**
1847  * spi_register_master - register SPI master controller
1848  * @master: initialized master, originally from spi_alloc_master()
1849  * Context: can sleep
1850  *
1851  * SPI master controllers connect to their drivers using some non-SPI bus,
1852  * such as the platform bus.  The final stage of probe() in that code
1853  * includes calling spi_register_master() to hook up to this SPI bus glue.
1854  *
1855  * SPI controllers use board specific (often SOC specific) bus numbers,
1856  * and board-specific addressing for SPI devices combines those numbers
1857  * with chip select numbers.  Since SPI does not directly support dynamic
1858  * device identification, boards need configuration tables telling which
1859  * chip is at which address.
1860  *
1861  * This must be called from context that can sleep.  It returns zero on
1862  * success, else a negative error code; in that case the caller must drop
1863  * the master's refcount with spi_master_put().  After a successful return,
1864  * the caller is responsible for calling spi_unregister_master().
1865  *
1866  * Return: zero on success, else a negative error code.
1867  */
1868 int spi_register_master(struct spi_master *master)
1869 {
1870         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1871         struct device           *dev = master->dev.parent;
1872         struct boardinfo        *bi;
1873         int                     status = -ENODEV;
1874         int                     dynamic = 0;
1875
1876         if (!dev)
1877                 return -ENODEV;
1878
1879         status = of_spi_register_master(master);
1880         if (status)
1881                 return status;
1882
1883         /* even if it's just one always-selected device, there must
1884          * be at least one chipselect
1885          */
1886         if (master->num_chipselect == 0)
1887                 return -EINVAL;
1888
1889         if ((master->bus_num < 0) && master->dev.of_node)
1890                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1891
1892         /* convention:  dynamically assigned bus IDs count down from the max */
1893         if (master->bus_num < 0) {
1894                 /* FIXME switch to an IDR based scheme, something like
1895                  * I2C now uses, so we can't run out of "dynamic" IDs
1896                  */
1897                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1898                 dynamic = 1;
1899         }
1900
1901         INIT_LIST_HEAD(&master->queue);
1902         spin_lock_init(&master->queue_lock);
1903         spin_lock_init(&master->bus_lock_spinlock);
1904         mutex_init(&master->bus_lock_mutex);
1905         mutex_init(&master->io_mutex);
1906         master->bus_lock_flag = 0;
1907         init_completion(&master->xfer_completion);
1908         if (!master->max_dma_len)
1909                 master->max_dma_len = INT_MAX;
1910
1911         /* register the device, then userspace will see it.
1912          * registration fails if the bus ID is in use.
1913          */
1914         dev_set_name(&master->dev, "spi%u", master->bus_num);
1915         status = device_add(&master->dev);
1916         if (status < 0)
1917                 goto done;
1918         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1919                         dynamic ? " (dynamic)" : "");
1920
1921         /* If we're using a queued driver, start the queue */
1922         if (master->transfer)
1923                 dev_info(dev, "master is unqueued, this is deprecated\n");
1924         else {
1925                 status = spi_master_initialize_queue(master);
1926                 if (status) {
1927                         device_del(&master->dev);
1928                         goto done;
1929                 }
1930         }
1931         /* add statistics */
1932         spin_lock_init(&master->statistics.lock);
1933
1934         mutex_lock(&board_lock);
1935         list_add_tail(&master->list, &spi_master_list);
1936         list_for_each_entry(bi, &board_list, list)
1937                 spi_match_master_to_boardinfo(master, &bi->board_info);
1938         mutex_unlock(&board_lock);
1939
1940         /* Register devices from the device tree and ACPI */
1941         of_register_spi_devices(master);
1942         acpi_register_spi_devices(master);
1943 done:
1944         return status;
1945 }
1946 EXPORT_SYMBOL_GPL(spi_register_master);
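
/*
 * A registration sketch (hypothetical probe path): on failure the master's
 * refcount must be dropped with spi_master_put(), matching the rule stated
 * in the comment above.
 */
static int foo_register(struct spi_master *master)
{
        int status;

        master->bus_num = -1;           /* ask for a dynamic bus number */
        master->num_chipselect = 4;

        status = spi_register_master(master);
        if (status)
                spi_master_put(master);
        return status;
}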
1947
1948 static void devm_spi_unregister(struct device *dev, void *res)
1949 {
1950         spi_unregister_master(*(struct spi_master **)res);
1951 }
1952
1953 /**
1954  * devm_spi_register_master - register managed SPI master controller
1955  * @dev:    device managing SPI master
1956  * @master: initialized master, originally from spi_alloc_master()
1957  * Context: can sleep
1958  *
1959  * Register an SPI master as with spi_register_master(); the master is
1960  * automatically unregistered when @dev is unbound.
1961  *
1962  * Return: zero on success, else a negative error code.
1963  */
1964 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1965 {
1966         struct spi_master **ptr;
1967         int ret;
1968
1969         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1970         if (!ptr)
1971                 return -ENOMEM;
1972
1973         ret = spi_register_master(master);
1974         if (!ret) {
1975                 *ptr = master;
1976                 devres_add(dev, ptr);
1977         } else {
1978                 devres_free(ptr);
1979         }
1980
1981         return ret;
1982 }
1983 EXPORT_SYMBOL_GPL(devm_spi_register_master);
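
/*
 * A managed-registration sketch (hypothetical "foo" names): the devres
 * entry unregisters the master automatically when @dev is unbound, so no
 * remove() counterpart is required.
 */
static int foo_managed_register(struct device *dev)
{
        struct spi_master *master;
        int ret;

        master = spi_alloc_master(dev, 0);
        if (!master)
                return -ENOMEM;

        master->num_chipselect = 1;
        ret = devm_spi_register_master(dev, master);
        if (ret)
                spi_master_put(master);
        return ret;
}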
1984
1985 static int __unregister(struct device *dev, void *null)
1986 {
1987         spi_unregister_device(to_spi_device(dev));
1988         return 0;
1989 }
1990
1991 /**
1992  * spi_unregister_master - unregister SPI master controller
1993  * @master: the master being unregistered
1994  * Context: can sleep
1995  *
1996  * This call is used only by SPI master controller drivers, which are the
1997  * only ones directly touching chip registers.
1998  *
1999  * This must be called from context that can sleep.
2000  */
2001 void spi_unregister_master(struct spi_master *master)
2002 {
2003         int dummy;
2004
2005         if (master->queued) {
2006                 if (spi_destroy_queue(master))
2007                         dev_err(&master->dev, "queue remove failed\n");
2008         }
2009
2010         mutex_lock(&board_lock);
2011         list_del(&master->list);
2012         mutex_unlock(&board_lock);
2013
2014         dummy = device_for_each_child(&master->dev, NULL, __unregister);
2015         device_unregister(&master->dev);
2016 }
2017 EXPORT_SYMBOL_GPL(spi_unregister_master);
2018
2019 int spi_master_suspend(struct spi_master *master)
2020 {
2021         int ret;
2022
2023         /* Basically no-ops for non-queued masters */
2024         if (!master->queued)
2025                 return 0;
2026
2027         ret = spi_stop_queue(master);
2028         if (ret)
2029                 dev_err(&master->dev, "queue stop failed\n");
2030
2031         return ret;
2032 }
2033 EXPORT_SYMBOL_GPL(spi_master_suspend);
2034
2035 int spi_master_resume(struct spi_master *master)
2036 {
2037         int ret;
2038
2039         if (!master->queued)
2040                 return 0;
2041
2042         ret = spi_start_queue(master);
2043         if (ret)
2044                 dev_err(&master->dev, "queue restart failed\n");
2045
2046         return ret;
2047 }
2048 EXPORT_SYMBOL_GPL(spi_master_resume);
2049
2050 static int __spi_master_match(struct device *dev, const void *data)
2051 {
2052         struct spi_master *m;
2053         const u16 *bus_num = data;
2054
2055         m = container_of(dev, struct spi_master, dev);
2056         return m->bus_num == *bus_num;
2057 }
2058
2059 /**
2060  * spi_busnum_to_master - look up master associated with bus_num
2061  * @bus_num: the master's bus number
2062  * Context: can sleep
2063  *
2064  * This call may be used with devices that are registered after
2065  * arch init time.  It returns a refcounted pointer to the relevant
2066  * spi_master (which the caller must release), or NULL if there is
2067  * no such master registered.
2068  *
2069  * Return: the SPI master structure on success, else NULL.
2070  */
2071 struct spi_master *spi_busnum_to_master(u16 bus_num)
2072 {
2073         struct device           *dev;
2074         struct spi_master       *master = NULL;
2075
2076         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2077                                 __spi_master_match);
2078         if (dev)
2079                 master = container_of(dev, struct spi_master, dev);
2080         /* the reference was taken in class_find_device */
2081         return master;
2082 }
2083 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
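
/*
 * A lookup sketch: the reference obtained here must be dropped with
 * spi_master_put() once the caller is done with the master.
 */
static void foo_report_bus(u16 bus_num)
{
        struct spi_master *master = spi_busnum_to_master(bus_num);

        if (!master)
                return;

        dev_info(&master->dev, "bus %d has %u chipselects\n",
                 master->bus_num, master->num_chipselect);
        spi_master_put(master);
}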
2084
2085 /*-------------------------------------------------------------------------*/
2086
2087 /* Core methods for SPI resource management */
2088
2089 /**
2090  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2091  *                 during the processing of a spi_message while using
2092  *                 spi_transfer_one
2093  * @spi:     the spi device for which we allocate memory
2094  * @release: the release code to execute for this resource
2095  * @size:    size of the data to allocate and hand back to the caller
2096  * @gfp:     GFP allocation flags
2097  *
2098  * Return: the pointer to the allocated data, or NULL on allocation failure
2099  *
2100  * This may get enhanced in the future to allocate from a memory pool
2101  * of the @spi_device or @spi_master to avoid repeated allocations.
2102  */
2103 void *spi_res_alloc(struct spi_device *spi,
2104                     spi_res_release_t release,
2105                     size_t size, gfp_t gfp)
2106 {
2107         struct spi_res *sres;
2108
2109         sres = kzalloc(sizeof(*sres) + size, gfp);
2110         if (!sres)
2111                 return NULL;
2112
2113         INIT_LIST_HEAD(&sres->entry);
2114         sres->release = release;
2115
2116         return sres->data;
2117 }
2118 EXPORT_SYMBOL_GPL(spi_res_alloc);
2119
2120 /**
2121  * spi_res_free - free an spi resource
2122  * @res: pointer to the custom data of a resource
2123  *
2124  */
2125 void spi_res_free(void *res)
2126 {
2127         struct spi_res *sres;
2128
2129         if (!res)
2130                 return;
2131         sres = container_of(res, struct spi_res, data);
2132         WARN_ON(!list_empty(&sres->entry));
2133         kfree(sres);
2134 }
2135 EXPORT_SYMBOL_GPL(spi_res_free);
2136
2137 /**
2138  * spi_res_add - add a spi_res to the spi_message
2139  * @message: the spi message
2140  * @res:     the spi_resource
2141  */
2142 void spi_res_add(struct spi_message *message, void *res)
2143 {
2144         struct spi_res *sres = container_of(res, struct spi_res, data);
2145
2146         WARN_ON(!list_empty(&sres->entry));
2147         list_add_tail(&sres->entry, &message->resources);
2148 }
2149 EXPORT_SYMBOL_GPL(spi_res_add);
2150
2151 /**
2152  * spi_res_release - release all spi resources for this message
2153  * @master:  the @spi_master
2154  * @message: the @spi_message
2155  */
2156 void spi_res_release(struct spi_master *master,
2157                      struct spi_message *message)
2158 {
2159         struct spi_res *res;
2160
2161         while (!list_empty(&message->resources)) {
2162                 res = list_last_entry(&message->resources,
2163                                       struct spi_res, entry);
2164
2165                 if (res->release)
2166                         res->release(master, message, res->data);
2167
2168                 list_del(&res->entry);
2169
2170                 kfree(res);
2171         }
2172 }
2173 EXPORT_SYMBOL_GPL(spi_res_release);
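
/*
 * A resource-lifecycle sketch (hypothetical "foo" resource): data from
 * spi_res_alloc() that is registered with spi_res_add() is freed by
 * spi_res_release() when the message finishes, after the supplied release
 * callback has run.
 */
struct foo_res {
        void *scratch;
};

static void foo_res_release(struct spi_master *master,
                            struct spi_message *msg, void *res)
{
        struct foo_res *fr = res;

        kfree(fr->scratch);
}

static int foo_attach_scratch(struct spi_message *msg, size_t len)
{
        struct foo_res *fr;

        fr = spi_res_alloc(msg->spi, foo_res_release, sizeof(*fr), GFP_KERNEL);
        if (!fr)
                return -ENOMEM;

        fr->scratch = kzalloc(len, GFP_KERNEL);
        if (!fr->scratch) {
                spi_res_free(fr);
                return -ENOMEM;
        }

        spi_res_add(msg, fr);
        return 0;
}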
2174
2175 /*-------------------------------------------------------------------------*/
2176
2177 /* Core methods for spi_message alterations */
2178
2179 static void __spi_replace_transfers_release(struct spi_master *master,
2180                                             struct spi_message *msg,
2181                                             void *res)
2182 {
2183         struct spi_replaced_transfers *rxfer = res;
2184         size_t i;
2185
2186         /* call extra callback if requested */
2187         if (rxfer->release)
2188                 rxfer->release(master, msg, res);
2189
2190         /* insert replaced transfers back into the message */
2191         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2192
2193         /* remove the formerly inserted entries */
2194         for (i = 0; i < rxfer->inserted; i++)
2195                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2196 }
2197
2198 /**
2199  * spi_replace_transfers - replace transfers with several transfers
2200  *                         and register change with spi_message.resources
2201  * @msg:           the spi_message we work upon
2202  * @xfer_first:    the first spi_transfer we want to replace
2203  * @remove:        number of transfers to remove
2204  * @insert:        the number of transfers we want to insert instead
2205  * @release:       extra release code necessary in some circumstances
2206  * @extradatasize: extra data to allocate (with alignment guarantees
2207  *                 of struct @spi_transfer)
2208  * @gfp:           gfp flags
2209  *
2210  * Return: pointer to the @spi_replaced_transfers structure,
2211  *         or an ERR_PTR() in case of errors.
2212  */
2213 struct spi_replaced_transfers *spi_replace_transfers(
2214         struct spi_message *msg,
2215         struct spi_transfer *xfer_first,
2216         size_t remove,
2217         size_t insert,
2218         spi_replaced_release_t release,
2219         size_t extradatasize,
2220         gfp_t gfp)
2221 {
2222         struct spi_replaced_transfers *rxfer;
2223         struct spi_transfer *xfer;
2224         size_t i;
2225
2226         /* allocate the structure using spi_res */
2227         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2228                               insert * sizeof(struct spi_transfer)
2229                               + sizeof(struct spi_replaced_transfers)
2230                               + extradatasize,
2231                               gfp);
2232         if (!rxfer)
2233                 return ERR_PTR(-ENOMEM);
2234
2235         /* the release code to invoke before running the generic release */
2236         rxfer->release = release;
2237
2238         /* assign extradata */
2239         if (extradatasize)
2240                 rxfer->extradata =
2241                         &rxfer->inserted_transfers[insert];
2242
2243         /* init the replaced_transfers list */
2244         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2245
2246         /* assign the list_entry after which we should reinsert
2247          * the @replaced_transfers - it may be spi_message.messages!
2248          * the @replaced_transfers - it may be spi_message.transfers!
2249         rxfer->replaced_after = xfer_first->transfer_list.prev;
2250
2251         /* remove the requested number of transfers */
2252         for (i = 0; i < remove; i++) {
2253                 /* if the entry after replaced_after is msg->transfers
2254                  * then we have been requested to remove more transfers
2255                  * than are in the list
2256                  */
2257                 if (rxfer->replaced_after->next == &msg->transfers) {
2258                         dev_err(&msg->spi->dev,
2259                                 "requested to remove more spi_transfers than are available\n");
2260                         /* insert replaced transfers back into the message */
2261                         list_splice(&rxfer->replaced_transfers,
2262                                     rxfer->replaced_after);
2263
2264                         /* free the spi_replace_transfer structure */
2265                         spi_res_free(rxfer);
2266
2267                         /* and return with an error */
2268                         return ERR_PTR(-EINVAL);
2269                 }
2270
2271                 /* remove the entry after replaced_after from list of
2272                  * transfers and add it to list of replaced_transfers
2273                  */
2274                 list_move_tail(rxfer->replaced_after->next,
2275                                &rxfer->replaced_transfers);
2276         }
2277
2278         /* create copies of the given xfer with identical settings,
2279          * based on the first transfer to get removed
2280          */
2281         for (i = 0; i < insert; i++) {
2282                 /* we need to run in reverse order */
2283                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2284
2285                 /* copy all spi_transfer data */
2286                 memcpy(xfer, xfer_first, sizeof(*xfer));
2287
2288                 /* add to list */
2289                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2290
2291                 /* clear cs_change and delay_usecs for all but the last */
2292                 if (i) {
2293                         xfer->cs_change = false;
2294                         xfer->delay_usecs = 0;
2295                 }
2296         }
2297
2298         /* set up inserted */
2299         rxfer->inserted = insert;
2300
2301         /* and register it with spi_res/spi_message */
2302         spi_res_add(msg, rxfer);
2303
2304         return rxfer;
2305 }
2306 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2307
2308 static int __spi_split_transfer_maxsize(struct spi_master *master,
2309                                         struct spi_message *msg,
2310                                         struct spi_transfer **xferp,
2311                                         size_t maxsize,
2312                                         gfp_t gfp)
2313 {
2314         struct spi_transfer *xfer = *xferp, *xfers;
2315         struct spi_replaced_transfers *srt;
2316         size_t offset;
2317         size_t count, i;
2318
2319         /* warn once that we are splitting a transfer */
2320         dev_warn_once(&msg->spi->dev,
2321                       "spi_transfer of length %u exceeds max length of %zu - needed to split transfers\n",
2322                       xfer->len, maxsize);
2323
2324         /* calculate how many we have to replace */
2325         count = DIV_ROUND_UP(xfer->len, maxsize);
2326
2327         /* create replacement */
2328         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2329         if (IS_ERR(srt))
2330                 return PTR_ERR(srt);
2331         xfers = srt->inserted_transfers;
2332
2333         /* now handle each of those newly inserted spi_transfers.
2334          * note that the replacement spi_transfers are all preset
2335          * to the same values as *xferp, so tx_buf, rx_buf and len
2336          * are all identical (as well as most others),
2337          * so we just have to fix up len and the pointers.
2338          *
2339          * this also includes support for the deprecated
2340          * spi_message.is_dma_mapped interface
2341          */
2342
2343         /* the first transfer just needs the length modified, so we
2344          * run it outside the loop
2345          */
2346         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2347
2348         /* all the others need rx_buf/tx_buf also set */
2349         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2350                 /* update rx_buf, tx_buf and dma */
2351                 if (xfers[i].rx_buf)
2352                         xfers[i].rx_buf += offset;
2353                 if (xfers[i].rx_dma)
2354                         xfers[i].rx_dma += offset;
2355                 if (xfers[i].tx_buf)
2356                         xfers[i].tx_buf += offset;
2357                 if (xfers[i].tx_dma)
2358                         xfers[i].tx_dma += offset;
2359
2360                 /* update length */
2361                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2362         }
2363
2364         /* we set up xferp to the last entry we have inserted,
2365          * so that we skip those already split transfers
2366          */
2367         *xferp = &xfers[count - 1];
2368
2369         /* increment statistics counters */
2370         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2371                                        transfers_split_maxsize);
2372         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2373                                        transfers_split_maxsize);
2374
2375         return 0;
2376 }
2377
2378 /**
2379  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2380  *                               when an individual transfer exceeds a
2381  *                               certain size
2382  * @master:  the @spi_master for this transfer
2383  * @msg:     the @spi_message to transform
2384  * @maxsize: the maximum length an individual transfer may have
2385  * @gfp:     GFP allocation flags
2386  *
2387  * Return: zero on success, else a negative error code
2388  */
2389 int spi_split_transfers_maxsize(struct spi_master *master,
2390                                 struct spi_message *msg,
2391                                 size_t maxsize,
2392                                 gfp_t gfp)
2393 {
2394         struct spi_transfer *xfer;
2395         int ret;
2396
2397         /* iterate over the transfer_list,
2398          * but note that xfer is advanced to the last transfer inserted
2399          * to avoid checking sizes again unnecessarily (also, xfer may
2400          * belong to a different list by the time the
2401          * replacement has happened)
2402          */
2403         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2404                 if (xfer->len > maxsize) {
2405                         ret = __spi_split_transfer_maxsize(
2406                                 master, msg, &xfer, maxsize, gfp);
2407                         if (ret)
2408                                 return ret;
2409                 }
2410         }
2411
2412         return 0;
2413 }
2414 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
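
/*
 * A splitting sketch (hypothetical controller that cannot move more than
 * 64 KiB per transfer), typically run from the master's
 * ->prepare_message() hook:
 */
static int foo_prepare_message(struct spi_master *master,
                               struct spi_message *msg)
{
        /* break any transfer longer than 64 KiB into 64 KiB pieces */
        return spi_split_transfers_maxsize(master, msg, 65536, GFP_KERNEL);
}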
2415
2416 /*-------------------------------------------------------------------------*/
2417
2418 /* Core methods for SPI master protocol drivers.  Some of the
2419  * other core methods are currently defined as inline functions.
2420  */
2421
2422 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2423 {
2424         if (master->bits_per_word_mask) {
2425                 /* Only 32 bits fit in the mask */
2426                 if (bits_per_word > 32)
2427                         return -EINVAL;
2428                 if (!(master->bits_per_word_mask &
2429                                 SPI_BPW_MASK(bits_per_word)))
2430                         return -EINVAL;
2431         }
2432
2433         return 0;
2434 }
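
/*
 * A mask sketch: controller drivers advertise the word sizes they support
 * so the check above can reject everything else, e.g. for hypothetical
 * hardware that handles only 8- and 16-bit words:
 *
 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 * or, for a contiguous range of word sizes:
 *
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 */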
2435
2436 /**
2437  * spi_setup - setup SPI mode and clock rate
2438  * @spi: the device whose settings are being modified
2439  * Context: can sleep, and no requests are queued to the device
2440  *
2441  * SPI protocol drivers may need to update the transfer mode if the
2442  * device doesn't work with its default.  They may likewise need
2443  * to update clock rates or word sizes from initial values.  This function
2444  * changes those settings, and must be called from a context that can sleep.
2445  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2446  * effect the next time the device is selected and data is transferred to
2447  * or from it.  When this function returns, the spi device is deselected.
2448  *
2449  * Note that this call will fail if the protocol driver specifies an option
2450  * that the underlying controller or its driver does not support.  For
2451  * example, not all hardware supports wire transfers using nine bit words,
2452  * LSB-first wire encoding, or active-high chipselects.
2453  *
2454  * Return: zero on success, else a negative error code.
2455  */
2456 int spi_setup(struct spi_device *spi)
2457 {
2458         unsigned        bad_bits, ugly_bits;
2459         int             status;
2460
2461         /* check mode to prevent DUAL and QUAD from being set at the same time
2462          */
2463         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2464                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2465                 dev_err(&spi->dev,
2466                 "setup: cannot select dual and quad at the same time\n");
2467                 return -EINVAL;
2468         }
2469         /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
2470          */
2471         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2472                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2473                 return -EINVAL;
2474         /* help drivers fail *cleanly* when they need options
2475          * that aren't supported with their current master
2476          */
2477         bad_bits = spi->mode & ~spi->master->mode_bits;
2478         ugly_bits = bad_bits &
2479                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2480         if (ugly_bits) {
2481                 dev_warn(&spi->dev,
2482                          "setup: ignoring unsupported mode bits %x\n",
2483                          ugly_bits);
2484                 spi->mode &= ~ugly_bits;
2485                 bad_bits &= ~ugly_bits;
2486         }
2487         if (bad_bits) {
2488                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2489                         bad_bits);
2490                 return -EINVAL;
2491         }
2492
2493         if (!spi->bits_per_word)
2494                 spi->bits_per_word = 8;
2495
2496         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2497         if (status)
2498                 return status;
2499
2500         if (!spi->max_speed_hz)
2501                 spi->max_speed_hz = spi->master->max_speed_hz;
2502
2503         if (spi->master->setup)
2504                 status = spi->master->setup(spi);
2505
2506         spi_set_cs(spi, false);
2507
2508         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2509                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2510                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2511                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2512                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2513                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2514                         spi->bits_per_word, spi->max_speed_hz,
2515                         status);
2516
2517         return status;
2518 }
2519 EXPORT_SYMBOL_GPL(spi_setup);
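
/*
 * A setup sketch (hypothetical slave device): a protocol driver overrides
 * only the defaults it cares about and then calls spi_setup(); unsupported
 * mode bits make the call fail cleanly as described above.
 */
static int foo_configure(struct spi_device *spi)
{
        spi->mode = SPI_MODE_3;         /* CPOL=1, CPHA=1 */
        spi->bits_per_word = 16;
        spi->max_speed_hz = 1000000;

        return spi_setup(spi);
}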
2520
2521 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2522 {
2523         struct spi_master *master = spi->master;
2524         struct spi_transfer *xfer;
2525         int w_size;
2526
2527         if (list_empty(&message->transfers))
2528                 return -EINVAL;
2529
2530         /* Half-duplex links include original MicroWire, and ones with
2531          * only one data pin like SPI_3WIRE (switches direction) or where
2532          * either MOSI or MISO is missing.  They can also be caused by
2533          * software limitations.
2534          */
2535         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2536                         || (spi->mode & SPI_3WIRE)) {
2537                 unsigned flags = master->flags;
2538
2539                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2540                         if (xfer->rx_buf && xfer->tx_buf)
2541                                 return -EINVAL;
2542                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2543                                 return -EINVAL;
2544                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2545                                 return -EINVAL;
2546                 }
2547         }
2548
2549         /*
2550          * Set transfer bits_per_word and max speed as spi device default if
2551          * it is not set for this transfer.
2552          * Set transfer tx_nbits and rx_nbits as single transfer default
2553          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2554          */
2555         message->frame_length = 0;
2556         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2557                 message->frame_length += xfer->len;
2558                 if (!xfer->bits_per_word)
2559                         xfer->bits_per_word = spi->bits_per_word;
2560
2561                 if (!xfer->speed_hz)
2562                         xfer->speed_hz = spi->max_speed_hz;
2563                 if (!xfer->speed_hz)
2564                         xfer->speed_hz = master->max_speed_hz;
2565
2566                 if (master->max_speed_hz &&
2567                     xfer->speed_hz > master->max_speed_hz)
2568                         xfer->speed_hz = master->max_speed_hz;
2569
2570                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2571                         return -EINVAL;
2572
2573                 /*
2574                  * SPI transfer length should be multiple of SPI word size
2575                  * where SPI word size should be power-of-two multiple
2576                  */
2577                 if (xfer->bits_per_word <= 8)
2578                         w_size = 1;
2579                 else if (xfer->bits_per_word <= 16)
2580                         w_size = 2;
2581                 else
2582                         w_size = 4;
2583
2584                 /* No partial transfers accepted */
2585                 if (xfer->len % w_size)
2586                         return -EINVAL;
2587
2588                 if (xfer->speed_hz && master->min_speed_hz &&
2589                     xfer->speed_hz < master->min_speed_hz)
2590                         return -EINVAL;
2591
2592                 if (xfer->tx_buf && !xfer->tx_nbits)
2593                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2594                 if (xfer->rx_buf && !xfer->rx_nbits)
2595                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2596                 /* check transfer tx/rx_nbits:
2597                  * 1. check the value matches one of single, dual and quad
2598                  * 2. check tx/rx_nbits match the mode in spi_device
2599                  */
2600                 if (xfer->tx_buf) {
2601                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2602                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2603                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2604                                 return -EINVAL;
2605                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2606                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2607                                 return -EINVAL;
2608                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2609                                 !(spi->mode & SPI_TX_QUAD))
2610                                 return -EINVAL;
2611                 }
2612                 /* check transfer rx_nbits */
2613                 if (xfer->rx_buf) {
2614                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2615                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2616                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2617                                 return -EINVAL;
2618                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2619                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2620                                 return -EINVAL;
2621                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2622                                 !(spi->mode & SPI_RX_QUAD))
2623                                 return -EINVAL;
2624                 }
2625         }
2626
2627         message->status = -EINPROGRESS;
2628
2629         return 0;
2630 }
2631
2632 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2633 {
2634         struct spi_master *master = spi->master;
2635
2636         message->spi = spi;
2637
2638         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2639         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2640
2641         trace_spi_message_submit(message);
2642
2643         return master->transfer(spi, message);
2644 }
2645
2646 /**
2647  * spi_async - asynchronous SPI transfer
2648  * @spi: device with which data will be exchanged
2649  * @message: describes the data transfers, including completion callback
2650  * Context: any (irqs may be blocked, etc)
2651  *
2652  * This call may be used in_irq and other contexts which can't sleep,
2653  * as well as from task contexts which can sleep.
2654  *
2655  * The completion callback is invoked in a context which can't sleep.
2656  * Before that invocation, the value of message->status is undefined.
2657  * When the callback is issued, message->status holds either zero (to
2658  * indicate complete success) or a negative error code.  After that
2659  * callback returns, the driver which issued the transfer request may
2660  * deallocate the associated memory; it's no longer in use by any SPI
2661  * core or controller driver code.
2662  *
2663  * Note that although all messages to a spi_device are handled in
2664  * FIFO order, messages may go to different devices in other orders.
2665  * Some device might be higher priority, or have various "hard" access
2666  * time requirements, for example.
2667  *
2668  * On detection of any fault during the transfer, processing of
2669  * the entire message is aborted, and the device is deselected.
2670  * Until returning from the associated message completion callback,
2671  * no other spi_message queued to that device will be processed.
2672  * (This rule applies equally to all the synchronous transfer calls,
2673  * which are wrappers around this core asynchronous primitive.)
2674  *
2675  * Return: zero on success, else a negative error code.
2676  */
2677 int spi_async(struct spi_device *spi, struct spi_message *message)
2678 {
2679         struct spi_master *master = spi->master;
2680         int ret;
2681         unsigned long flags;
2682
2683         ret = __spi_validate(spi, message);
2684         if (ret != 0)
2685                 return ret;
2686
2687         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2688
2689         if (master->bus_lock_flag)
2690                 ret = -EBUSY;
2691         else
2692                 ret = __spi_async(spi, message);
2693
2694         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2695
2696         return ret;
2697 }
2698 EXPORT_SYMBOL_GPL(spi_async);
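
/*
 * An async sketch (hypothetical "foo" context struct, which must be
 * kmalloc'ed so the rx buffer is DMA-safe): the completion callback runs
 * in a context that cannot sleep, so it only signals the waiter; the
 * message and buffers must stay allocated until the callback has run.
 */
struct foo_async_ctx {
        struct spi_transfer xfer;
        struct spi_message msg;
        struct completion done;
        u8 rx[16];
};

static void foo_msg_complete(void *context)
{
        struct foo_async_ctx *ctx = context;

        complete(&ctx->done);
}

static int foo_submit_read(struct spi_device *spi, struct foo_async_ctx *ctx)
{
        init_completion(&ctx->done);

        memset(&ctx->xfer, 0, sizeof(ctx->xfer));
        ctx->xfer.rx_buf = ctx->rx;
        ctx->xfer.len = sizeof(ctx->rx);

        spi_message_init(&ctx->msg);
        spi_message_add_tail(&ctx->xfer, &ctx->msg);
        ctx->msg.complete = foo_msg_complete;
        ctx->msg.context = ctx;

        return spi_async(spi, &ctx->msg);
}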
2699
2700 /**
2701  * spi_async_locked - version of spi_async with exclusive bus usage
2702  * @spi: device with which data will be exchanged
2703  * @message: describes the data transfers, including completion callback
2704  * Context: any (irqs may be blocked, etc)
2705  *
2706  * This call may be used in_irq and other contexts which can't sleep,
2707  * as well as from task contexts which can sleep.
2708  *
2709  * The completion callback is invoked in a context which can't sleep.
2710  * Before that invocation, the value of message->status is undefined.
2711  * When the callback is issued, message->status holds either zero (to
2712  * indicate complete success) or a negative error code.  After that
2713  * callback returns, the driver which issued the transfer request may
2714  * deallocate the associated memory; it's no longer in use by any SPI
2715  * core or controller driver code.
2716  *
2717  * Note that although all messages to a spi_device are handled in
2718  * FIFO order, messages may go to different devices in other orders.
2719  * Some device might be higher priority, or have various "hard" access
2720  * time requirements, for example.
2721  *
2722  * On detection of any fault during the transfer, processing of
2723  * the entire message is aborted, and the device is deselected.
2724  * Until returning from the associated message completion callback,
2725  * no other spi_message queued to that device will be processed.
2726  * (This rule applies equally to all the synchronous transfer calls,
2727  * which are wrappers around this core asynchronous primitive.)
2728  *
2729  * Return: zero on success, else a negative error code.
2730  */
2731 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2732 {
2733         struct spi_master *master = spi->master;
2734         int ret;
2735         unsigned long flags;
2736
2737         ret = __spi_validate(spi, message);
2738         if (ret != 0)
2739                 return ret;
2740
2741         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2742
2743         ret = __spi_async(spi, message);
2744
2745         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2746
2747         return ret;
2748
2749 }
2750 EXPORT_SYMBOL_GPL(spi_async_locked);
2751
2752
2753 int spi_flash_read(struct spi_device *spi,
2754                    struct spi_flash_read_message *msg)
2755 {
2757         struct spi_master *master = spi->master;
2758         struct device *rx_dev = NULL;
2759         int ret;
2760
2761         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2762              msg->addr_nbits == SPI_NBITS_DUAL) &&
2763             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2764                 return -EINVAL;
2765         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2766              msg->addr_nbits == SPI_NBITS_QUAD) &&
2767             !(spi->mode & SPI_TX_QUAD))
2768                 return -EINVAL;
2769         if (msg->data_nbits == SPI_NBITS_DUAL &&
2770             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2771                 return -EINVAL;
2772         if (msg->data_nbits == SPI_NBITS_QUAD &&
2773             !(spi->mode &  SPI_RX_QUAD))
2774                 return -EINVAL;
2775
2776         if (master->auto_runtime_pm) {
2777                 ret = pm_runtime_get_sync(master->dev.parent);
2778                 if (ret < 0) {
2779                         dev_err(&master->dev, "Failed to power device: %d\n",
2780                                 ret);
2781                         return ret;
2782                 }
2783         }
2784
2785         mutex_lock(&master->bus_lock_mutex);
2786         mutex_lock(&master->io_mutex);
2787         if (master->dma_rx) {
2788                 rx_dev = master->dma_rx->device->dev;
2789                 ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
2790                                   msg->buf, msg->len,
2791                                   DMA_FROM_DEVICE);
2792                 if (!ret)
2793                         msg->cur_msg_mapped = true;
2794         }
2795         ret = master->spi_flash_read(spi, msg);
2796         if (msg->cur_msg_mapped)
2797                 spi_unmap_buf(master, rx_dev, &msg->rx_sg,
2798                               DMA_FROM_DEVICE);
2799         mutex_unlock(&master->io_mutex);
2800         mutex_unlock(&master->bus_lock_mutex);
2801
2802         if (master->auto_runtime_pm)
2803                 pm_runtime_put(master->dev.parent);
2804
2805         return ret;
2806 }
2807 EXPORT_SYMBOL_GPL(spi_flash_read);
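
/*
 * An accelerated-read sketch: "from", "read_opcode", "addr_width" and
 * "dummy_bytes" are spi_flash_read_message fields as used by the in-tree
 * m25p80 client and are assumptions of this example; callers should test
 * spi_flash_read_supported() and fall back to regular transfers when the
 * master provides no ->spi_flash_read().
 */
static int foo_flash_read(struct spi_device *spi, loff_t from,
                          void *buf, size_t len)
{
        struct spi_flash_read_message msg;

        if (!spi_flash_read_supported(spi))
                return -EOPNOTSUPP;

        memset(&msg, 0, sizeof(msg));
        msg.buf = buf;
        msg.from = from;
        msg.len = len;
        msg.read_opcode = 0x03;         /* classic NOR READ (assumption) */
        msg.addr_width = 3;
        msg.dummy_bytes = 0;
        msg.opcode_nbits = SPI_NBITS_SINGLE;
        msg.addr_nbits = SPI_NBITS_SINGLE;
        msg.data_nbits = SPI_NBITS_SINGLE;

        return spi_flash_read(spi, &msg);
}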
2808
2809 /*-------------------------------------------------------------------------*/
2810
2811 /* Utility methods for SPI master protocol drivers, layered on
2812  * top of the core.  Some other utility methods are defined as
2813  * inline functions.
2814  */
2815
2816 static void spi_complete(void *arg)
2817 {
2818         complete(arg);
2819 }
2820
2821 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2822 {
2823         DECLARE_COMPLETION_ONSTACK(done);
2824         int status;
2825         struct spi_master *master = spi->master;
2826         unsigned long flags;
2827
2828         status = __spi_validate(spi, message);
2829         if (status != 0)
2830                 return status;
2831
2832         message->complete = spi_complete;
2833         message->context = &done;
2834         message->spi = spi;
2835
2836         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2837         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2838
2839         /* If we're not using the legacy transfer method then we will
2840          * try to transfer in the calling context, so this is a special case.
2841          * This code would be less tricky if we could remove the
2842          * support for driver-implemented message queues.
2843          */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->master->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->master->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
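
/* Example: a minimal, hypothetical caller sketch (not part of this
 * file) showing how a protocol driver might run one full-duplex
 * transfer with spi_sync().  The buffer is kmalloc'd because, as noted
 * above, controller drivers may DMA directly into and out of the
 * message buffers.
 *
 *	static int example_xfer(struct spi_device *spi)
 *	{
 *		struct spi_transfer t = { .len = 2 };
 *		struct spi_message m;
 *		u8 *buf;
 *		int ret;
 *
 *		buf = kmalloc(t.len, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *		buf[0] = 0x80;		(illustrative command byte)
 *		t.tx_buf = buf;
 *		t.rx_buf = buf;
 *
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t, &m);
 *		ret = spi_sync(spi, &m);
 *		kfree(buf);
 *		return ret;
 *	}
 */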

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
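
/* Example: a hypothetical sketch (message names illustrative) of the
 * exclusive-access pattern described above: take the bus lock, issue
 * back-to-back messages with the _locked variants so no other traffic
 * can interleave, then release the bus.
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (ret == 0)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->master);
 */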

/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the preallocated DMA-safe buffer if we can.  Copying is
	 * unavoidable here (the convenience of this call is precisely
	 * that the caller's buffers need not be DMA-safe), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
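
/* Example: a hypothetical one-byte register read using the helper
 * above.  The opcode is illustrative; the caller's buffers may live on
 * the stack because spi_write_then_read() bounces everything through
 * its own DMA-safe buffer.
 *
 *	u8 cmd = 0x9f;			(illustrative "read ID" opcode)
 *	u8 id;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &id, 1);
 */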

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters do not sit on the spi_bus, so look them up via the
 * spi_master class instead.
 */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference was taken in class_find_device() */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and drop the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_master_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_master, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(), but even
 * essential drivers wait until later.
 *
 * REVISIT: only boardinfo really needs static linking.  The rest (device
 * and driver registration) _could_ be dynamically linked (modular) ...
 * costs include needing to make the boardinfo data structures much more
 * public.
 */
postcore_initcall(spi_init);
