spi: mark device nodes only in case of successful instantiation
[cascardo/linux.git] drivers/spi/spi.c
1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/cache.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mutex.h>
25 #include <linux/of_device.h>
26 #include <linux/of_irq.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/slab.h>
29 #include <linux/mod_devicetable.h>
30 #include <linux/spi/spi.h>
31 #include <linux/of_gpio.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_domain.h>
34 #include <linux/export.h>
35 #include <linux/sched/rt.h>
36 #include <linux/delay.h>
37 #include <linux/kthread.h>
38 #include <linux/ioport.h>
39 #include <linux/acpi.h>
40 #include <linux/highmem.h>
41
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/spi.h>
44
45 static void spidev_release(struct device *dev)
46 {
47         struct spi_device       *spi = to_spi_device(dev);
48
49         /* spi masters may clean up state for released devices */
50         if (spi->master->cleanup)
51                 spi->master->cleanup(spi);
52
53         spi_master_put(spi->master);
54         kfree(spi);
55 }
56
57 static ssize_t
58 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59 {
60         const struct spi_device *spi = to_spi_device(dev);
61         int len;
62
63         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64         if (len != -ENODEV)
65                 return len;
66
67         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68 }
69 static DEVICE_ATTR_RO(modalias);
70
71 #define SPI_STATISTICS_ATTRS(field, file)                               \
72 static ssize_t spi_master_##field##_show(struct device *dev,            \
73                                          struct device_attribute *attr, \
74                                          char *buf)                     \
75 {                                                                       \
76         struct spi_master *master = container_of(dev,                   \
77                                                  struct spi_master, dev); \
78         return spi_statistics_##field##_show(&master->statistics, buf); \
79 }                                                                       \
80 static struct device_attribute dev_attr_spi_master_##field = {          \
81         .attr = { .name = file, .mode = S_IRUGO },                      \
82         .show = spi_master_##field##_show,                              \
83 };                                                                      \
84 static ssize_t spi_device_##field##_show(struct device *dev,            \
85                                          struct device_attribute *attr, \
86                                         char *buf)                      \
87 {                                                                       \
88         struct spi_device *spi = to_spi_device(dev);                    \
89         return spi_statistics_##field##_show(&spi->statistics, buf);    \
90 }                                                                       \
91 static struct device_attribute dev_attr_spi_device_##field = {          \
92         .attr = { .name = file, .mode = S_IRUGO },                      \
93         .show = spi_device_##field##_show,                              \
94 }
95
96 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
97 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
98                                             char *buf)                  \
99 {                                                                       \
100         unsigned long flags;                                            \
101         ssize_t len;                                                    \
102         spin_lock_irqsave(&stat->lock, flags);                          \
103         len = sprintf(buf, format_string, stat->field);                 \
104         spin_unlock_irqrestore(&stat->lock, flags);                     \
105         return len;                                                     \
106 }                                                                       \
107 SPI_STATISTICS_ATTRS(name, file)
108
109 #define SPI_STATISTICS_SHOW(field, format_string)                       \
110         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
111                                  field, format_string)
112
113 SPI_STATISTICS_SHOW(messages, "%lu");
114 SPI_STATISTICS_SHOW(transfers, "%lu");
115 SPI_STATISTICS_SHOW(errors, "%lu");
116 SPI_STATISTICS_SHOW(timedout, "%lu");
117
118 SPI_STATISTICS_SHOW(spi_sync, "%lu");
119 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
120 SPI_STATISTICS_SHOW(spi_async, "%lu");
121
122 SPI_STATISTICS_SHOW(bytes, "%llu");
123 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
124 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
125
126 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
127         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
128                                  "transfer_bytes_histo_" number,        \
129                                  transfer_bytes_histo[index],  "%lu")
130 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
147
148 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
149
150 static struct attribute *spi_dev_attrs[] = {
151         &dev_attr_modalias.attr,
152         NULL,
153 };
154
155 static const struct attribute_group spi_dev_group = {
156         .attrs  = spi_dev_attrs,
157 };
158
159 static struct attribute *spi_device_statistics_attrs[] = {
160         &dev_attr_spi_device_messages.attr,
161         &dev_attr_spi_device_transfers.attr,
162         &dev_attr_spi_device_errors.attr,
163         &dev_attr_spi_device_timedout.attr,
164         &dev_attr_spi_device_spi_sync.attr,
165         &dev_attr_spi_device_spi_sync_immediate.attr,
166         &dev_attr_spi_device_spi_async.attr,
167         &dev_attr_spi_device_bytes.attr,
168         &dev_attr_spi_device_bytes_rx.attr,
169         &dev_attr_spi_device_bytes_tx.attr,
170         &dev_attr_spi_device_transfer_bytes_histo0.attr,
171         &dev_attr_spi_device_transfer_bytes_histo1.attr,
172         &dev_attr_spi_device_transfer_bytes_histo2.attr,
173         &dev_attr_spi_device_transfer_bytes_histo3.attr,
174         &dev_attr_spi_device_transfer_bytes_histo4.attr,
175         &dev_attr_spi_device_transfer_bytes_histo5.attr,
176         &dev_attr_spi_device_transfer_bytes_histo6.attr,
177         &dev_attr_spi_device_transfer_bytes_histo7.attr,
178         &dev_attr_spi_device_transfer_bytes_histo8.attr,
179         &dev_attr_spi_device_transfer_bytes_histo9.attr,
180         &dev_attr_spi_device_transfer_bytes_histo10.attr,
181         &dev_attr_spi_device_transfer_bytes_histo11.attr,
182         &dev_attr_spi_device_transfer_bytes_histo12.attr,
183         &dev_attr_spi_device_transfer_bytes_histo13.attr,
184         &dev_attr_spi_device_transfer_bytes_histo14.attr,
185         &dev_attr_spi_device_transfer_bytes_histo15.attr,
186         &dev_attr_spi_device_transfer_bytes_histo16.attr,
187         &dev_attr_spi_device_transfers_split_maxsize.attr,
188         NULL,
189 };
190
191 static const struct attribute_group spi_device_statistics_group = {
192         .name  = "statistics",
193         .attrs  = spi_device_statistics_attrs,
194 };
195
196 static const struct attribute_group *spi_dev_groups[] = {
197         &spi_dev_group,
198         &spi_device_statistics_group,
199         NULL,
200 };
201
202 static struct attribute *spi_master_statistics_attrs[] = {
203         &dev_attr_spi_master_messages.attr,
204         &dev_attr_spi_master_transfers.attr,
205         &dev_attr_spi_master_errors.attr,
206         &dev_attr_spi_master_timedout.attr,
207         &dev_attr_spi_master_spi_sync.attr,
208         &dev_attr_spi_master_spi_sync_immediate.attr,
209         &dev_attr_spi_master_spi_async.attr,
210         &dev_attr_spi_master_bytes.attr,
211         &dev_attr_spi_master_bytes_rx.attr,
212         &dev_attr_spi_master_bytes_tx.attr,
213         &dev_attr_spi_master_transfer_bytes_histo0.attr,
214         &dev_attr_spi_master_transfer_bytes_histo1.attr,
215         &dev_attr_spi_master_transfer_bytes_histo2.attr,
216         &dev_attr_spi_master_transfer_bytes_histo3.attr,
217         &dev_attr_spi_master_transfer_bytes_histo4.attr,
218         &dev_attr_spi_master_transfer_bytes_histo5.attr,
219         &dev_attr_spi_master_transfer_bytes_histo6.attr,
220         &dev_attr_spi_master_transfer_bytes_histo7.attr,
221         &dev_attr_spi_master_transfer_bytes_histo8.attr,
222         &dev_attr_spi_master_transfer_bytes_histo9.attr,
223         &dev_attr_spi_master_transfer_bytes_histo10.attr,
224         &dev_attr_spi_master_transfer_bytes_histo11.attr,
225         &dev_attr_spi_master_transfer_bytes_histo12.attr,
226         &dev_attr_spi_master_transfer_bytes_histo13.attr,
227         &dev_attr_spi_master_transfer_bytes_histo14.attr,
228         &dev_attr_spi_master_transfer_bytes_histo15.attr,
229         &dev_attr_spi_master_transfer_bytes_histo16.attr,
230         &dev_attr_spi_master_transfers_split_maxsize.attr,
231         NULL,
232 };
233
234 static const struct attribute_group spi_master_statistics_group = {
235         .name  = "statistics",
236         .attrs  = spi_master_statistics_attrs,
237 };
238
239 static const struct attribute_group *spi_master_groups[] = {
240         &spi_master_statistics_group,
241         NULL,
242 };
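/*
 * Example (editorial note, hedged): once these attribute groups are
 * registered, per-device statistics typically appear under the device's
 * sysfs directory and per-master statistics under the spi_master class
 * device, e.g.:
 *
 *      /sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 *      /sys/class/spi_master/spi0/statistics/messages
 *
 * The "spi0.0" and "spi0" names are illustrative; actual names depend on
 * the bus number and chip select assigned at registration time.
 */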
243
244 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
245                                        struct spi_transfer *xfer,
246                                        struct spi_master *master)
247 {
248         unsigned long flags;
249         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
250
251         if (l2len < 0)
252                 l2len = 0;
253
254         spin_lock_irqsave(&stats->lock, flags);
255
256         stats->transfers++;
257         stats->transfer_bytes_histo[l2len]++;
258
259         stats->bytes += xfer->len;
260         if ((xfer->tx_buf) &&
261             (xfer->tx_buf != master->dummy_tx))
262                 stats->bytes_tx += xfer->len;
263         if ((xfer->rx_buf) &&
264             (xfer->rx_buf != master->dummy_rx))
265                 stats->bytes_rx += xfer->len;
266
267         spin_unlock_irqrestore(&stats->lock, flags);
268 }
269 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
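/*
 * Example (editorial note): the histogram bucket is fls()-based, so a
 * 20-byte transfer gives fls(20) = 5, l2len = 4, and is counted in the
 * bucket exported above as "transfer_bytes_histo_16-31"; a zero-length
 * transfer is clamped into the "0-1" bucket.
 */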
270
271 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
272  * and the sysfs version makes coldplug work too.
273  */
274
275 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
276                                                 const struct spi_device *sdev)
277 {
278         while (id->name[0]) {
279                 if (!strcmp(sdev->modalias, id->name))
280                         return id;
281                 id++;
282         }
283         return NULL;
284 }
285
286 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
287 {
288         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
289
290         return spi_match_id(sdrv->id_table, sdev);
291 }
292 EXPORT_SYMBOL_GPL(spi_get_device_id);
293
294 static int spi_match_device(struct device *dev, struct device_driver *drv)
295 {
296         const struct spi_device *spi = to_spi_device(dev);
297         const struct spi_driver *sdrv = to_spi_driver(drv);
298
299         /* Attempt an OF style match */
300         if (of_driver_match_device(dev, drv))
301                 return 1;
302
303         /* Then try ACPI */
304         if (acpi_driver_match_device(dev, drv))
305                 return 1;
306
307         if (sdrv->id_table)
308                 return !!spi_match_id(sdrv->id_table, spi);
309
310         return strcmp(spi->modalias, drv->name) == 0;
311 }
312
313 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
314 {
315         const struct spi_device         *spi = to_spi_device(dev);
316         int rc;
317
318         rc = acpi_device_uevent_modalias(dev, env);
319         if (rc != -ENODEV)
320                 return rc;
321
322         add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
323         return 0;
324 }
325
326 struct bus_type spi_bus_type = {
327         .name           = "spi",
328         .dev_groups     = spi_dev_groups,
329         .match          = spi_match_device,
330         .uevent         = spi_uevent,
331 };
332 EXPORT_SYMBOL_GPL(spi_bus_type);
333
334
335 static int spi_drv_probe(struct device *dev)
336 {
337         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
338         struct spi_device               *spi = to_spi_device(dev);
339         int ret;
340
341         ret = of_clk_set_defaults(dev->of_node, false);
342         if (ret)
343                 return ret;
344
345         if (dev->of_node) {
346                 spi->irq = of_irq_get(dev->of_node, 0);
347                 if (spi->irq == -EPROBE_DEFER)
348                         return -EPROBE_DEFER;
349                 if (spi->irq < 0)
350                         spi->irq = 0;
351         }
352
353         ret = dev_pm_domain_attach(dev, true);
354         if (ret != -EPROBE_DEFER) {
355                 ret = sdrv->probe(spi);
356                 if (ret)
357                         dev_pm_domain_detach(dev, true);
358         }
359
360         return ret;
361 }
362
363 static int spi_drv_remove(struct device *dev)
364 {
365         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
366         int ret;
367
368         ret = sdrv->remove(to_spi_device(dev));
369         dev_pm_domain_detach(dev, true);
370
371         return ret;
372 }
373
374 static void spi_drv_shutdown(struct device *dev)
375 {
376         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
377
378         sdrv->shutdown(to_spi_device(dev));
379 }
380
381 /**
382  * __spi_register_driver - register a SPI driver
383  * @owner: owner module of the driver to register
384  * @sdrv: the driver to register
385  * Context: can sleep
386  *
387  * Return: zero on success, else a negative error code.
388  */
389 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
390 {
391         sdrv->driver.owner = owner;
392         sdrv->driver.bus = &spi_bus_type;
393         if (sdrv->probe)
394                 sdrv->driver.probe = spi_drv_probe;
395         if (sdrv->remove)
396                 sdrv->driver.remove = spi_drv_remove;
397         if (sdrv->shutdown)
398                 sdrv->driver.shutdown = spi_drv_shutdown;
399         return driver_register(&sdrv->driver);
400 }
401 EXPORT_SYMBOL_GPL(__spi_register_driver);
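/*
 * Example (editorial sketch, not part of the original file): a minimal
 * client driver registering through this path.  The foo_* names are
 * hypothetical; spi_driver, module_spi_driver(), spi_setup() and the
 * probe/remove hooks invoked by spi_drv_probe()/spi_drv_remove() above
 * are the real API.
 *
 *      static int foo_probe(struct spi_device *spi)
 *      {
 *              spi->mode = SPI_MODE_0;
 *              spi->bits_per_word = 8;
 *              return spi_setup(spi);
 *      }
 *
 *      static int foo_remove(struct spi_device *spi)
 *      {
 *              return 0;
 *      }
 *
 *      static struct spi_driver foo_driver = {
 *              .driver = { .name = "foo" },
 *              .probe  = foo_probe,
 *              .remove = foo_remove,
 *      };
 *      module_spi_driver(foo_driver);
 */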
402
403 /*-------------------------------------------------------------------------*/
404
405 /* SPI devices should normally not be created by SPI device drivers; that
406  * would make them board-specific.  Similarly with SPI master drivers.
407  * Device registration normally goes into a board file like arch/.../mach.../board-YYY.c
408  * with other readonly (flashable) information about mainboard devices.
409  */
410
411 struct boardinfo {
412         struct list_head        list;
413         struct spi_board_info   board_info;
414 };
415
416 static LIST_HEAD(board_list);
417 static LIST_HEAD(spi_master_list);
418
419 /*
420  * Used to protect add/del operations for the board_info list and
421  * spi_master list, and their matching process
422  */
423 static DEFINE_MUTEX(board_lock);
424
425 /**
426  * spi_alloc_device - Allocate a new SPI device
427  * @master: Controller to which device is connected
428  * Context: can sleep
429  *
430  * Allows a driver to allocate and initialize a spi_device without
431  * registering it immediately.  This allows a driver to directly
432  * fill the spi_device with device parameters before calling
433  * spi_add_device() on it.
434  *
435  * The caller is responsible for calling spi_add_device() on the returned
436  * spi_device structure to add it to the SPI master.  If the caller
437  * needs to discard the spi_device without adding it, then it should
438  * call spi_dev_put() on it.
439  *
440  * Return: a pointer to the new device, or NULL.
441  */
442 struct spi_device *spi_alloc_device(struct spi_master *master)
443 {
444         struct spi_device       *spi;
445
446         if (!spi_master_get(master))
447                 return NULL;
448
449         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
450         if (!spi) {
451                 spi_master_put(master);
452                 return NULL;
453         }
454
455         spi->master = master;
456         spi->dev.parent = &master->dev;
457         spi->dev.bus = &spi_bus_type;
458         spi->dev.release = spidev_release;
459         spi->cs_gpio = -ENOENT;
460
461         spin_lock_init(&spi->statistics.lock);
462
463         device_initialize(&spi->dev);
464         return spi;
465 }
466 EXPORT_SYMBOL_GPL(spi_alloc_device);
467
468 static void spi_dev_set_name(struct spi_device *spi)
469 {
470         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
471
472         if (adev) {
473                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
474                 return;
475         }
476
477         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
478                      spi->chip_select);
479 }
480
481 static int spi_dev_check(struct device *dev, void *data)
482 {
483         struct spi_device *spi = to_spi_device(dev);
484         struct spi_device *new_spi = data;
485
486         if (spi->master == new_spi->master &&
487             spi->chip_select == new_spi->chip_select)
488                 return -EBUSY;
489         return 0;
490 }
491
492 /**
493  * spi_add_device - Add spi_device allocated with spi_alloc_device
494  * @spi: spi_device to register
495  *
496  * Companion function to spi_alloc_device.  Devices allocated with
497  * spi_alloc_device can be added onto the spi bus with this function.
498  *
499  * Return: 0 on success; negative errno on failure
500  */
501 int spi_add_device(struct spi_device *spi)
502 {
503         static DEFINE_MUTEX(spi_add_lock);
504         struct spi_master *master = spi->master;
505         struct device *dev = master->dev.parent;
506         int status;
507
508         /* Chipselects are numbered 0..max; validate. */
509         if (spi->chip_select >= master->num_chipselect) {
510                 dev_err(dev, "cs%d >= max %d\n",
511                         spi->chip_select,
512                         master->num_chipselect);
513                 return -EINVAL;
514         }
515
516         /* Set the bus ID string */
517         spi_dev_set_name(spi);
518
519         /* We need to make sure there's no other device with this
520          * chipselect **BEFORE** we call setup(), else we'll trash
521          * its configuration.  Lock against concurrent add() calls.
522          */
523         mutex_lock(&spi_add_lock);
524
525         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
526         if (status) {
527                 dev_err(dev, "chipselect %d already in use\n",
528                                 spi->chip_select);
529                 goto done;
530         }
531
532         if (master->cs_gpios)
533                 spi->cs_gpio = master->cs_gpios[spi->chip_select];
534
535         /* Drivers may modify this initial i/o setup, but will
536          * normally rely on the device being set up.  Devices
537          * using SPI_CS_HIGH can't coexist well otherwise...
538          */
539         status = spi_setup(spi);
540         if (status < 0) {
541                 dev_err(dev, "can't setup %s, status %d\n",
542                                 dev_name(&spi->dev), status);
543                 goto done;
544         }
545
546         /* Device may be bound to an active driver when this returns */
547         status = device_add(&spi->dev);
548         if (status < 0)
549                 dev_err(dev, "can't add %s, status %d\n",
550                                 dev_name(&spi->dev), status);
551         else
552                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
553
554 done:
555         mutex_unlock(&spi_add_lock);
556         return status;
557 }
558 EXPORT_SYMBOL_GPL(spi_add_device);
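/*
 * Example (editorial sketch): the spi_alloc_device()/spi_add_device()
 * pair as an adapter or bus driver might use it.  "my_master" and the
 * chip parameters are hypothetical.
 *
 *      struct spi_device *spi;
 *
 *      spi = spi_alloc_device(my_master);
 *      if (!spi)
 *              return -ENOMEM;
 *
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "my-chip", sizeof(spi->modalias));
 *
 *      if (spi_add_device(spi)) {
 *              spi_dev_put(spi);
 *              return -ENODEV;
 *      }
 */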
559
560 /**
561  * spi_new_device - instantiate one new SPI device
562  * @master: Controller to which device is connected
563  * @chip: Describes the SPI device
564  * Context: can sleep
565  *
566  * On typical mainboards, this is purely internal; and it's not needed
567  * after board init creates the hard-wired devices.  Some development
568  * platforms may not be able to use spi_register_board_info though, and
569  * this is exported so that for example a USB or parport based adapter
570  * driver could add devices (which it would learn about out-of-band).
571  *
572  * Return: the new device, or NULL.
573  */
574 struct spi_device *spi_new_device(struct spi_master *master,
575                                   struct spi_board_info *chip)
576 {
577         struct spi_device       *proxy;
578         int                     status;
579
580         /* NOTE:  caller did any chip->bus_num checks necessary.
581          *
582          * Also, unless we change the return value convention to use
583          * error-or-pointer (not NULL-or-pointer), troubleshootability
584          * suggests syslogged diagnostics are best here (ugh).
585          */
586
587         proxy = spi_alloc_device(master);
588         if (!proxy)
589                 return NULL;
590
591         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
592
593         proxy->chip_select = chip->chip_select;
594         proxy->max_speed_hz = chip->max_speed_hz;
595         proxy->mode = chip->mode;
596         proxy->irq = chip->irq;
597         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
598         proxy->dev.platform_data = (void *) chip->platform_data;
599         proxy->controller_data = chip->controller_data;
600         proxy->controller_state = NULL;
601
602         status = spi_add_device(proxy);
603         if (status < 0) {
604                 spi_dev_put(proxy);
605                 return NULL;
606         }
607
608         return proxy;
609 }
610 EXPORT_SYMBOL_GPL(spi_new_device);
611
612 /**
613  * spi_unregister_device - unregister a single SPI device
614  * @spi: spi_device to unregister
615  *
616  * Start making the passed SPI device vanish. Normally this would be handled
617  * by spi_unregister_master().
618  */
619 void spi_unregister_device(struct spi_device *spi)
620 {
621         if (!spi)
622                 return;
623
624         if (spi->dev.of_node)
625                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
626         if (ACPI_COMPANION(&spi->dev))
627                 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
628         device_unregister(&spi->dev);
629 }
630 EXPORT_SYMBOL_GPL(spi_unregister_device);
631
632 static void spi_match_master_to_boardinfo(struct spi_master *master,
633                                 struct spi_board_info *bi)
634 {
635         struct spi_device *dev;
636
637         if (master->bus_num != bi->bus_num)
638                 return;
639
640         dev = spi_new_device(master, bi);
641         if (!dev)
642                 dev_err(master->dev.parent, "can't create new device for %s\n",
643                         bi->modalias);
644 }
645
646 /**
647  * spi_register_board_info - register SPI devices for a given board
648  * @info: array of chip descriptors
649  * @n: how many descriptors are provided
650  * Context: can sleep
651  *
652  * Board-specific early init code calls this (probably during arch_initcall)
653  * with segments of the SPI device table.  Any device nodes are created later,
654  * after the relevant parent SPI controller (bus_num) is defined.  We keep
655  * this table of devices forever, so that reloading a controller driver will
656  * not make Linux forget about these hard-wired devices.
657  *
658  * Other code can also call this, e.g. a particular add-on board might provide
659  * SPI devices through its expansion connector, so code initializing that board
660  * would naturally declare its SPI devices.
661  *
662  * The board info passed can safely be __initdata ... but be careful of
663  * any embedded pointers (platform_data, etc), they're copied as-is.
664  *
665  * Return: zero on success, else a negative error code.
666  */
667 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
668 {
669         struct boardinfo *bi;
670         int i;
671
672         if (!n)
673                 return -EINVAL;
674
675         bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
676         if (!bi)
677                 return -ENOMEM;
678
679         for (i = 0; i < n; i++, bi++, info++) {
680                 struct spi_master *master;
681
682                 memcpy(&bi->board_info, info, sizeof(*info));
683                 mutex_lock(&board_lock);
684                 list_add_tail(&bi->list, &board_list);
685                 list_for_each_entry(master, &spi_master_list, list)
686                         spi_match_master_to_boardinfo(master, &bi->board_info);
687                 mutex_unlock(&board_lock);
688         }
689
690         return 0;
691 }
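/*
 * Example (editorial sketch): board init code registering a hard-wired
 * device.  Device name, bus number and parameters are hypothetical.
 *
 *      static struct spi_board_info my_board_spi_devs[] __initdata = {
 *              {
 *                      .modalias     = "my-chip",
 *                      .bus_num      = 1,
 *                      .chip_select  = 0,
 *                      .max_speed_hz = 10 * 1000 * 1000,
 *                      .mode         = SPI_MODE_3,
 *              },
 *      };
 *
 *      spi_register_board_info(my_board_spi_devs,
 *                              ARRAY_SIZE(my_board_spi_devs));
 */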
692
693 /*-------------------------------------------------------------------------*/
694
695 static void spi_set_cs(struct spi_device *spi, bool enable)
696 {
697         if (spi->mode & SPI_CS_HIGH)
698                 enable = !enable;
699
700         if (gpio_is_valid(spi->cs_gpio))
701                 gpio_set_value(spi->cs_gpio, !enable);
702         else if (spi->master->set_cs)
703                 spi->master->set_cs(spi, !enable);
704 }
705
706 #ifdef CONFIG_HAS_DMA
707 static int spi_map_buf(struct spi_master *master, struct device *dev,
708                        struct sg_table *sgt, void *buf, size_t len,
709                        enum dma_data_direction dir)
710 {
711         const bool vmalloced_buf = is_vmalloc_addr(buf);
712         unsigned int max_seg_size = dma_get_max_seg_size(dev);
713 #ifdef CONFIG_HIGHMEM
714         const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
715                                 (unsigned long)buf < (PKMAP_BASE +
716                                         (LAST_PKMAP * PAGE_SIZE)));
717 #else
718         const bool kmap_buf = false;
719 #endif
720         int desc_len;
721         int sgs;
722         struct page *vm_page;
723         void *sg_buf;
724         size_t min;
725         int i, ret;
726
727         if (vmalloced_buf || kmap_buf) {
728                 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
729                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
730         } else if (virt_addr_valid(buf)) {
731                 desc_len = min_t(int, max_seg_size, master->max_dma_len);
732                 sgs = DIV_ROUND_UP(len, desc_len);
733         } else {
734                 return -EINVAL;
735         }
736
737         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
738         if (ret != 0)
739                 return ret;
740
741         for (i = 0; i < sgs; i++) {
742
743                 if (vmalloced_buf || kmap_buf) {
744                         min = min_t(size_t,
745                                     len, desc_len - offset_in_page(buf));
746                         if (vmalloced_buf)
747                                 vm_page = vmalloc_to_page(buf);
748                         else
749                                 vm_page = kmap_to_page(buf);
750                         if (!vm_page) {
751                                 sg_free_table(sgt);
752                                 return -ENOMEM;
753                         }
754                         sg_set_page(&sgt->sgl[i], vm_page,
755                                     min, offset_in_page(buf));
756                 } else {
757                         min = min_t(size_t, len, desc_len);
758                         sg_buf = buf;
759                         sg_set_buf(&sgt->sgl[i], sg_buf, min);
760                 }
761
762                 buf += min;
763                 len -= min;
764         }
765
766         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
767         if (!ret)
768                 ret = -ENOMEM;
769         if (ret < 0) {
770                 sg_free_table(sgt);
771                 return ret;
772         }
773
774         sgt->nents = ret;
775
776         return 0;
777 }
778
779 static void spi_unmap_buf(struct spi_master *master, struct device *dev,
780                           struct sg_table *sgt, enum dma_data_direction dir)
781 {
782         if (sgt->orig_nents) {
783                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
784                 sg_free_table(sgt);
785         }
786 }
787
788 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
789 {
790         struct device *tx_dev, *rx_dev;
791         struct spi_transfer *xfer;
792         int ret;
793
794         if (!master->can_dma)
795                 return 0;
796
797         if (master->dma_tx)
798                 tx_dev = master->dma_tx->device->dev;
799         else
800                 tx_dev = &master->dev;
801
802         if (master->dma_rx)
803                 rx_dev = master->dma_rx->device->dev;
804         else
805                 rx_dev = &master->dev;
806
807         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
808                 if (!master->can_dma(master, msg->spi, xfer))
809                         continue;
810
811                 if (xfer->tx_buf != NULL) {
812                         ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
813                                           (void *)xfer->tx_buf, xfer->len,
814                                           DMA_TO_DEVICE);
815                         if (ret != 0)
816                                 return ret;
817                 }
818
819                 if (xfer->rx_buf != NULL) {
820                         ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
821                                           xfer->rx_buf, xfer->len,
822                                           DMA_FROM_DEVICE);
823                         if (ret != 0) {
824                                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
825                                               DMA_TO_DEVICE);
826                                 return ret;
827                         }
828                 }
829         }
830
831         master->cur_msg_mapped = true;
832
833         return 0;
834 }
835
836 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
837 {
838         struct spi_transfer *xfer;
839         struct device *tx_dev, *rx_dev;
840
841         if (!master->cur_msg_mapped || !master->can_dma)
842                 return 0;
843
844         if (master->dma_tx)
845                 tx_dev = master->dma_tx->device->dev;
846         else
847                 tx_dev = &master->dev;
848
849         if (master->dma_rx)
850                 rx_dev = master->dma_rx->device->dev;
851         else
852                 rx_dev = &master->dev;
853
854         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
855                 if (!master->can_dma(master, msg->spi, xfer))
856                         continue;
857
858                 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
859                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
860         }
861
862         return 0;
863 }
864 #else /* !CONFIG_HAS_DMA */
865 static inline int spi_map_buf(struct spi_master *master,
866                               struct device *dev, struct sg_table *sgt,
867                               void *buf, size_t len,
868                               enum dma_data_direction dir)
869 {
870         return -EINVAL;
871 }
872
873 static inline void spi_unmap_buf(struct spi_master *master,
874                                  struct device *dev, struct sg_table *sgt,
875                                  enum dma_data_direction dir)
876 {
877 }
878
879 static inline int __spi_map_msg(struct spi_master *master,
880                                 struct spi_message *msg)
881 {
882         return 0;
883 }
884
885 static inline int __spi_unmap_msg(struct spi_master *master,
886                                   struct spi_message *msg)
887 {
888         return 0;
889 }
890 #endif /* !CONFIG_HAS_DMA */
891
892 static inline int spi_unmap_msg(struct spi_master *master,
893                                 struct spi_message *msg)
894 {
895         struct spi_transfer *xfer;
896
897         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
898                 /*
899                  * Restore the original value of tx_buf or rx_buf if they are
900                  * NULL.
901                  */
902                 if (xfer->tx_buf == master->dummy_tx)
903                         xfer->tx_buf = NULL;
904                 if (xfer->rx_buf == master->dummy_rx)
905                         xfer->rx_buf = NULL;
906         }
907
908         return __spi_unmap_msg(master, msg);
909 }
910
911 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
912 {
913         struct spi_transfer *xfer;
914         void *tmp;
915         unsigned int max_tx, max_rx;
916
917         if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
918                 max_tx = 0;
919                 max_rx = 0;
920
921                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
922                         if ((master->flags & SPI_MASTER_MUST_TX) &&
923                             !xfer->tx_buf)
924                                 max_tx = max(xfer->len, max_tx);
925                         if ((master->flags & SPI_MASTER_MUST_RX) &&
926                             !xfer->rx_buf)
927                                 max_rx = max(xfer->len, max_rx);
928                 }
929
930                 if (max_tx) {
931                         tmp = krealloc(master->dummy_tx, max_tx,
932                                        GFP_KERNEL | GFP_DMA);
933                         if (!tmp)
934                                 return -ENOMEM;
935                         master->dummy_tx = tmp;
936                         memset(tmp, 0, max_tx);
937                 }
938
939                 if (max_rx) {
940                         tmp = krealloc(master->dummy_rx, max_rx,
941                                        GFP_KERNEL | GFP_DMA);
942                         if (!tmp)
943                                 return -ENOMEM;
944                         master->dummy_rx = tmp;
945                 }
946
947                 if (max_tx || max_rx) {
948                         list_for_each_entry(xfer, &msg->transfers,
949                                             transfer_list) {
950                                 if (!xfer->tx_buf)
951                                         xfer->tx_buf = master->dummy_tx;
952                                 if (!xfer->rx_buf)
953                                         xfer->rx_buf = master->dummy_rx;
954                         }
955                 }
956         }
957
958         return __spi_map_msg(master, msg);
959 }
960
961 /*
962  * spi_transfer_one_message - Default implementation of transfer_one_message()
963  *
964  * This is a standard implementation of transfer_one_message() for
965  * drivers which implement a transfer_one() operation.  It provides
966  * standard handling of delays and chip select management.
967  */
968 static int spi_transfer_one_message(struct spi_master *master,
969                                     struct spi_message *msg)
970 {
971         struct spi_transfer *xfer;
972         bool keep_cs = false;
973         int ret = 0;
974         unsigned long long ms = 1;
975         struct spi_statistics *statm = &master->statistics;
976         struct spi_statistics *stats = &msg->spi->statistics;
977
978         spi_set_cs(msg->spi, true);
979
980         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
981         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
982
983         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
984                 trace_spi_transfer_start(msg, xfer);
985
986                 spi_statistics_add_transfer_stats(statm, xfer, master);
987                 spi_statistics_add_transfer_stats(stats, xfer, master);
988
989                 if (xfer->tx_buf || xfer->rx_buf) {
990                         reinit_completion(&master->xfer_completion);
991
992                         ret = master->transfer_one(master, msg->spi, xfer);
993                         if (ret < 0) {
994                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
995                                                                errors);
996                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
997                                                                errors);
998                                 dev_err(&msg->spi->dev,
999                                         "SPI transfer failed: %d\n", ret);
1000                                 goto out;
1001                         }
1002
1003                         if (ret > 0) {
1004                                 ret = 0;
1005                                 ms = 8LL * 1000LL * xfer->len;
1006                                 do_div(ms, xfer->speed_hz);
1007                                 ms += ms + 100; /* some tolerance */
1008
1009                                 if (ms > UINT_MAX)
1010                                         ms = UINT_MAX;
1011
1012                                 ms = wait_for_completion_timeout(&master->xfer_completion,
1013                                                                  msecs_to_jiffies(ms));
1014                         }
1015
1016                         if (ms == 0) {
1017                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1018                                                                timedout);
1019                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1020                                                                timedout);
1021                                 dev_err(&msg->spi->dev,
1022                                         "SPI transfer timed out\n");
1023                                 msg->status = -ETIMEDOUT;
1024                         }
1025                 } else {
1026                         if (xfer->len)
1027                                 dev_err(&msg->spi->dev,
1028                                         "Bufferless transfer has length %u\n",
1029                                         xfer->len);
1030                 }
1031
1032                 trace_spi_transfer_stop(msg, xfer);
1033
1034                 if (msg->status != -EINPROGRESS)
1035                         goto out;
1036
1037                 if (xfer->delay_usecs)
1038                         udelay(xfer->delay_usecs);
1039
1040                 if (xfer->cs_change) {
1041                         if (list_is_last(&xfer->transfer_list,
1042                                          &msg->transfers)) {
1043                                 keep_cs = true;
1044                         } else {
1045                                 spi_set_cs(msg->spi, false);
1046                                 udelay(10);
1047                                 spi_set_cs(msg->spi, true);
1048                         }
1049                 }
1050
1051                 msg->actual_length += xfer->len;
1052         }
1053
1054 out:
1055         if (ret != 0 || !keep_cs)
1056                 spi_set_cs(msg->spi, false);
1057
1058         if (msg->status == -EINPROGRESS)
1059                 msg->status = ret;
1060
1061         if (msg->status && master->handle_err)
1062                 master->handle_err(master, msg);
1063
1064         spi_res_release(master, msg);
1065
1066         spi_finalize_current_message(master);
1067
1068         return ret;
1069 }
1070
1071 /**
1072  * spi_finalize_current_transfer - report completion of a transfer
1073  * @master: the master reporting completion
1074  *
1075  * Called by SPI drivers using the core transfer_one_message()
1076  * implementation to notify it that the current interrupt driven
1077  * transfer has finished and the next one may be scheduled.
1078  */
1079 void spi_finalize_current_transfer(struct spi_master *master)
1080 {
1081         complete(&master->xfer_completion);
1082 }
1083 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
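/*
 * Example (editorial sketch): the usual pairing with transfer_one().
 * Returning a positive value from transfer_one() makes
 * spi_transfer_one_message() above wait on xfer_completion, and the
 * controller's interrupt handler then signals it.  The foo_* names and
 * foo_start_hw() are hypothetical.
 *
 *      static int foo_transfer_one(struct spi_master *master,
 *                                  struct spi_device *spi,
 *                                  struct spi_transfer *xfer)
 *      {
 *              foo_start_hw(master, xfer);
 *              return 1;
 *      }
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              struct spi_master *master = dev_id;
 *
 *              spi_finalize_current_transfer(master);
 *              return IRQ_HANDLED;
 *      }
 */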
1084
1085 /**
1086  * __spi_pump_messages - function which processes spi message queue
1087  * @master: master to process queue for
1088  * @in_kthread: true if we are in the context of the message pump thread
1089  *
1090  * This function checks if there is any spi message in the queue that
1091  * needs processing and if so calls out to the driver to initialize hardware
1092  * and transfer each message.
1093  *
1094  * Note that it is called both from the kthread itself and also from
1095  * inside spi_sync(); the queue extraction handling at the top of the
1096  * function should deal with this safely.
1097  */
1098 static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1099 {
1100         unsigned long flags;
1101         bool was_busy = false;
1102         int ret;
1103
1104         /* Lock queue */
1105         spin_lock_irqsave(&master->queue_lock, flags);
1106
1107         /* Make sure we are not already running a message */
1108         if (master->cur_msg) {
1109                 spin_unlock_irqrestore(&master->queue_lock, flags);
1110                 return;
1111         }
1112
1113         /* If another context is idling the device then defer */
1114         if (master->idling) {
1115                 kthread_queue_work(&master->kworker, &master->pump_messages);
1116                 spin_unlock_irqrestore(&master->queue_lock, flags);
1117                 return;
1118         }
1119
1120         /* Check if the queue is idle */
1121         if (list_empty(&master->queue) || !master->running) {
1122                 if (!master->busy) {
1123                         spin_unlock_irqrestore(&master->queue_lock, flags);
1124                         return;
1125                 }
1126
1127                 /* Only do teardown in the thread */
1128                 if (!in_kthread) {
1129                         kthread_queue_work(&master->kworker,
1130                                            &master->pump_messages);
1131                         spin_unlock_irqrestore(&master->queue_lock, flags);
1132                         return;
1133                 }
1134
1135                 master->busy = false;
1136                 master->idling = true;
1137                 spin_unlock_irqrestore(&master->queue_lock, flags);
1138
1139                 kfree(master->dummy_rx);
1140                 master->dummy_rx = NULL;
1141                 kfree(master->dummy_tx);
1142                 master->dummy_tx = NULL;
1143                 if (master->unprepare_transfer_hardware &&
1144                     master->unprepare_transfer_hardware(master))
1145                         dev_err(&master->dev,
1146                                 "failed to unprepare transfer hardware\n");
1147                 if (master->auto_runtime_pm) {
1148                         pm_runtime_mark_last_busy(master->dev.parent);
1149                         pm_runtime_put_autosuspend(master->dev.parent);
1150                 }
1151                 trace_spi_master_idle(master);
1152
1153                 spin_lock_irqsave(&master->queue_lock, flags);
1154                 master->idling = false;
1155                 spin_unlock_irqrestore(&master->queue_lock, flags);
1156                 return;
1157         }
1158
1159         /* Extract head of queue */
1160         master->cur_msg =
1161                 list_first_entry(&master->queue, struct spi_message, queue);
1162
1163         list_del_init(&master->cur_msg->queue);
1164         if (master->busy)
1165                 was_busy = true;
1166         else
1167                 master->busy = true;
1168         spin_unlock_irqrestore(&master->queue_lock, flags);
1169
1170         mutex_lock(&master->io_mutex);
1171
1172         if (!was_busy && master->auto_runtime_pm) {
1173                 ret = pm_runtime_get_sync(master->dev.parent);
1174                 if (ret < 0) {
1175                         dev_err(&master->dev, "Failed to power device: %d\n",
1176                                 ret);
1177                         mutex_unlock(&master->io_mutex);
1178                         return;
1179                 }
1180         }
1181
1182         if (!was_busy)
1183                 trace_spi_master_busy(master);
1184
1185         if (!was_busy && master->prepare_transfer_hardware) {
1186                 ret = master->prepare_transfer_hardware(master);
1187                 if (ret) {
1188                         dev_err(&master->dev,
1189                                 "failed to prepare transfer hardware\n");
1190
1191                         if (master->auto_runtime_pm)
1192                                 pm_runtime_put(master->dev.parent);
1193                         mutex_unlock(&master->io_mutex);
1194                         return;
1195                 }
1196         }
1197
1198         trace_spi_message_start(master->cur_msg);
1199
1200         if (master->prepare_message) {
1201                 ret = master->prepare_message(master, master->cur_msg);
1202                 if (ret) {
1203                         dev_err(&master->dev,
1204                                 "failed to prepare message: %d\n", ret);
1205                         master->cur_msg->status = ret;
1206                         spi_finalize_current_message(master);
1207                         goto out;
1208                 }
1209                 master->cur_msg_prepared = true;
1210         }
1211
1212         ret = spi_map_msg(master, master->cur_msg);
1213         if (ret) {
1214                 master->cur_msg->status = ret;
1215                 spi_finalize_current_message(master);
1216                 goto out;
1217         }
1218
1219         ret = master->transfer_one_message(master, master->cur_msg);
1220         if (ret) {
1221                 dev_err(&master->dev,
1222                         "failed to transfer one message from queue\n");
1223                 goto out;
1224         }
1225
1226 out:
1227         mutex_unlock(&master->io_mutex);
1228
1229         /* Prod the scheduler in case transfer_one() was busy waiting */
1230         if (!ret)
1231                 cond_resched();
1232 }
1233
1234 /**
1235  * spi_pump_messages - kthread work function which processes spi message queue
1236  * @work: pointer to kthread work struct contained in the master struct
1237  */
1238 static void spi_pump_messages(struct kthread_work *work)
1239 {
1240         struct spi_master *master =
1241                 container_of(work, struct spi_master, pump_messages);
1242
1243         __spi_pump_messages(master, true);
1244 }
1245
1246 static int spi_init_queue(struct spi_master *master)
1247 {
1248         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1249
1250         master->running = false;
1251         master->busy = false;
1252
1253         kthread_init_worker(&master->kworker);
1254         master->kworker_task = kthread_run(kthread_worker_fn,
1255                                            &master->kworker, "%s",
1256                                            dev_name(&master->dev));
1257         if (IS_ERR(master->kworker_task)) {
1258                 dev_err(&master->dev, "failed to create message pump task\n");
1259                 return PTR_ERR(master->kworker_task);
1260         }
1261         kthread_init_work(&master->pump_messages, spi_pump_messages);
1262
1263         /*
1264          * Master config will indicate if this controller should run the
1265          * message pump with high (realtime) priority to reduce the transfer
1266          * latency on the bus by minimising the delay between a transfer
1267          * request and the scheduling of the message pump thread. Without this
1268          * setting the message pump thread will remain at default priority.
1269          */
1270         if (master->rt) {
1271                 dev_info(&master->dev,
1272                         "will run message pump with realtime priority\n");
1273                 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1274         }
1275
1276         return 0;
1277 }
1278
1279 /**
1280  * spi_get_next_queued_message() - called by driver to check for queued
1281  * messages
1282  * @master: the master to check for queued messages
1283  *
1284  * If there are more messages in the queue, the next message is returned from
1285  * this call.
1286  *
1287  * Return: the next message in the queue, else NULL if the queue is empty.
1288  */
1289 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1290 {
1291         struct spi_message *next;
1292         unsigned long flags;
1293
1294         /* get a pointer to the next message, if any */
1295         spin_lock_irqsave(&master->queue_lock, flags);
1296         next = list_first_entry_or_null(&master->queue, struct spi_message,
1297                                         queue);
1298         spin_unlock_irqrestore(&master->queue_lock, flags);
1299
1300         return next;
1301 }
1302 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1303
1304 /**
1305  * spi_finalize_current_message() - the current message is complete
1306  * @master: the master to return the message to
1307  *
1308  * Called by the driver to notify the core that the message in the front of the
1309  * queue is complete and can be removed from the queue.
1310  */
1311 void spi_finalize_current_message(struct spi_master *master)
1312 {
1313         struct spi_message *mesg;
1314         unsigned long flags;
1315         int ret;
1316
1317         spin_lock_irqsave(&master->queue_lock, flags);
1318         mesg = master->cur_msg;
1319         spin_unlock_irqrestore(&master->queue_lock, flags);
1320
1321         spi_unmap_msg(master, mesg);
1322
1323         if (master->cur_msg_prepared && master->unprepare_message) {
1324                 ret = master->unprepare_message(master, mesg);
1325                 if (ret) {
1326                         dev_err(&master->dev,
1327                                 "failed to unprepare message: %d\n", ret);
1328                 }
1329         }
1330
1331         spin_lock_irqsave(&master->queue_lock, flags);
1332         master->cur_msg = NULL;
1333         master->cur_msg_prepared = false;
1334         kthread_queue_work(&master->kworker, &master->pump_messages);
1335         spin_unlock_irqrestore(&master->queue_lock, flags);
1336
1337         trace_spi_message_done(mesg);
1338
1339         mesg->state = NULL;
1340         if (mesg->complete)
1341                 mesg->complete(mesg->context);
1342 }
1343 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
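/*
 * Example (editorial sketch): a driver providing its own
 * transfer_one_message() is expected to report completion through this
 * helper once the whole message has been processed.  foo_do_transfers()
 * is hypothetical.
 *
 *      static int foo_transfer_one_message(struct spi_master *master,
 *                                          struct spi_message *msg)
 *      {
 *              int ret = foo_do_transfers(master, msg);
 *
 *              msg->status = ret;
 *              spi_finalize_current_message(master);
 *              return ret;
 *      }
 */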
1344
1345 static int spi_start_queue(struct spi_master *master)
1346 {
1347         unsigned long flags;
1348
1349         spin_lock_irqsave(&master->queue_lock, flags);
1350
1351         if (master->running || master->busy) {
1352                 spin_unlock_irqrestore(&master->queue_lock, flags);
1353                 return -EBUSY;
1354         }
1355
1356         master->running = true;
1357         master->cur_msg = NULL;
1358         spin_unlock_irqrestore(&master->queue_lock, flags);
1359
1360         kthread_queue_work(&master->kworker, &master->pump_messages);
1361
1362         return 0;
1363 }
1364
1365 static int spi_stop_queue(struct spi_master *master)
1366 {
1367         unsigned long flags;
1368         unsigned limit = 500;
1369         int ret = 0;
1370
1371         spin_lock_irqsave(&master->queue_lock, flags);
1372
1373         /*
1374          * This is a bit lame, but is optimized for the common execution path.
1375          * A wait_queue on the master->busy could be used, but then the common
1376          * execution path (pump_messages) would be required to call wake_up or
1377          * friends on every SPI message. Do this instead.
1378          */
1379         while ((!list_empty(&master->queue) || master->busy) && limit--) {
1380                 spin_unlock_irqrestore(&master->queue_lock, flags);
1381                 usleep_range(10000, 11000);
1382                 spin_lock_irqsave(&master->queue_lock, flags);
1383         }
1384
1385         if (!list_empty(&master->queue) || master->busy)
1386                 ret = -EBUSY;
1387         else
1388                 master->running = false;
1389
1390         spin_unlock_irqrestore(&master->queue_lock, flags);
1391
1392         if (ret) {
1393                 dev_warn(&master->dev,
1394                          "could not stop message queue\n");
1395                 return ret;
1396         }
1397         return ret;
1398 }
1399
1400 static int spi_destroy_queue(struct spi_master *master)
1401 {
1402         int ret;
1403
1404         ret = spi_stop_queue(master);
1405
1406         /*
1407          * kthread_flush_worker will block until all work is done.
1408          * If the reason that stop_queue timed out is that the work will never
1409          * finish, then it does no good to call flush/stop thread, so
1410          * return anyway.
1411          */
1412         if (ret) {
1413                 dev_err(&master->dev, "problem destroying queue\n");
1414                 return ret;
1415         }
1416
1417         kthread_flush_worker(&master->kworker);
1418         kthread_stop(master->kworker_task);
1419
1420         return 0;
1421 }
1422
1423 static int __spi_queued_transfer(struct spi_device *spi,
1424                                  struct spi_message *msg,
1425                                  bool need_pump)
1426 {
1427         struct spi_master *master = spi->master;
1428         unsigned long flags;
1429
1430         spin_lock_irqsave(&master->queue_lock, flags);
1431
1432         if (!master->running) {
1433                 spin_unlock_irqrestore(&master->queue_lock, flags);
1434                 return -ESHUTDOWN;
1435         }
1436         msg->actual_length = 0;
1437         msg->status = -EINPROGRESS;
1438
1439         list_add_tail(&msg->queue, &master->queue);
1440         if (!master->busy && need_pump)
1441                 kthread_queue_work(&master->kworker, &master->pump_messages);
1442
1443         spin_unlock_irqrestore(&master->queue_lock, flags);
1444         return 0;
1445 }
1446
1447 /**
1448  * spi_queued_transfer - transfer function for queued transfers
1449  * @spi: spi device which is requesting transfer
1450  * @msg: spi message to be handled, queued onto the driver's queue
1451  *
1452  * Return: zero on success, else a negative error code.
1453  */
1454 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1455 {
1456         return __spi_queued_transfer(spi, msg, true);
1457 }
1458
1459 static int spi_master_initialize_queue(struct spi_master *master)
1460 {
1461         int ret;
1462
1463         master->transfer = spi_queued_transfer;
1464         if (!master->transfer_one_message)
1465                 master->transfer_one_message = spi_transfer_one_message;
1466
1467         /* Initialize and start queue */
1468         ret = spi_init_queue(master);
1469         if (ret) {
1470                 dev_err(&master->dev, "problem initializing queue\n");
1471                 goto err_init_queue;
1472         }
1473         master->queued = true;
1474         ret = spi_start_queue(master);
1475         if (ret) {
1476                 dev_err(&master->dev, "problem starting queue\n");
1477                 goto err_start_queue;
1478         }
1479
1480         return 0;
1481
1482 err_start_queue:
1483         spi_destroy_queue(master);
1484 err_init_queue:
1485         return ret;
1486 }
1487
1488 /*-------------------------------------------------------------------------*/
1489
1490 #if defined(CONFIG_OF)
1491 static struct spi_device *
1492 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1493 {
1494         struct spi_device *spi;
1495         int rc;
1496         u32 value;
1497
1498         /* Alloc an spi_device */
1499         spi = spi_alloc_device(master);
1500         if (!spi) {
1501                 dev_err(&master->dev, "spi_device alloc error for %s\n",
1502                         nc->full_name);
1503                 rc = -ENOMEM;
1504                 goto err_out;
1505         }
1506
1507         /* Select device driver */
1508         rc = of_modalias_node(nc, spi->modalias,
1509                                 sizeof(spi->modalias));
1510         if (rc < 0) {
1511                 dev_err(&master->dev, "cannot find modalias for %s\n",
1512                         nc->full_name);
1513                 goto err_out;
1514         }
1515
1516         /* Device address */
1517         rc = of_property_read_u32(nc, "reg", &value);
1518         if (rc) {
1519                 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1520                         nc->full_name, rc);
1521                 goto err_out;
1522         }
1523         spi->chip_select = value;
1524
1525         /* Mode (clock phase/polarity/etc.) */
1526         if (of_find_property(nc, "spi-cpha", NULL))
1527                 spi->mode |= SPI_CPHA;
1528         if (of_find_property(nc, "spi-cpol", NULL))
1529                 spi->mode |= SPI_CPOL;
1530         if (of_find_property(nc, "spi-cs-high", NULL))
1531                 spi->mode |= SPI_CS_HIGH;
1532         if (of_find_property(nc, "spi-3wire", NULL))
1533                 spi->mode |= SPI_3WIRE;
1534         if (of_find_property(nc, "spi-lsb-first", NULL))
1535                 spi->mode |= SPI_LSB_FIRST;
1536
1537         /* Device DUAL/QUAD mode */
1538         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1539                 switch (value) {
1540                 case 1:
1541                         break;
1542                 case 2:
1543                         spi->mode |= SPI_TX_DUAL;
1544                         break;
1545                 case 4:
1546                         spi->mode |= SPI_TX_QUAD;
1547                         break;
1548                 default:
1549                         dev_warn(&master->dev,
1550                                 "spi-tx-bus-width %d not supported\n",
1551                                 value);
1552                         break;
1553                 }
1554         }
1555
1556         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1557                 switch (value) {
1558                 case 1:
1559                         break;
1560                 case 2:
1561                         spi->mode |= SPI_RX_DUAL;
1562                         break;
1563                 case 4:
1564                         spi->mode |= SPI_RX_QUAD;
1565                         break;
1566                 default:
1567                         dev_warn(&master->dev,
1568                                 "spi-rx-bus-width %d not supported\n",
1569                                 value);
1570                         break;
1571                 }
1572         }
1573
1574         /* Device speed */
1575         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1576         if (rc) {
1577                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1578                         nc->full_name, rc);
1579                 goto err_out;
1580         }
1581         spi->max_speed_hz = value;
1582
1583         /* Store a pointer to the node in the device structure */
1584         of_node_get(nc);
1585         spi->dev.of_node = nc;
1586
1587         /* Register the new device */
1588         rc = spi_add_device(spi);
1589         if (rc) {
1590                 dev_err(&master->dev, "spi_device register error %s\n",
1591                         nc->full_name);
1592                 goto err_out;
1593         }
1594
1595         return spi;
1596
1597 err_out:
1598         spi_dev_put(spi);
1599         return ERR_PTR(rc);
1600 }
1601
1602 /**
1603  * of_register_spi_devices() - Register child devices onto the SPI bus
1604  * @master:     Pointer to spi_master device
1605  *
1606  * Registers an spi_device for each child node of the master node which has
1607  * a 'reg' property.
1608  */
1609 static void of_register_spi_devices(struct spi_master *master)
1610 {
1611         struct spi_device *spi;
1612         struct device_node *nc;
1613
1614         if (!master->dev.of_node)
1615                 return;
1616
1617         for_each_available_child_of_node(master->dev.of_node, nc) {
1618                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1619                         continue;
1620                 spi = of_register_spi_device(master, nc);
1621                 if (IS_ERR(spi)) {
1622                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1623                                 nc->full_name);
1624                         of_node_clear_flag(nc, OF_POPULATED);
1625                 }
1626         }
1627 }
1628 #else
1629 static void of_register_spi_devices(struct spi_master *master) { }
1630 #endif
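/*
 * Illustrative device tree fragment (not part of this file) of the kind that
 * of_register_spi_device() parses above.  Node name and values are made up;
 * only the property names ("reg", "spi-max-frequency", "spi-cpha", ...)
 * follow the generic SPI binding:
 *
 *	spi@0 {
 *		...
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <20000000>;	// 20 MHz
 *			spi-cpha;			// SPI_CPHA
 *		};
 *	};
 */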
1631
1632 #ifdef CONFIG_ACPI
1633 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1634 {
1635         struct spi_device *spi = data;
1636         struct spi_master *master = spi->master;
1637
1638         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1639                 struct acpi_resource_spi_serialbus *sb;
1640
1641                 sb = &ares->data.spi_serial_bus;
1642                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1643                         /*
1644                          * ACPI DeviceSelection numbering is handled by the
1645                          * host controller driver in Windows and can vary
1646                          * from driver to driver. In Linux we always expect
1647                          * 0 .. max - 1 so we need to ask the driver to
1648                          * translate between the two schemes.
1649                          */
1650                         if (master->fw_translate_cs) {
1651                                 int cs = master->fw_translate_cs(master,
1652                                                 sb->device_selection);
1653                                 if (cs < 0)
1654                                         return cs;
1655                                 spi->chip_select = cs;
1656                         } else {
1657                                 spi->chip_select = sb->device_selection;
1658                         }
1659
1660                         spi->max_speed_hz = sb->connection_speed;
1661
1662                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1663                                 spi->mode |= SPI_CPHA;
1664                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1665                                 spi->mode |= SPI_CPOL;
1666                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1667                                 spi->mode |= SPI_CS_HIGH;
1668                 }
1669         } else if (spi->irq < 0) {
1670                 struct resource r;
1671
1672                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1673                         spi->irq = r.start;
1674         }
1675
1676         /* Always tell the ACPI core to skip this resource */
1677         return 1;
1678 }
1679
1680 static acpi_status acpi_register_spi_device(struct spi_master *master,
1681                                             struct acpi_device *adev)
1682 {
1683         struct list_head resource_list;
1684         struct spi_device *spi;
1685         int ret;
1686
1687         if (acpi_bus_get_status(adev) || !adev->status.present ||
1688             acpi_device_enumerated(adev))
1689                 return AE_OK;
1690
1691         spi = spi_alloc_device(master);
1692         if (!spi) {
1693                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1694                         dev_name(&adev->dev));
1695                 return AE_NO_MEMORY;
1696         }
1697
1698         ACPI_COMPANION_SET(&spi->dev, adev);
1699         spi->irq = -1;
1700
1701         INIT_LIST_HEAD(&resource_list);
1702         ret = acpi_dev_get_resources(adev, &resource_list,
1703                                      acpi_spi_add_resource, spi);
1704         acpi_dev_free_resource_list(&resource_list);
1705
1706         if (ret < 0 || !spi->max_speed_hz) {
1707                 spi_dev_put(spi);
1708                 return AE_OK;
1709         }
1710
1711         if (spi->irq < 0)
1712                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1713
1714         acpi_device_set_enumerated(adev);
1715
1716         adev->power.flags.ignore_parent = true;
1717         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1718         if (spi_add_device(spi)) {
1719                 adev->power.flags.ignore_parent = false;
1720                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1721                         dev_name(&adev->dev));
1722                 spi_dev_put(spi);
1723         }
1724
1725         return AE_OK;
1726 }
1727
1728 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1729                                        void *data, void **return_value)
1730 {
1731         struct spi_master *master = data;
1732         struct acpi_device *adev;
1733
1734         if (acpi_bus_get_device(handle, &adev))
1735                 return AE_OK;
1736
1737         return acpi_register_spi_device(master, adev);
1738 }
1739
1740 static void acpi_register_spi_devices(struct spi_master *master)
1741 {
1742         acpi_status status;
1743         acpi_handle handle;
1744
1745         handle = ACPI_HANDLE(master->dev.parent);
1746         if (!handle)
1747                 return;
1748
1749         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1750                                      acpi_spi_add_device, NULL,
1751                                      master, NULL);
1752         if (ACPI_FAILURE(status))
1753                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1754 }
1755 #else
1756 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1757 #endif /* CONFIG_ACPI */
1758
1759 static void spi_master_release(struct device *dev)
1760 {
1761         struct spi_master *master;
1762
1763         master = container_of(dev, struct spi_master, dev);
1764         kfree(master);
1765 }
1766
1767 static struct class spi_master_class = {
1768         .name           = "spi_master",
1769         .owner          = THIS_MODULE,
1770         .dev_release    = spi_master_release,
1771         .dev_groups     = spi_master_groups,
1772 };
1773
1774
1775 /**
1776  * spi_alloc_master - allocate SPI master controller
1777  * @dev: the controller, possibly using the platform_bus
1778  * @size: how much zeroed driver-private data to allocate; the pointer to this
1779  *      memory is in the driver_data field of the returned device,
1780  *      accessible with spi_master_get_devdata().
1781  * Context: can sleep
1782  *
1783  * This call is used only by SPI master controller drivers, which are the
1784  * only ones directly touching chip registers.  It's how they allocate
1785  * an spi_master structure, prior to calling spi_register_master().
1786  *
1787  * This must be called from context that can sleep.
1788  *
1789  * The caller is responsible for assigning the bus number and initializing
1790  * the master's methods before calling spi_register_master(); and (after errors
1791  * adding the device) calling spi_master_put() to prevent a memory leak.
1792  *
1793  * Return: the SPI master structure on success, else NULL.
1794  */
1795 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1796 {
1797         struct spi_master       *master;
1798
1799         if (!dev)
1800                 return NULL;
1801
1802         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1803         if (!master)
1804                 return NULL;
1805
1806         device_initialize(&master->dev);
1807         master->bus_num = -1;
1808         master->num_chipselect = 1;
1809         master->dev.class = &spi_master_class;
1810         master->dev.parent = dev;
1811         pm_suspend_ignore_children(&master->dev, true);
1812         spi_master_set_devdata(master, &master[1]);
1813
1814         return master;
1815 }
1816 EXPORT_SYMBOL_GPL(spi_alloc_master);
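/*
 * Illustrative sketch (hypothetical driver, not part of this file): how a
 * controller driver pairs spi_alloc_master() with spi_master_get_devdata()
 * to reach its private state.  "struct foo_spi" and "foo" are made-up names.
 */
#if 0
struct foo_spi {
	void __iomem *regs;
};

static int foo_alloc_example(struct device *dev)
{
	struct spi_master *master;
	struct foo_spi *foo;

	/* driver-private data is allocated directly behind the spi_master */
	master = spi_alloc_master(dev, sizeof(*foo));
	if (!master)
		return -ENOMEM;

	foo = spi_master_get_devdata(master);
	/* ... initialize foo->regs, then register the master or put it ... */
	spi_master_put(master);
	return 0;
}
#endif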
1817
1818 #ifdef CONFIG_OF
1819 static int of_spi_register_master(struct spi_master *master)
1820 {
1821         int nb, i, *cs;
1822         struct device_node *np = master->dev.of_node;
1823
1824         if (!np)
1825                 return 0;
1826
1827         nb = of_gpio_named_count(np, "cs-gpios");
1828         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1829
1830         /* Return error only for an incorrectly formed cs-gpios property */
1831         if (nb == 0 || nb == -ENOENT)
1832                 return 0;
1833         else if (nb < 0)
1834                 return nb;
1835
1836         cs = devm_kzalloc(&master->dev,
1837                           sizeof(int) * master->num_chipselect,
1838                           GFP_KERNEL);
1839         master->cs_gpios = cs;
1840
1841         if (!master->cs_gpios)
1842                 return -ENOMEM;
1843
1844         for (i = 0; i < master->num_chipselect; i++)
1845                 cs[i] = -ENOENT;
1846
1847         for (i = 0; i < nb; i++)
1848                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1849
1850         return 0;
1851 }
1852 #else
1853 static int of_spi_register_master(struct spi_master *master)
1854 {
1855         return 0;
1856 }
1857 #endif
1858
1859 /**
1860  * spi_register_master - register SPI master controller
1861  * @master: initialized master, originally from spi_alloc_master()
1862  * Context: can sleep
1863  *
1864  * SPI master controllers connect to their drivers using some non-SPI bus,
1865  * such as the platform bus.  The final stage of probe() in that code
1866  * includes calling spi_register_master() to hook up to this SPI bus glue.
1867  *
1868  * SPI controllers use board specific (often SOC specific) bus numbers,
1869  * and board-specific addressing for SPI devices combines those numbers
1870  * with chip select numbers.  Since SPI does not directly support dynamic
1871  * device identification, boards need configuration tables telling which
1872  * chip is at which address.
1873  *
1874  * This must be called from context that can sleep.  It returns zero on
1875  * success, else a negative error code (dropping the master's refcount).
1876  * After a successful return, the caller is responsible for calling
1877  * spi_unregister_master().
1878  *
1879  * Return: zero on success, else a negative error code.
1880  */
1881 int spi_register_master(struct spi_master *master)
1882 {
1883         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1884         struct device           *dev = master->dev.parent;
1885         struct boardinfo        *bi;
1886         int                     status = -ENODEV;
1887         int                     dynamic = 0;
1888
1889         if (!dev)
1890                 return -ENODEV;
1891
1892         status = of_spi_register_master(master);
1893         if (status)
1894                 return status;
1895
1896         /* even if it's just one always-selected device, there must
1897          * be at least one chipselect
1898          */
1899         if (master->num_chipselect == 0)
1900                 return -EINVAL;
1901
1902         if ((master->bus_num < 0) && master->dev.of_node)
1903                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1904
1905         /* convention:  dynamically assigned bus IDs count down from the max */
1906         if (master->bus_num < 0) {
1907                 /* FIXME switch to an IDR based scheme, something like
1908                  * I2C now uses, so we can't run out of "dynamic" IDs
1909                  */
1910                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1911                 dynamic = 1;
1912         }
1913
1914         INIT_LIST_HEAD(&master->queue);
1915         spin_lock_init(&master->queue_lock);
1916         spin_lock_init(&master->bus_lock_spinlock);
1917         mutex_init(&master->bus_lock_mutex);
1918         mutex_init(&master->io_mutex);
1919         master->bus_lock_flag = 0;
1920         init_completion(&master->xfer_completion);
1921         if (!master->max_dma_len)
1922                 master->max_dma_len = INT_MAX;
1923
1924         /* register the device, then userspace will see it.
1925          * registration fails if the bus ID is in use.
1926          */
1927         dev_set_name(&master->dev, "spi%u", master->bus_num);
1928         status = device_add(&master->dev);
1929         if (status < 0)
1930                 goto done;
1931         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1932                         dynamic ? " (dynamic)" : "");
1933
1934         /* If we're using a queued driver, start the queue */
1935         if (master->transfer)
1936                 dev_info(dev, "master is unqueued, this is deprecated\n");
1937         else {
1938                 status = spi_master_initialize_queue(master);
1939                 if (status) {
1940                         device_del(&master->dev);
1941                         goto done;
1942                 }
1943         }
1944         /* add statistics */
1945         spin_lock_init(&master->statistics.lock);
1946
1947         mutex_lock(&board_lock);
1948         list_add_tail(&master->list, &spi_master_list);
1949         list_for_each_entry(bi, &board_list, list)
1950                 spi_match_master_to_boardinfo(master, &bi->board_info);
1951         mutex_unlock(&board_lock);
1952
1953         /* Register devices from the device tree and ACPI */
1954         of_register_spi_devices(master);
1955         acpi_register_spi_devices(master);
1956 done:
1957         return status;
1958 }
1959 EXPORT_SYMBOL_GPL(spi_register_master);
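/*
 * Illustrative sketch (hypothetical driver, not part of this file): a typical
 * probe() flow around spi_register_master().  "foo_probe" and
 * "foo_transfer_one" are made-up names, and foo_transfer_one() is assumed to
 * be defined elsewhere in that driver; on registration failure the reference
 * taken by spi_alloc_master() is dropped with spi_master_put().
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* bus_num is left at -1 so a dynamic bus number gets assigned */
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->dev.of_node = pdev->dev.of_node;  /* enables DT child enumeration */
	master->transfer_one = foo_transfer_one;  /* per-transfer hook of this driver */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);

	return ret;
}
#endif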
1960
1961 static void devm_spi_unregister(struct device *dev, void *res)
1962 {
1963         spi_unregister_master(*(struct spi_master **)res);
1964 }
1965
1966 /**
1967  * devm_spi_register_master - register managed SPI master controller
1968  * @dev:    device managing SPI master
1969  * @master: initialized master, originally from spi_alloc_master()
1970  * Context: can sleep
1971  *
1972  * Register an SPI master controller as with spi_register_master(); the
1973  * master is automatically unregistered when the managing device is detached.
1974  *
1975  * Return: zero on success, else a negative error code.
1976  */
1977 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1978 {
1979         struct spi_master **ptr;
1980         int ret;
1981
1982         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1983         if (!ptr)
1984                 return -ENOMEM;
1985
1986         ret = spi_register_master(master);
1987         if (!ret) {
1988                 *ptr = master;
1989                 devres_add(dev, ptr);
1990         } else {
1991                 devres_free(ptr);
1992         }
1993
1994         return ret;
1995 }
1996 EXPORT_SYMBOL_GPL(devm_spi_register_master);
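/*
 * Illustrative fragment (hypothetical, replacing the tail of the probe
 * sketch shown above): with the managed variant, remove() no longer needs to
 * unregister explicitly; the registration is undone when the managing device
 * is unbound.
 */
#if 0
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);	/* registration never happened */
	return ret;
#endif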
1997
1998 static int __unregister(struct device *dev, void *null)
1999 {
2000         spi_unregister_device(to_spi_device(dev));
2001         return 0;
2002 }
2003
2004 /**
2005  * spi_unregister_master - unregister SPI master controller
2006  * @master: the master being unregistered
2007  * Context: can sleep
2008  *
2009  * This call is used only by SPI master controller drivers, which are the
2010  * only ones directly touching chip registers.
2011  *
2012  * This must be called from context that can sleep.
2013  */
2014 void spi_unregister_master(struct spi_master *master)
2015 {
2016         int dummy;
2017
2018         if (master->queued) {
2019                 if (spi_destroy_queue(master))
2020                         dev_err(&master->dev, "queue remove failed\n");
2021         }
2022
2023         mutex_lock(&board_lock);
2024         list_del(&master->list);
2025         mutex_unlock(&board_lock);
2026
2027         dummy = device_for_each_child(&master->dev, NULL, __unregister);
2028         device_unregister(&master->dev);
2029 }
2030 EXPORT_SYMBOL_GPL(spi_unregister_master);
2031
2032 int spi_master_suspend(struct spi_master *master)
2033 {
2034         int ret;
2035
2036         /* Basically a no-op for non-queued masters */
2037         if (!master->queued)
2038                 return 0;
2039
2040         ret = spi_stop_queue(master);
2041         if (ret)
2042                 dev_err(&master->dev, "queue stop failed\n");
2043
2044         return ret;
2045 }
2046 EXPORT_SYMBOL_GPL(spi_master_suspend);
2047
2048 int spi_master_resume(struct spi_master *master)
2049 {
2050         int ret;
2051
2052         if (!master->queued)
2053                 return 0;
2054
2055         ret = spi_start_queue(master);
2056         if (ret)
2057                 dev_err(&master->dev, "queue restart failed\n");
2058
2059         return ret;
2060 }
2061 EXPORT_SYMBOL_GPL(spi_master_resume);
2062
2063 static int __spi_master_match(struct device *dev, const void *data)
2064 {
2065         struct spi_master *m;
2066         const u16 *bus_num = data;
2067
2068         m = container_of(dev, struct spi_master, dev);
2069         return m->bus_num == *bus_num;
2070 }
2071
2072 /**
2073  * spi_busnum_to_master - look up master associated with bus_num
2074  * @bus_num: the master's bus number
2075  * Context: can sleep
2076  *
2077  * This call may be used with devices that are registered after
2078  * arch init time.  It returns a refcounted pointer to the relevant
2079  * spi_master (which the caller must release), or NULL if there is
2080  * no such master registered.
2081  *
2082  * Return: the SPI master structure on success, else NULL.
2083  */
2084 struct spi_master *spi_busnum_to_master(u16 bus_num)
2085 {
2086         struct device           *dev;
2087         struct spi_master       *master = NULL;
2088
2089         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2090                                 __spi_master_match);
2091         if (dev)
2092                 master = container_of(dev, struct spi_master, dev);
2093         /* reference got in class_find_device */
2094         return master;
2095 }
2096 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
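/*
 * Illustrative fragment: the reference returned by spi_busnum_to_master()
 * must be dropped with spi_master_put() once the caller is done with it.
 */
#if 0
	struct spi_master *master;

	master = spi_busnum_to_master(0);
	if (master) {
		/* ... use the master ... */
		spi_master_put(master);
	}
#endif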
2097
2098 /*-------------------------------------------------------------------------*/
2099
2100 /* Core methods for SPI resource management */
2101
2102 /**
2103  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2104  *                 during the processing of a spi_message while using
2105  *                 spi_transfer_one
2106  * @spi:     the spi device for which we allocate memory
2107  * @release: the release code to execute for this resource
2108  * @size:    size to alloc and return
2109  * @gfp:     GFP allocation flags
2110  *
2111  * Return: the pointer to the allocated data
2112  *
2113  * This may get enhanced in the future to allocate from a memory pool
2114  * of the @spi_device or @spi_master to avoid repeated allocations.
2115  */
2116 void *spi_res_alloc(struct spi_device *spi,
2117                     spi_res_release_t release,
2118                     size_t size, gfp_t gfp)
2119 {
2120         struct spi_res *sres;
2121
2122         sres = kzalloc(sizeof(*sres) + size, gfp);
2123         if (!sres)
2124                 return NULL;
2125
2126         INIT_LIST_HEAD(&sres->entry);
2127         sres->release = release;
2128
2129         return sres->data;
2130 }
2131 EXPORT_SYMBOL_GPL(spi_res_alloc);
2132
2133 /**
2134  * spi_res_free - free an spi resource
2135  * @res: pointer to the custom data of a resource
2136  *
2137  */
2138 void spi_res_free(void *res)
2139 {
2140         struct spi_res *sres = container_of(res, struct spi_res, data);
2141
2142         if (!res)
2143                 return;
2144
2145         WARN_ON(!list_empty(&sres->entry));
2146         kfree(sres);
2147 }
2148 EXPORT_SYMBOL_GPL(spi_res_free);
2149
2150 /**
2151  * spi_res_add - add a spi_res to the spi_message
2152  * @message: the spi message
2153  * @res:     the spi_resource
2154  */
2155 void spi_res_add(struct spi_message *message, void *res)
2156 {
2157         struct spi_res *sres = container_of(res, struct spi_res, data);
2158
2159         WARN_ON(!list_empty(&sres->entry));
2160         list_add_tail(&sres->entry, &message->resources);
2161 }
2162 EXPORT_SYMBOL_GPL(spi_res_add);
2163
2164 /**
2165  * spi_res_release - release all spi resources for this message
2166  * @master:  the @spi_master
2167  * @message: the @spi_message
2168  */
2169 void spi_res_release(struct spi_master *master,
2170                      struct spi_message *message)
2171 {
2172         struct spi_res *res;
2173
2174         while (!list_empty(&message->resources)) {
2175                 res = list_last_entry(&message->resources,
2176                                       struct spi_res, entry);
2177
2178                 if (res->release)
2179                         res->release(master, message, res->data);
2180
2181                 list_del(&res->entry);
2182
2183                 kfree(res);
2184         }
2185 }
2186 EXPORT_SYMBOL_GPL(spi_res_release);
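/*
 * Illustrative sketch (hypothetical names): allocating a message-scoped
 * resource.  "foo_undo" and "struct foo_state" are made up; the release
 * callback is invoked from spi_res_release() when the message finishes.
 */
#if 0
struct foo_state {
	int saved;
};

static void foo_undo(struct spi_master *master, struct spi_message *msg,
		     void *res)
{
	struct foo_state *st = res;

	/* undo whatever was recorded in st->saved */
}

static int foo_prepare(struct spi_message *msg)
{
	struct foo_state *st;

	st = spi_res_alloc(msg->spi, foo_undo, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	st->saved = 1;
	spi_res_add(msg, st);	/* released automatically with the message */
	return 0;
}
#endif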
2187
2188 /*-------------------------------------------------------------------------*/
2189
2190 /* Core methods for spi_message alterations */
2191
2192 static void __spi_replace_transfers_release(struct spi_master *master,
2193                                             struct spi_message *msg,
2194                                             void *res)
2195 {
2196         struct spi_replaced_transfers *rxfer = res;
2197         size_t i;
2198
2199         /* call extra callback if requested */
2200         if (rxfer->release)
2201                 rxfer->release(master, msg, res);
2202
2203         /* insert replaced transfers back into the message */
2204         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2205
2206         /* remove the formerly inserted entries */
2207         for (i = 0; i < rxfer->inserted; i++)
2208                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2209 }
2210
2211 /**
2212  * spi_replace_transfers - replace transfers with several transfers
2213  *                         and register change with spi_message.resources
2214  * @msg:           the spi_message we work upon
2215  * @xfer_first:    the first spi_transfer we want to replace
2216  * @remove:        number of transfers to remove
2217  * @insert:        the number of transfers we want to insert instead
2218  * @release:       extra release code necessary in some circumstances
2219  * @extradatasize: extra data to allocate (with alignment guarantees
2220  *                 of struct @spi_transfer)
2221  * @gfp:           gfp flags
2222  *
2223  * Returns: pointer to @spi_replaced_transfers,
2224  *          PTR_ERR(...) in case of errors.
2225  */
2226 struct spi_replaced_transfers *spi_replace_transfers(
2227         struct spi_message *msg,
2228         struct spi_transfer *xfer_first,
2229         size_t remove,
2230         size_t insert,
2231         spi_replaced_release_t release,
2232         size_t extradatasize,
2233         gfp_t gfp)
2234 {
2235         struct spi_replaced_transfers *rxfer;
2236         struct spi_transfer *xfer;
2237         size_t i;
2238
2239         /* allocate the structure using spi_res */
2240         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2241                               insert * sizeof(struct spi_transfer)
2242                               + sizeof(struct spi_replaced_transfers)
2243                               + extradatasize,
2244                               gfp);
2245         if (!rxfer)
2246                 return ERR_PTR(-ENOMEM);
2247
2248         /* the release code to invoke before running the generic release */
2249         rxfer->release = release;
2250
2251         /* assign extradata */
2252         if (extradatasize)
2253                 rxfer->extradata =
2254                         &rxfer->inserted_transfers[insert];
2255
2256         /* init the replaced_transfers list */
2257         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2258
2259         /* assign the list_entry after which we should reinsert
2260          * the @replaced_transfers - it may be spi_message.transfers!
2261          */
2262         rxfer->replaced_after = xfer_first->transfer_list.prev;
2263
2264         /* remove the requested number of transfers */
2265         for (i = 0; i < remove; i++) {
2266                 /* if the entry after replaced_after is msg->transfers
2267                  * then we have been requested to remove more transfers
2268                  * than are in the list
2269                  */
2270                 if (rxfer->replaced_after->next == &msg->transfers) {
2271                         dev_err(&msg->spi->dev,
2272                                 "requested to remove more spi_transfers than are available\n");
2273                         /* insert replaced transfers back into the message */
2274                         list_splice(&rxfer->replaced_transfers,
2275                                     rxfer->replaced_after);
2276
2277                         /* free the spi_replaced_transfers structure */
2278                         spi_res_free(rxfer);
2279
2280                         /* and return with an error */
2281                         return ERR_PTR(-EINVAL);
2282                 }
2283
2284                 /* remove the entry after replaced_after from list of
2285                  * transfers and add it to list of replaced_transfers
2286                  */
2287                 list_move_tail(rxfer->replaced_after->next,
2288                                &rxfer->replaced_transfers);
2289         }
2290
2291         /* create copy of the given xfer with identical settings
2292          * based on the first transfer to get removed
2293          */
2294         for (i = 0; i < insert; i++) {
2295                 /* we need to run in reverse order */
2296                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2297
2298                 /* copy all spi_transfer data */
2299                 memcpy(xfer, xfer_first, sizeof(*xfer));
2300
2301                 /* add to list */
2302                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2303
2304                 /* clear cs_change and delay_usecs for all but the last */
2305                 if (i) {
2306                         xfer->cs_change = false;
2307                         xfer->delay_usecs = 0;
2308                 }
2309         }
2310
2311         /* set up inserted */
2312         rxfer->inserted = insert;
2313
2314         /* and register it with spi_res/spi_message */
2315         spi_res_add(msg, rxfer);
2316
2317         return rxfer;
2318 }
2319 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2320
2321 static int __spi_split_transfer_maxsize(struct spi_master *master,
2322                                         struct spi_message *msg,
2323                                         struct spi_transfer **xferp,
2324                                         size_t maxsize,
2325                                         gfp_t gfp)
2326 {
2327         struct spi_transfer *xfer = *xferp, *xfers;
2328         struct spi_replaced_transfers *srt;
2329         size_t offset;
2330         size_t count, i;
2331
2332         /* warn once about the fact that we are splitting a transfer */
2333         dev_warn_once(&msg->spi->dev,
2334                       "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2335                       xfer->len, maxsize);
2336
2337         /* calculate how many we have to replace */
2338         count = DIV_ROUND_UP(xfer->len, maxsize);
2339
2340         /* create replacement */
2341         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2342         if (IS_ERR(srt))
2343                 return PTR_ERR(srt);
2344         xfers = srt->inserted_transfers;
2345
2346         /* now handle each of those newly inserted spi_transfers;
2347          * note that the replacement spi_transfers are all preset
2348          * to the same values as *xferp, so tx_buf, rx_buf and len
2349          * are all identical (as well as most others),
2350          * so we just have to fix up len and the pointers.
2351          *
2352          * this also includes support for the deprecated
2353          * spi_message.is_dma_mapped interface
2354          */
2355
2356         /* the first transfer just needs the length modified, so we
2357          * run it outside the loop
2358          */
2359         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2360
2361         /* all the others need rx_buf/tx_buf also set */
2362         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2363                 /* update rx_buf, tx_buf and dma */
2364                 if (xfers[i].rx_buf)
2365                         xfers[i].rx_buf += offset;
2366                 if (xfers[i].rx_dma)
2367                         xfers[i].rx_dma += offset;
2368                 if (xfers[i].tx_buf)
2369                         xfers[i].tx_buf += offset;
2370                 if (xfers[i].tx_dma)
2371                         xfers[i].tx_dma += offset;
2372
2373                 /* update length */
2374                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2375         }
2376
2377         /* we set up xferp to the last entry we have inserted,
2378          * so that we skip those already split transfers
2379          */
2380         *xferp = &xfers[count - 1];
2381
2382         /* increment statistics counters */
2383         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2384                                        transfers_split_maxsize);
2385         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2386                                        transfers_split_maxsize);
2387
2388         return 0;
2389 }
2390
2391 /**
2392  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2393  *                              when an individual transfer exceeds a
2394  *                              certain size
2395  * @master:    the @spi_master for this transfer
2396  * @msg:   the @spi_message to transform
2397  * @maxsize:  the maximum length allowed for an individual transfer
2398  * @gfp: GFP allocation flags
2399  *
2400  * Return: status of transformation
2401  */
2402 int spi_split_transfers_maxsize(struct spi_master *master,
2403                                 struct spi_message *msg,
2404                                 size_t maxsize,
2405                                 gfp_t gfp)
2406 {
2407         struct spi_transfer *xfer;
2408         int ret;
2409
2410         /* iterate over the transfer_list,
2411          * but note that xfer is advanced to the last transfer inserted
2412          * to avoid checking sizes again unnecessarily (also, xfer may
2413          * potentially belong to a different list by the time the
2414          * replacement has happened)
2415          */
2416         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2417                 if (xfer->len > maxsize) {
2418                         ret = __spi_split_transfer_maxsize(
2419                                 master, msg, &xfer, maxsize, gfp);
2420                         if (ret)
2421                                 return ret;
2422                 }
2423         }
2424
2425         return 0;
2426 }
2427 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
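/*
 * Illustrative sketch (hypothetical driver): a controller whose FIFO or DMA
 * engine cannot handle transfers longer than FOO_MAX_XFER bytes can split
 * messages from its prepare_message() hook.  "foo" names are made up.
 */
#if 0
#define FOO_MAX_XFER	65535

static int foo_prepare_message(struct spi_master *master,
			       struct spi_message *msg)
{
	/* oversized transfers get replaced by several <= FOO_MAX_XFER ones */
	return spi_split_transfers_maxsize(master, msg, FOO_MAX_XFER,
					   GFP_KERNEL);
}
#endif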
2428
2429 /*-------------------------------------------------------------------------*/
2430
2431 /* Core methods for SPI master protocol drivers.  Some of the
2432  * other core methods are currently defined as inline functions.
2433  */
2434
2435 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2436 {
2437         if (master->bits_per_word_mask) {
2438                 /* Only 32 bits fit in the mask */
2439                 if (bits_per_word > 32)
2440                         return -EINVAL;
2441                 if (!(master->bits_per_word_mask &
2442                                 SPI_BPW_MASK(bits_per_word)))
2443                         return -EINVAL;
2444         }
2445
2446         return 0;
2447 }
2448
2449 /**
2450  * spi_setup - setup SPI mode and clock rate
2451  * @spi: the device whose settings are being modified
2452  * Context: can sleep, and no requests are queued to the device
2453  *
2454  * SPI protocol drivers may need to update the transfer mode if the
2455  * device doesn't work with its default.  They may likewise need
2456  * to update clock rates or word sizes from initial values.  This function
2457  * changes those settings, and must be called from a context that can sleep.
2458  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2459  * effect the next time the device is selected and data is transferred to
2460  * or from it.  When this function returns, the spi device is deselected.
2461  *
2462  * Note that this call will fail if the protocol driver specifies an option
2463  * that the underlying controller or its driver does not support.  For
2464  * example, not all hardware supports wire transfers using nine bit words,
2465  * LSB-first wire encoding, or active-high chipselects.
2466  *
2467  * Return: zero on success, else a negative error code.
2468  */
2469 int spi_setup(struct spi_device *spi)
2470 {
2471         unsigned        bad_bits, ugly_bits;
2472         int             status;
2473
2474         /* check mode to prevent DUAL and QUAD from being set at the same time
2475          */
2476         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2477                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2478                 dev_err(&spi->dev,
2479                 "setup: can not select dual and quad at the same time\n");
2480                 return -EINVAL;
2481         }
2482         /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2483          */
2484         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2485                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2486                 return -EINVAL;
2487         /* help drivers fail *cleanly* when they need options
2488          * that aren't supported with their current master
2489          */
2490         bad_bits = spi->mode & ~spi->master->mode_bits;
2491         ugly_bits = bad_bits &
2492                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2493         if (ugly_bits) {
2494                 dev_warn(&spi->dev,
2495                          "setup: ignoring unsupported mode bits %x\n",
2496                          ugly_bits);
2497                 spi->mode &= ~ugly_bits;
2498                 bad_bits &= ~ugly_bits;
2499         }
2500         if (bad_bits) {
2501                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2502                         bad_bits);
2503                 return -EINVAL;
2504         }
2505
2506         if (!spi->bits_per_word)
2507                 spi->bits_per_word = 8;
2508
2509         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2510         if (status)
2511                 return status;
2512
2513         if (!spi->max_speed_hz)
2514                 spi->max_speed_hz = spi->master->max_speed_hz;
2515
2516         if (spi->master->setup)
2517                 status = spi->master->setup(spi);
2518
2519         spi_set_cs(spi, false);
2520
2521         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2522                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2523                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2524                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2525                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2526                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2527                         spi->bits_per_word, spi->max_speed_hz,
2528                         status);
2529
2530         return status;
2531 }
2532 EXPORT_SYMBOL_GPL(spi_setup);
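/*
 * Illustrative sketch (hypothetical protocol driver): adjusting device
 * settings and applying them with spi_setup() from probe().  The mode,
 * word size and speed values are made up.
 */
#if 0
static int foo_device_probe(struct spi_device *spi)
{
	int ret;

	spi->mode |= SPI_CPOL | SPI_CPHA;	/* the chip wants SPI mode 3 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;		/* never clock above 1 MHz */

	ret = spi_setup(spi);
	if (ret)
		return ret;			/* master cannot do this */

	/* ... continue with device initialization ... */
	return 0;
}
#endif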
2533
2534 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2535 {
2536         struct spi_master *master = spi->master;
2537         struct spi_transfer *xfer;
2538         int w_size;
2539
2540         if (list_empty(&message->transfers))
2541                 return -EINVAL;
2542
2543         /* Half-duplex links include original MicroWire, and ones with
2544          * only one data pin like SPI_3WIRE (switches direction) or where
2545          * either MOSI or MISO is missing.  They can also be caused by
2546          * software limitations.
2547          */
2548         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2549                         || (spi->mode & SPI_3WIRE)) {
2550                 unsigned flags = master->flags;
2551
2552                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2553                         if (xfer->rx_buf && xfer->tx_buf)
2554                                 return -EINVAL;
2555                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2556                                 return -EINVAL;
2557                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2558                                 return -EINVAL;
2559                 }
2560         }
2561
2562         /*
2563          * Set transfer bits_per_word and max speed as spi device default if
2564          * they are not set for this transfer.
2565          * Set transfer tx_nbits and rx_nbits as single transfer default
2566          * (SPI_NBITS_SINGLE) if they are not set for this transfer.
2567          */
2568         message->frame_length = 0;
2569         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2570                 message->frame_length += xfer->len;
2571                 if (!xfer->bits_per_word)
2572                         xfer->bits_per_word = spi->bits_per_word;
2573
2574                 if (!xfer->speed_hz)
2575                         xfer->speed_hz = spi->max_speed_hz;
2576                 if (!xfer->speed_hz)
2577                         xfer->speed_hz = master->max_speed_hz;
2578
2579                 if (master->max_speed_hz &&
2580                     xfer->speed_hz > master->max_speed_hz)
2581                         xfer->speed_hz = master->max_speed_hz;
2582
2583                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2584                         return -EINVAL;
2585
2586                 /*
2587                  * SPI transfer length should be multiple of SPI word size
2588                  * where SPI word size should be power-of-two multiple
2589                  */
2590                 if (xfer->bits_per_word <= 8)
2591                         w_size = 1;
2592                 else if (xfer->bits_per_word <= 16)
2593                         w_size = 2;
2594                 else
2595                         w_size = 4;
2596
2597                 /* No partial transfers accepted */
2598                 if (xfer->len % w_size)
2599                         return -EINVAL;
2600
2601                 if (xfer->speed_hz && master->min_speed_hz &&
2602                     xfer->speed_hz < master->min_speed_hz)
2603                         return -EINVAL;
2604
2605                 if (xfer->tx_buf && !xfer->tx_nbits)
2606                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2607                 if (xfer->rx_buf && !xfer->rx_nbits)
2608                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2609                 /* check transfer tx/rx_nbits:
2610                  * 1. check the value matches one of single, dual and quad
2611                  * 2. check tx/rx_nbits match the mode in spi_device
2612                  */
2613                 if (xfer->tx_buf) {
2614                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2615                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2616                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2617                                 return -EINVAL;
2618                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2619                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2620                                 return -EINVAL;
2621                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2622                                 !(spi->mode & SPI_TX_QUAD))
2623                                 return -EINVAL;
2624                 }
2625                 /* check transfer rx_nbits */
2626                 if (xfer->rx_buf) {
2627                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2628                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2629                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2630                                 return -EINVAL;
2631                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2632                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2633                                 return -EINVAL;
2634                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2635                                 !(spi->mode & SPI_RX_QUAD))
2636                                 return -EINVAL;
2637                 }
2638         }
2639
2640         message->status = -EINPROGRESS;
2641
2642         return 0;
2643 }
2644
2645 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2646 {
2647         struct spi_master *master = spi->master;
2648
2649         message->spi = spi;
2650
2651         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2652         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2653
2654         trace_spi_message_submit(message);
2655
2656         return master->transfer(spi, message);
2657 }
2658
2659 /**
2660  * spi_async - asynchronous SPI transfer
2661  * @spi: device with which data will be exchanged
2662  * @message: describes the data transfers, including completion callback
2663  * Context: any (irqs may be blocked, etc)
2664  *
2665  * This call may be used in_irq and other contexts which can't sleep,
2666  * as well as from task contexts which can sleep.
2667  *
2668  * The completion callback is invoked in a context which can't sleep.
2669  * Before that invocation, the value of message->status is undefined.
2670  * When the callback is issued, message->status holds either zero (to
2671  * indicate complete success) or a negative error code.  After that
2672  * callback returns, the driver which issued the transfer request may
2673  * deallocate the associated memory; it's no longer in use by any SPI
2674  * core or controller driver code.
2675  *
2676  * Note that although all messages to a spi_device are handled in
2677  * FIFO order, messages may go to different devices in other orders.
2678  * Some device might be higher priority, or have various "hard" access
2679  * time requirements, for example.
2680  *
2681  * On detection of any fault during the transfer, processing of
2682  * the entire message is aborted, and the device is deselected.
2683  * Until returning from the associated message completion callback,
2684  * no other spi_message queued to that device will be processed.
2685  * (This rule applies equally to all the synchronous transfer calls,
2686  * which are wrappers around this core asynchronous primitive.)
2687  *
2688  * Return: zero on success, else a negative error code.
2689  */
2690 int spi_async(struct spi_device *spi, struct spi_message *message)
2691 {
2692         struct spi_master *master = spi->master;
2693         int ret;
2694         unsigned long flags;
2695
2696         ret = __spi_validate(spi, message);
2697         if (ret != 0)
2698                 return ret;
2699
2700         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2701
2702         if (master->bus_lock_flag)
2703                 ret = -EBUSY;
2704         else
2705                 ret = __spi_async(spi, message);
2706
2707         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2708
2709         return ret;
2710 }
2711 EXPORT_SYMBOL_GPL(spi_async);
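/*
 * Illustrative sketch (hypothetical names): submitting a message
 * asynchronously.  The buffer, the spi_transfer and the spi_message must
 * stay allocated until "foo_complete" has run; the buffer should be
 * DMA-safe (e.g. kmalloc'd).
 */
#if 0
static void foo_complete(void *context)
{
	complete(context);	/* or inspect msg->status / actual_length */
}

static int foo_read_async(struct spi_device *spi, void *rxbuf, size_t len,
			  struct spi_transfer *t, struct spi_message *m,
			  struct completion *done)
{
	memset(t, 0, sizeof(*t));
	t->rx_buf = rxbuf;
	t->len = len;

	spi_message_init(m);
	spi_message_add_tail(t, m);
	m->complete = foo_complete;
	m->context = done;

	return spi_async(spi, m);	/* may be called from atomic context */
}
#endif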
2712
2713 /**
2714  * spi_async_locked - version of spi_async with exclusive bus usage
2715  * @spi: device with which data will be exchanged
2716  * @message: describes the data transfers, including completion callback
2717  * Context: any (irqs may be blocked, etc)
2718  *
2719  * This call may be used in_irq and other contexts which can't sleep,
2720  * as well as from task contexts which can sleep.
2721  *
2722  * The completion callback is invoked in a context which can't sleep.
2723  * Before that invocation, the value of message->status is undefined.
2724  * When the callback is issued, message->status holds either zero (to
2725  * indicate complete success) or a negative error code.  After that
2726  * callback returns, the driver which issued the transfer request may
2727  * deallocate the associated memory; it's no longer in use by any SPI
2728  * core or controller driver code.
2729  *
2730  * Note that although all messages to a spi_device are handled in
2731  * FIFO order, messages may go to different devices in other orders.
2732  * Some device might be higher priority, or have various "hard" access
2733  * time requirements, for example.
2734  *
2735  * On detection of any fault during the transfer, processing of
2736  * the entire message is aborted, and the device is deselected.
2737  * Until returning from the associated message completion callback,
2738  * no other spi_message queued to that device will be processed.
2739  * (This rule applies equally to all the synchronous transfer calls,
2740  * which are wrappers around this core asynchronous primitive.)
2741  *
2742  * Return: zero on success, else a negative error code.
2743  */
2744 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2745 {
2746         struct spi_master *master = spi->master;
2747         int ret;
2748         unsigned long flags;
2749
2750         ret = __spi_validate(spi, message);
2751         if (ret != 0)
2752                 return ret;
2753
2754         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2755
2756         ret = __spi_async(spi, message);
2757
2758         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2759
2760         return ret;
2761
2762 }
2763 EXPORT_SYMBOL_GPL(spi_async_locked);
2764
2765
2766 int spi_flash_read(struct spi_device *spi,
2767                    struct spi_flash_read_message *msg)
2768
2769 {
2770         struct spi_master *master = spi->master;
2771         struct device *rx_dev = NULL;
2772         int ret;
2773
2774         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2775              msg->addr_nbits == SPI_NBITS_DUAL) &&
2776             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2777                 return -EINVAL;
2778         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2779              msg->addr_nbits == SPI_NBITS_QUAD) &&
2780             !(spi->mode & SPI_TX_QUAD))
2781                 return -EINVAL;
2782         if (msg->data_nbits == SPI_NBITS_DUAL &&
2783             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2784                 return -EINVAL;
2785         if (msg->data_nbits == SPI_NBITS_QUAD &&
2786             !(spi->mode &  SPI_RX_QUAD))
2787                 return -EINVAL;
2788
2789         if (master->auto_runtime_pm) {
2790                 ret = pm_runtime_get_sync(master->dev.parent);
2791                 if (ret < 0) {
2792                         dev_err(&master->dev, "Failed to power device: %d\n",
2793                                 ret);
2794                         return ret;
2795                 }
2796         }
2797
2798         mutex_lock(&master->bus_lock_mutex);
2799         mutex_lock(&master->io_mutex);
2800         if (master->dma_rx) {
2801                 rx_dev = master->dma_rx->device->dev;
2802                 ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
2803                                   msg->buf, msg->len,
2804                                   DMA_FROM_DEVICE);
2805                 if (!ret)
2806                         msg->cur_msg_mapped = true;
2807         }
2808         ret = master->spi_flash_read(spi, msg);
2809         if (msg->cur_msg_mapped)
2810                 spi_unmap_buf(master, rx_dev, &msg->rx_sg,
2811                               DMA_FROM_DEVICE);
2812         mutex_unlock(&master->io_mutex);
2813         mutex_unlock(&master->bus_lock_mutex);
2814
2815         if (master->auto_runtime_pm)
2816                 pm_runtime_put(master->dev.parent);
2817
2818         return ret;
2819 }
2820 EXPORT_SYMBOL_GPL(spi_flash_read);
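/*
 * Illustrative fragment (field values are assumptions, modelled on how a
 * SPI-NOR driver of this era fills in struct spi_flash_read_message): read
 * "len" bytes at flash offset "from" into "buf" through the accelerated
 * path.  "spi", "buf", "from" and "len" are assumed locals of the caller.
 */
#if 0
	struct spi_flash_read_message fmsg = { };
	int ret;

	fmsg.buf = buf;
	fmsg.from = from;
	fmsg.len = len;
	fmsg.read_opcode = 0x03;	/* plain READ */
	fmsg.addr_width = 3;
	fmsg.dummy_bytes = 0;
	fmsg.opcode_nbits = SPI_NBITS_SINGLE;
	fmsg.addr_nbits = SPI_NBITS_SINGLE;
	fmsg.data_nbits = SPI_NBITS_SINGLE;

	ret = spi_flash_read(spi, &fmsg);
	/* on success, fmsg.retlen holds the number of bytes read */
#endif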
2821
2822 /*-------------------------------------------------------------------------*/
2823
2824 /* Utility methods for SPI master protocol drivers, layered on
2825  * top of the core.  Some other utility methods are defined as
2826  * inline functions.
2827  */
2828
2829 static void spi_complete(void *arg)
2830 {
2831         complete(arg);
2832 }
2833
2834 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2835 {
2836         DECLARE_COMPLETION_ONSTACK(done);
2837         int status;
2838         struct spi_master *master = spi->master;
2839         unsigned long flags;
2840
2841         status = __spi_validate(spi, message);
2842         if (status != 0)
2843                 return status;
2844
2845         message->complete = spi_complete;
2846         message->context = &done;
2847         message->spi = spi;
2848
2849         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2850         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2851
2852         /* If we're not using the legacy transfer method then we will
2853          * try to transfer in the calling context, so special-case this.
2854          * This code would be less tricky if we could remove the
2855          * support for driver implemented message queues.
2856          */
2857         if (master->transfer == spi_queued_transfer) {
2858                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2859
2860                 trace_spi_message_submit(message);
2861
2862                 status = __spi_queued_transfer(spi, message, false);
2863
2864                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2865         } else {
2866                 status = spi_async_locked(spi, message);
2867         }
2868
2869         if (status == 0) {
2870                 /* Push out the messages in the calling context if we
2871                  * can.
2872                  */
2873                 if (master->transfer == spi_queued_transfer) {
2874                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2875                                                        spi_sync_immediate);
2876                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2877                                                        spi_sync_immediate);
2878                         __spi_pump_messages(master, false);
2879                 }
2880
2881                 wait_for_completion(&done);
2882                 status = message->status;
2883         }
2884         message->context = NULL;
2885         return status;
2886 }
2887
2888 /**
2889  * spi_sync - blocking/synchronous SPI data transfers
2890  * @spi: device with which data will be exchanged
2891  * @message: describes the data transfers
2892  * Context: can sleep
2893  *
2894  * This call may only be used from a context that may sleep.  The sleep
2895  * is non-interruptible, and has no timeout.  Low-overhead controller
2896  * drivers may DMA directly into and out of the message buffers.
2897  *
2898  * Note that the SPI device's chip select is active during the message,
2899  * and then is normally disabled between messages.  Drivers for some
2900  * frequently-used devices may want to minimize costs of selecting a chip,
2901  * by leaving it selected in anticipation that the next message will go
2902  * to the same chip.  (That may increase power usage.)
2903  *
2904  * Also, the caller is guaranteeing that the memory associated with the
2905  * message will not be freed before this call returns.
2906  *
2907  * Return: zero on success, else a negative error code.
2908  */
2909 int spi_sync(struct spi_device *spi, struct spi_message *message)
2910 {
2911         int ret;
2912
2913         mutex_lock(&spi->master->bus_lock_mutex);
2914         ret = __spi_sync(spi, message);
2915         mutex_unlock(&spi->master->bus_lock_mutex);
2916
2917         return ret;
2918 }
2919 EXPORT_SYMBOL_GPL(spi_sync);
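
/*
 * Illustrative sketch only (not part of the core): a typical spi_sync()
 * caller chains one or more transfers into a message built on the stack.
 * The command/response split and the 16-byte read length are arbitrary
 * choices for this example; rxbuf must point to at least 16 bytes, and
 * both buffers should be DMA-safe (e.g. kmalloc'd) on controllers that
 * DMA directly from them.
 */
static int __maybe_unused spi_example_sync(struct spi_device *spi,
                                           const void *cmd, unsigned cmd_len,
                                           void *rxbuf)
{
        struct spi_transfer xfers[2] = {
                {
                        .tx_buf = cmd,          /* command goes out first... */
                        .len    = cmd_len,
                }, {
                        .rx_buf = rxbuf,        /* ...then the reply is clocked in */
                        .len    = 16,
                },
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfers[0], &msg);
        spi_message_add_tail(&xfers[1], &msg);

        /* blocks (uninterruptibly) until both transfers have completed */
        return spi_sync(spi, &msg);
}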
2920
2921 /**
2922  * spi_sync_locked - version of spi_sync with exclusive bus usage
2923  * @spi: device with which data will be exchanged
2924  * @message: describes the data transfers
2925  * Context: can sleep
2926  *
2927  * This call may only be used from a context that may sleep.  The sleep
2928  * is non-interruptible, and has no timeout.  Low-overhead controller
2929  * drivers may DMA directly into and out of the message buffers.
2930  *
2931  * This call should be used by drivers that require exclusive access to the
2932  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2933  * be released by a spi_bus_unlock call when the exclusive access is over.
2934  *
2935  * Return: zero on success, else a negative error code.
2936  */
2937 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2938 {
2939         return __spi_sync(spi, message);
2940 }
2941 EXPORT_SYMBOL_GPL(spi_sync_locked);
2942
2943 /**
2944  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2945  * @master: SPI bus master that should be locked for exclusive bus access
2946  * Context: can sleep
2947  *
2948  * This call may only be used from a context that may sleep.  The sleep
2949  * is non-interruptible, and has no timeout.
2950  *
2951  * This call should be used by drivers that require exclusive access to the
2952  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2953  * exclusive access is over. Data transfer must be done by spi_sync_locked
2954  * and spi_async_locked calls when the SPI bus lock is held.
2955  *
2956  * Return: always zero.
2957  */
2958 int spi_bus_lock(struct spi_master *master)
2959 {
2960         unsigned long flags;
2961
2962         mutex_lock(&master->bus_lock_mutex);
2963
2964         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2965         master->bus_lock_flag = 1;
2966         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2967
2968         /* mutex remains locked until spi_bus_unlock is called */
2969
2970         return 0;
2971 }
2972 EXPORT_SYMBOL_GPL(spi_bus_lock);
2973
2974 /**
2975  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2976  * @master: SPI bus master that was locked for exclusive bus access
2977  * Context: can sleep
2978  *
2979  * This call may only be used from a context that may sleep.  The sleep
2980  * is non-interruptible, and has no timeout.
2981  *
2982  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2983  * call.
2984  *
2985  * Return: always zero.
2986  */
2987 int spi_bus_unlock(struct spi_master *master)
2988 {
2989         master->bus_lock_flag = 0;
2990
2991         mutex_unlock(&master->bus_lock_mutex);
2992
2993         return 0;
2994 }
2995 EXPORT_SYMBOL_GPL(spi_bus_unlock);
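
/*
 * Illustrative sketch only (not part of the core): the exclusive-access
 * pattern described above.  A driver that must issue several messages
 * back to back, with no other device allowed on the bus in between,
 * brackets them with spi_bus_lock()/spi_bus_unlock() and uses the
 * _locked transfer calls while the lock is held.  msg1 and msg2 are
 * assumed to have been prepared by the caller.
 */
static int __maybe_unused spi_example_locked_sequence(struct spi_device *spi,
                                                      struct spi_message *msg1,
                                                      struct spi_message *msg2)
{
        struct spi_master *master = spi->master;
        int ret;

        spi_bus_lock(master);           /* other users now block in spi_sync() */

        ret = spi_sync_locked(spi, msg1);
        if (!ret)
                ret = spi_sync_locked(spi, msg2);

        spi_bus_unlock(master);         /* hand the bus back to everyone else */

        return ret;
}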
2996
2997 /* portable code must never pass more than 32 bytes */
2998 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2999
3000 static u8       *buf;
3001
3002 /**
3003  * spi_write_then_read - SPI synchronous write followed by read
3004  * @spi: device with which data will be exchanged
3005  * @txbuf: data to be written (need not be dma-safe)
3006  * @n_tx: size of txbuf, in bytes
3007  * @rxbuf: buffer into which data will be read (need not be dma-safe)
3008  * @n_rx: size of rxbuf, in bytes
3009  * Context: can sleep
3010  *
3011  * This performs a half duplex MicroWire style transaction with the
3012  * device, sending txbuf and then reading rxbuf.  The return value
3013  * is zero for success, else a negative errno status code.
3014  * This call may only be used from a context that may sleep.
3015  *
3016  * Parameters to this routine are always copied using a small buffer;
3017  * portable code should never use this for more than 32 bytes.
3018  * Performance-sensitive or bulk transfer code should instead use
3019  * spi_{async,sync}() calls with dma-safe buffers.
3020  *
3021  * Return: zero on success, else a negative error code.
3022  */
3023 int spi_write_then_read(struct spi_device *spi,
3024                 const void *txbuf, unsigned n_tx,
3025                 void *rxbuf, unsigned n_rx)
3026 {
3027         static DEFINE_MUTEX(lock);
3028
3029         int                     status;
3030         struct spi_message      message;
3031         struct spi_transfer     x[2];
3032         u8                      *local_buf;
3033
3034         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
3035          * copying here (this is purely a convenience), but we can
3036          * keep heap costs out of the hot path unless someone else is
3037          * using the preallocated buffer or the transfer is too large.
3038          */
3039         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3040                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3041                                     GFP_KERNEL | GFP_DMA);
3042                 if (!local_buf)
3043                         return -ENOMEM;
3044         } else {
3045                 local_buf = buf;
3046         }
3047
3048         spi_message_init(&message);
3049         memset(x, 0, sizeof(x));
3050         if (n_tx) {
3051                 x[0].len = n_tx;
3052                 spi_message_add_tail(&x[0], &message);
3053         }
3054         if (n_rx) {
3055                 x[1].len = n_rx;
3056                 spi_message_add_tail(&x[1], &message);
3057         }
3058
3059         memcpy(local_buf, txbuf, n_tx);
3060         x[0].tx_buf = local_buf;
3061         x[1].rx_buf = local_buf + n_tx;
3062
3063         /* do the I/O */
3064         status = spi_sync(spi, &message);
3065         if (status == 0)
3066                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3067
3068         if (x[0].tx_buf == buf)
3069                 mutex_unlock(&lock);
3070         else
3071                 kfree(local_buf);
3072
3073         return status;
3074 }
3075 EXPORT_SYMBOL_GPL(spi_write_then_read);
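
/*
 * Illustrative sketch only (not part of the core): reading a small,
 * big-endian 16-bit register with spi_write_then_read().  Neither buffer
 * needs to be DMA-safe, which is what makes this convenient for tiny
 * transfers; the register address 0x2a is a made-up value for this
 * example.
 */
static int __maybe_unused spi_example_read_reg16(struct spi_device *spi,
                                                 u16 *val)
{
        u8 addr = 0x2a;         /* hypothetical register address */
        u8 rx[2];
        int status;

        /* one chip-select assertion: write the address, then read two bytes */
        status = spi_write_then_read(spi, &addr, sizeof(addr), rx, sizeof(rx));
        if (status)
                return status;

        *val = (rx[0] << 8) | rx[1];
        return 0;
}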
3076
3077 /*-------------------------------------------------------------------------*/
3078
3079 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3080 static int __spi_of_device_match(struct device *dev, void *data)
3081 {
3082         return dev->of_node == data;
3083 }
3084
3085 /* must call put_device() when done with the returned spi_device */
3086 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3087 {
3088         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3089                                                 __spi_of_device_match);
3090         return dev ? to_spi_device(dev) : NULL;
3091 }
3092
3093 static int __spi_of_master_match(struct device *dev, const void *data)
3094 {
3095         return dev->of_node == data;
3096 }
3097
3098 /* the SPI masters are not on spi_bus, so we must find them another way */
3099 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3100 {
3101         struct device *dev;
3102
3103         dev = class_find_device(&spi_master_class, NULL, node,
3104                                 __spi_of_master_match);
3105         if (!dev)
3106                 return NULL;
3107
3108         /* reference was obtained in class_find_device() */
3109         return container_of(dev, struct spi_master, dev);
3110 }
3111
3112 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3113                          void *arg)
3114 {
3115         struct of_reconfig_data *rd = arg;
3116         struct spi_master *master;
3117         struct spi_device *spi;
3118
3119         switch (of_reconfig_get_state_change(action, arg)) {
3120         case OF_RECONFIG_CHANGE_ADD:
3121                 master = of_find_spi_master_by_node(rd->dn->parent);
3122                 if (master == NULL)
3123                         return NOTIFY_OK;       /* not for us */
3124
3125                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3126                         put_device(&master->dev);
3127                         return NOTIFY_OK;
3128                 }
3129
3130                 spi = of_register_spi_device(master, rd->dn);
3131                 put_device(&master->dev);
3132
3133                 if (IS_ERR(spi)) {
3134                         pr_err("%s: failed to create for '%s'\n",
3135                                         __func__, rd->dn->full_name);
3136                         of_node_clear_flag(rd->dn, OF_POPULATED);
3137                         return notifier_from_errno(PTR_ERR(spi));
3138                 }
3139                 break;
3140
3141         case OF_RECONFIG_CHANGE_REMOVE:
3142                 /* already depopulated? */
3143                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3144                         return NOTIFY_OK;
3145
3146                 /* find our device by node */
3147                 spi = of_find_spi_device_by_node(rd->dn);
3148                 if (spi == NULL)
3149                         return NOTIFY_OK;       /* no? not meant for us */
3150
3151                 /* unregister takes one ref away */
3152                 spi_unregister_device(spi);
3153
3154                 /* and drop the reference taken by the find above */
3155                 put_device(&spi->dev);
3156                 break;
3157         }
3158
3159         return NOTIFY_OK;
3160 }
3161
3162 static struct notifier_block spi_of_notifier = {
3163         .notifier_call = of_spi_notify,
3164 };
3165 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3166 extern struct notifier_block spi_of_notifier;
3167 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3168
3169 #if IS_ENABLED(CONFIG_ACPI)
3170 static int spi_acpi_master_match(struct device *dev, const void *data)
3171 {
3172         return ACPI_COMPANION(dev->parent) == data;
3173 }
3174
3175 static int spi_acpi_device_match(struct device *dev, void *data)
3176 {
3177         return ACPI_COMPANION(dev) == data;
3178 }
3179
3180 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
3181 {
3182         struct device *dev;
3183
3184         dev = class_find_device(&spi_master_class, NULL, adev,
3185                                 spi_acpi_master_match);
3186         if (!dev)
3187                 return NULL;
3188
3189         return container_of(dev, struct spi_master, dev);
3190 }
3191
3192 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3193 {
3194         struct device *dev;
3195
3196         dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3197
3198         return dev ? to_spi_device(dev) : NULL;
3199 }
3200
3201 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3202                            void *arg)
3203 {
3204         struct acpi_device *adev = arg;
3205         struct spi_master *master;
3206         struct spi_device *spi;
3207
3208         switch (value) {
3209         case ACPI_RECONFIG_DEVICE_ADD:
3210                 master = acpi_spi_find_master_by_adev(adev->parent);
3211                 if (!master)
3212                         break;
3213
3214                 acpi_register_spi_device(master, adev);
3215                 put_device(&master->dev);
3216                 break;
3217         case ACPI_RECONFIG_DEVICE_REMOVE:
3218                 if (!acpi_device_enumerated(adev))
3219                         break;
3220
3221                 spi = acpi_spi_find_device_by_adev(adev);
3222                 if (!spi)
3223                         break;
3224
3225                 spi_unregister_device(spi);
3226                 put_device(&spi->dev);
3227                 break;
3228         }
3229
3230         return NOTIFY_OK;
3231 }
3232
3233 static struct notifier_block spi_acpi_notifier = {
3234         .notifier_call = acpi_spi_notify,
3235 };
3236 #else
3237 extern struct notifier_block spi_acpi_notifier;
3238 #endif
3239
3240 static int __init spi_init(void)
3241 {
3242         int     status;
3243
3244         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3245         if (!buf) {
3246                 status = -ENOMEM;
3247                 goto err0;
3248         }
3249
3250         status = bus_register(&spi_bus_type);
3251         if (status < 0)
3252                 goto err1;
3253
3254         status = class_register(&spi_master_class);
3255         if (status < 0)
3256                 goto err2;
3257
3258         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3259                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3260         if (IS_ENABLED(CONFIG_ACPI))
3261                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3262
3263         return 0;
3264
3265 err2:
3266         bus_unregister(&spi_bus_type);
3267 err1:
3268         kfree(buf);
3269         buf = NULL;
3270 err0:
3271         return status;
3272 }
3273
3274 /* board_info is normally registered in arch_initcall(),
3275  * but even essential drivers wait till later.
3276  *
3277  * REVISIT: only boardinfo really needs static linking.  The rest (device and
3278  * driver registration) _could_ be dynamically linked (modular) ... the cost
3279  * is that the boardinfo data structures would have to be much more public.
3280  */
3281 postcore_initcall(spi_init);
3282