/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_master_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_master *master = container_of(dev,                   \
                                                 struct spi_master, dev); \
        return spi_statistics_##field##_show(&master->statistics, buf); \
}                                                                       \
static struct device_attribute dev_attr_spi_master_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_master_##field##_show,                              \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_messages.attr,
        &dev_attr_spi_master_transfers.attr,
        &dev_attr_spi_master_errors.attr,
        &dev_attr_spi_master_timedout.attr,
        &dev_attr_spi_master_spi_sync.attr,
        &dev_attr_spi_master_spi_sync_immediate.attr,
        &dev_attr_spi_master_spi_async.attr,
        &dev_attr_spi_master_bytes.attr,
        &dev_attr_spi_master_bytes_rx.attr,
        &dev_attr_spi_master_bytes_tx.attr,
        &dev_attr_spi_master_transfer_bytes_histo0.attr,
        &dev_attr_spi_master_transfer_bytes_histo1.attr,
        &dev_attr_spi_master_transfer_bytes_histo2.attr,
        &dev_attr_spi_master_transfer_bytes_histo3.attr,
        &dev_attr_spi_master_transfer_bytes_histo4.attr,
        &dev_attr_spi_master_transfer_bytes_histo5.attr,
        &dev_attr_spi_master_transfer_bytes_histo6.attr,
        &dev_attr_spi_master_transfer_bytes_histo7.attr,
        &dev_attr_spi_master_transfer_bytes_histo8.attr,
        &dev_attr_spi_master_transfer_bytes_histo9.attr,
        &dev_attr_spi_master_transfer_bytes_histo10.attr,
        &dev_attr_spi_master_transfer_bytes_histo11.attr,
        &dev_attr_spi_master_transfer_bytes_histo12.attr,
        &dev_attr_spi_master_transfer_bytes_histo13.attr,
        &dev_attr_spi_master_transfer_bytes_histo14.attr,
        &dev_attr_spi_master_transfer_bytes_histo15.attr,
        &dev_attr_spi_master_transfer_bytes_histo16.attr,
        &dev_attr_spi_master_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_master_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_master_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != master->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != master->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
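
/*
 * Illustrative sketch (not part of spi.c): how a typical client driver
 * registers itself on the bus through the helper above (normally via the
 * spi_register_driver()/module_spi_driver() wrappers).  The "foo" names
 * are hypothetical; only spi_driver, spi_device_id, spi_setup() and
 * module_spi_driver() are real API.
 */
#if 0
static const struct spi_device_id foo_spi_ids[] = {
        { "foo-sensor", 0 },
        { }
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static int foo_probe(struct spi_device *spi)
{
        /* adjust the initial I/O setup before the first transfer */
        spi->bits_per_word = 8;
        return spi_setup(spi);
}

static int foo_remove(struct spi_device *spi)
{
        return 0;
}

static struct spi_driver foo_driver = {
        .driver = {
                .name   = "foo-sensor",
        },
        .id_table       = foo_spi_ids,
        .probe          = foo_probe,
        .remove         = foo_remove,
};
module_spi_driver(foo_driver);
#endif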

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device       *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
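
/*
 * Illustrative sketch (not part of spi.c): the allocate/fill/add pattern
 * described in the kerneldoc above, as it might appear in code that learns
 * about a device out of band.  The "foo" names and the chosen parameters
 * are hypothetical.
 */
#if 0
static struct spi_device *foo_attach_chip(struct spi_master *master)
{
        struct spi_device *spi;

        spi = spi_alloc_device(master);
        if (!spi)
                return NULL;

        spi->chip_select = 0;
        spi->max_speed_hz = 1000000;
        spi->mode = SPI_MODE_3;
        strlcpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));

        if (spi_add_device(spi)) {
                /* not added: drop the reference taken by spi_alloc_device() */
                spi_dev_put(spi);
                return NULL;
        }
        return spi;
}
#endif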

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node)
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}

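/*
 * Illustrative sketch (not part of spi.c): how board init code typically
 * feeds the table above, e.g. from a machine/board file.  The "foo" names
 * and parameter values are hypothetical.
 */
#if 0
static struct spi_board_info foo_board_spi_devices[] __initdata = {
        {
                .modalias       = "foo-sensor",
                .max_speed_hz   = 1000000,
                .bus_num        = 0,
                .chip_select    = 1,
                .mode           = SPI_MODE_0,
        },
};

static int __init foo_board_init(void)
{
        return spi_register_board_info(foo_board_spi_devices,
                                       ARRAY_SIZE(foo_board_spi_devices));
}
arch_initcall(foo_board_init);
#endif
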
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (gpio_is_valid(spi->cs_gpio))
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, master->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        min = min_t(size_t,
                                    len, desc_len - offset_in_page(buf));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
                              struct device *dev, struct sg_table *sgt,
                              void *buf, size_t len,
                              enum dma_data_direction dir)
{
        return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
                                 struct device *dev, struct sg_table *sgt,
                                 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, master);
                spi_statistics_add_transfer_stats(stats, xfer, master);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
                                ms = 8LL * 1000LL * xfer->len;
                                do_div(ms, xfer->speed_hz);
                                ms += ms + 100; /* some tolerance */

                                if (ms > UINT_MAX)
                                        ms = UINT_MAX;

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               timedout);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               timedout);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_res_release(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
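
/*
 * Illustrative sketch (not part of spi.c): the interrupt-driven contract
 * between a controller's transfer_one() and spi_finalize_current_transfer()
 * described above.  The foo_* names, struct foo_priv and the hardware
 * helpers are hypothetical; a real driver would also need
 * <linux/interrupt.h> for the IRQ handler.
 */
#if 0
static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        struct foo_priv *priv = spi_master_get_devdata(master);

        foo_hw_start_xfer(priv, xfer);  /* kick off the transfer in hardware */

        return 1;       /* >0: core waits on master->xfer_completion */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct spi_master *master = dev_id;

        /* hardware says the transfer is done; let the core move on */
        spi_finalize_current_transfer(master);
        return IRQ_HANDLED;
}
#endif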

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                kthread_queue_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        mutex_lock(&master->io_mutex);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        mutex_unlock(&master->io_mutex);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        mutex_unlock(&master->io_mutex);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        goto out;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                goto out;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                goto out;
        }

out:
        mutex_unlock(&master->io_mutex);

        /* Prod the scheduler in case transfer_one() was busy waiting */
        if (!ret)
                cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        kthread_init_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        kthread_init_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread. Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                        "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        kthread_queue_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        kthread_queue_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * kthread_flush_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        kthread_flush_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                kthread_queue_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
        return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
        int ret;

        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;

        /* Initialize and start queue */
        ret = spi_init_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
        master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
                goto err_start_queue;
        }

        return 0;

err_start_queue:
        spi_destroy_queue(master);
err_init_queue:
        return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
        struct spi_device *spi;
        int rc;
        u32 value;

        /* Alloc an spi_device */
        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "spi_device alloc error for %s\n",
                        nc->full_name);
                rc = -ENOMEM;
                goto err_out;
        }

        /* Select device driver */
        rc = of_modalias_node(nc, spi->modalias,
                                sizeof(spi->modalias));
        if (rc < 0) {
                dev_err(&master->dev, "cannot find modalias for %s\n",
                        nc->full_name);
                goto err_out;
        }

        /* Device address */
        rc = of_property_read_u32(nc, "reg", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->chip_select = value;

        /* Mode (clock phase/polarity/etc.) */
        if (of_find_property(nc, "spi-cpha", NULL))
                spi->mode |= SPI_CPHA;
        if (of_find_property(nc, "spi-cpol", NULL))
                spi->mode |= SPI_CPOL;
        if (of_find_property(nc, "spi-cs-high", NULL))
                spi->mode |= SPI_CS_HIGH;
        if (of_find_property(nc, "spi-3wire", NULL))
                spi->mode |= SPI_3WIRE;
        if (of_find_property(nc, "spi-lsb-first", NULL))
                spi->mode |= SPI_LSB_FIRST;

        /* Device DUAL/QUAD mode */
        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_TX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_TX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                "spi-tx-bus-width %d not supported\n",
                                value);
1552                         break;
1553                 }
1554         }
1555
1556         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1557                 switch (value) {
1558                 case 1:
1559                         break;
1560                 case 2:
1561                         spi->mode |= SPI_RX_DUAL;
1562                         break;
1563                 case 4:
1564                         spi->mode |= SPI_RX_QUAD;
1565                         break;
1566                 default:
1567                         dev_warn(&master->dev,
1568                                 "spi-rx-bus-width %d not supported\n",
1569                                 value);
1570                         break;
1571                 }
1572         }
1573
1574         /* Device speed */
1575         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1576         if (rc) {
1577                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1578                         nc->full_name, rc);
1579                 goto err_out;
1580         }
1581         spi->max_speed_hz = value;
1582
1583         /* Store a pointer to the node in the device structure */
1584         of_node_get(nc);
1585         spi->dev.of_node = nc;
1586
1587         /* Register the new device */
1588         rc = spi_add_device(spi);
1589         if (rc) {
1590                 dev_err(&master->dev, "spi_device register error %s\n",
1591                         nc->full_name);
1592                 goto err_out;
1593         }
1594
1595         return spi;
1596
1597 err_out:
1598         spi_dev_put(spi);
1599         return ERR_PTR(rc);
1600 }
1601
1602 /**
1603  * of_register_spi_devices() - Register child devices onto the SPI bus
1604  * @master:     Pointer to spi_master device
1605  *
1606  * Registers an spi_device for each child node of master node which has a 'reg'
1607  * property.
1608  */
1609 static void of_register_spi_devices(struct spi_master *master)
1610 {
1611         struct spi_device *spi;
1612         struct device_node *nc;
1613
1614         if (!master->dev.of_node)
1615                 return;
1616
1617         for_each_available_child_of_node(master->dev.of_node, nc) {
1618                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1619                         continue;
1620                 spi = of_register_spi_device(master, nc);
1621                 if (IS_ERR(spi))
1622                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1623                                 nc->full_name);
1624         }
1625 }
1626 #else
1627 static void of_register_spi_devices(struct spi_master *master) { }
1628 #endif
1629
1630 #ifdef CONFIG_ACPI
1631 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1632 {
1633         struct spi_device *spi = data;
1634         struct spi_master *master = spi->master;
1635
1636         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1637                 struct acpi_resource_spi_serialbus *sb;
1638
1639                 sb = &ares->data.spi_serial_bus;
1640                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1641                         /*
1642                          * ACPI DeviceSelection numbering is handled by the
1643                          * host controller driver in Windows and can vary
1644                          * from driver to driver. In Linux we always expect
1645                          * 0 .. max - 1 so we need to ask the driver to
1646                          * translate between the two schemes.
1647                          */
1648                         if (master->fw_translate_cs) {
1649                                 int cs = master->fw_translate_cs(master,
1650                                                 sb->device_selection);
1651                                 if (cs < 0)
1652                                         return cs;
1653                                 spi->chip_select = cs;
1654                         } else {
1655                                 spi->chip_select = sb->device_selection;
1656                         }
1657
1658                         spi->max_speed_hz = sb->connection_speed;
1659
1660                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1661                                 spi->mode |= SPI_CPHA;
1662                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1663                                 spi->mode |= SPI_CPOL;
1664                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1665                                 spi->mode |= SPI_CS_HIGH;
1666                 }
1667         } else if (spi->irq < 0) {
1668                 struct resource r;
1669
1670                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1671                         spi->irq = r.start;
1672         }
1673
1674         /* Always tell the ACPI core to skip this resource */
1675         return 1;
1676 }
1677
1678 static acpi_status acpi_register_spi_device(struct spi_master *master,
1679                                             struct acpi_device *adev)
1680 {
1681         struct list_head resource_list;
1682         struct spi_device *spi;
1683         int ret;
1684
1685         if (acpi_bus_get_status(adev) || !adev->status.present ||
1686             acpi_device_enumerated(adev))
1687                 return AE_OK;
1688
1689         spi = spi_alloc_device(master);
1690         if (!spi) {
1691                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1692                         dev_name(&adev->dev));
1693                 return AE_NO_MEMORY;
1694         }
1695
1696         ACPI_COMPANION_SET(&spi->dev, adev);
1697         spi->irq = -1;
1698
1699         INIT_LIST_HEAD(&resource_list);
1700         ret = acpi_dev_get_resources(adev, &resource_list,
1701                                      acpi_spi_add_resource, spi);
1702         acpi_dev_free_resource_list(&resource_list);
1703
1704         if (ret < 0 || !spi->max_speed_hz) {
1705                 spi_dev_put(spi);
1706                 return AE_OK;
1707         }
1708
1709         if (spi->irq < 0)
1710                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1711
1712         acpi_device_set_enumerated(adev);
1713
1714         adev->power.flags.ignore_parent = true;
1715         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1716         if (spi_add_device(spi)) {
1717                 adev->power.flags.ignore_parent = false;
1718                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1719                         dev_name(&adev->dev));
1720                 spi_dev_put(spi);
1721         }
1722
1723         return AE_OK;
1724 }
1725
1726 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1727                                        void *data, void **return_value)
1728 {
1729         struct spi_master *master = data;
1730         struct acpi_device *adev;
1731
1732         if (acpi_bus_get_device(handle, &adev))
1733                 return AE_OK;
1734
1735         return acpi_register_spi_device(master, adev);
1736 }
1737
1738 static void acpi_register_spi_devices(struct spi_master *master)
1739 {
1740         acpi_status status;
1741         acpi_handle handle;
1742
1743         handle = ACPI_HANDLE(master->dev.parent);
1744         if (!handle)
1745                 return;
1746
1747         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1748                                      acpi_spi_add_device, NULL,
1749                                      master, NULL);
1750         if (ACPI_FAILURE(status))
1751                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1752 }
1753 #else
1754 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1755 #endif /* CONFIG_ACPI */
1756
1757 static void spi_master_release(struct device *dev)
1758 {
1759         struct spi_master *master;
1760
1761         master = container_of(dev, struct spi_master, dev);
1762         kfree(master);
1763 }
1764
1765 static struct class spi_master_class = {
1766         .name           = "spi_master",
1767         .owner          = THIS_MODULE,
1768         .dev_release    = spi_master_release,
1769         .dev_groups     = spi_master_groups,
1770 };
1771
1772
1773 /**
1774  * spi_alloc_master - allocate SPI master controller
1775  * @dev: the controller, possibly using the platform_bus
1776  * @size: how much zeroed driver-private data to allocate; the pointer to this
1777  *      memory is in the driver_data field of the returned device,
1778  *      accessible with spi_master_get_devdata().
1779  * Context: can sleep
1780  *
1781  * This call is used only by SPI master controller drivers, which are the
1782  * only ones directly touching chip registers.  It's how they allocate
1783  * an spi_master structure, prior to calling spi_register_master().
1784  *
1785  * This must be called from context that can sleep.
1786  *
1787  * The caller is responsible for assigning the bus number and initializing
1788  * the master's methods before calling spi_register_master(); and (after errors
1789  * adding the device) calling spi_master_put() to prevent a memory leak.
1790  *
1791  * Return: the SPI master structure on success, else NULL.
1792  */
1793 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1794 {
1795         struct spi_master       *master;
1796
1797         if (!dev)
1798                 return NULL;
1799
1800         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1801         if (!master)
1802                 return NULL;
1803
1804         device_initialize(&master->dev);
1805         master->bus_num = -1;
1806         master->num_chipselect = 1;
1807         master->dev.class = &spi_master_class;
1808         master->dev.parent = dev;
1809         pm_suspend_ignore_children(&master->dev, true);
1810         spi_master_set_devdata(master, &master[1]);
1811
1812         return master;
1813 }
1814 EXPORT_SYMBOL_GPL(spi_alloc_master);
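/*
 * Minimal usage sketch (illustrative only; the foo_* names are
 * hypothetical): a controller driver allocates its spi_master together
 * with driver-private data and retrieves that data via
 * spi_master_get_devdata(), as described above.
 *
 *	struct foo_priv {
 *		void __iomem	*regs;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct foo_priv *priv;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		priv = spi_master_get_devdata(master);
 *		master->num_chipselect = 4;
 *		...
 *	}
 */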
1815
1816 #ifdef CONFIG_OF
1817 static int of_spi_register_master(struct spi_master *master)
1818 {
1819         int nb, i, *cs;
1820         struct device_node *np = master->dev.of_node;
1821
1822         if (!np)
1823                 return 0;
1824
1825         nb = of_gpio_named_count(np, "cs-gpios");
1826         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1827
1828         /* Return error only for an incorrectly formed cs-gpios property */
1829         if (nb == 0 || nb == -ENOENT)
1830                 return 0;
1831         else if (nb < 0)
1832                 return nb;
1833
1834         cs = devm_kzalloc(&master->dev,
1835                           sizeof(int) * master->num_chipselect,
1836                           GFP_KERNEL);
1837         master->cs_gpios = cs;
1838
1839         if (!master->cs_gpios)
1840                 return -ENOMEM;
1841
1842         for (i = 0; i < master->num_chipselect; i++)
1843                 cs[i] = -ENOENT;
1844
1845         for (i = 0; i < nb; i++)
1846                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1847
1848         return 0;
1849 }
1850 #else
1851 static int of_spi_register_master(struct spi_master *master)
1852 {
1853         return 0;
1854 }
1855 #endif
1856
1857 /**
1858  * spi_register_master - register SPI master controller
1859  * @master: initialized master, originally from spi_alloc_master()
1860  * Context: can sleep
1861  *
1862  * SPI master controllers connect to their drivers using some non-SPI bus,
1863  * such as the platform bus.  The final stage of probe() in that code
1864  * includes calling spi_register_master() to hook up to this SPI bus glue.
1865  *
1866  * SPI controllers use board specific (often SOC specific) bus numbers,
1867  * and board-specific addressing for SPI devices combines those numbers
1868  * with chip select numbers.  Since SPI does not directly support dynamic
1869  * device identification, boards need configuration tables telling which
1870  * chip is at which address.
1871  *
1872  * This must be called from context that can sleep.  It returns zero on
1873  * success, else a negative error code (dropping the master's refcount).
1874  * After a successful return, the caller is responsible for calling
1875  * spi_unregister_master().
1876  *
1877  * Return: zero on success, else a negative error code.
1878  */
1879 int spi_register_master(struct spi_master *master)
1880 {
1881         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1882         struct device           *dev = master->dev.parent;
1883         struct boardinfo        *bi;
1884         int                     status = -ENODEV;
1885         int                     dynamic = 0;
1886
1887         if (!dev)
1888                 return -ENODEV;
1889
1890         status = of_spi_register_master(master);
1891         if (status)
1892                 return status;
1893
1894         /* even if it's just one always-selected device, there must
1895          * be at least one chipselect
1896          */
1897         if (master->num_chipselect == 0)
1898                 return -EINVAL;
1899
1900         if ((master->bus_num < 0) && master->dev.of_node)
1901                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1902
1903         /* convention:  dynamically assigned bus IDs count down from the max */
1904         if (master->bus_num < 0) {
1905                 /* FIXME switch to an IDR based scheme, something like
1906                  * I2C now uses, so we can't run out of "dynamic" IDs
1907                  */
1908                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1909                 dynamic = 1;
1910         }
1911
1912         INIT_LIST_HEAD(&master->queue);
1913         spin_lock_init(&master->queue_lock);
1914         spin_lock_init(&master->bus_lock_spinlock);
1915         mutex_init(&master->bus_lock_mutex);
1916         mutex_init(&master->io_mutex);
1917         master->bus_lock_flag = 0;
1918         init_completion(&master->xfer_completion);
1919         if (!master->max_dma_len)
1920                 master->max_dma_len = INT_MAX;
1921
1922         /* register the device, then userspace will see it.
1923          * registration fails if the bus ID is in use.
1924          */
1925         dev_set_name(&master->dev, "spi%u", master->bus_num);
1926         status = device_add(&master->dev);
1927         if (status < 0)
1928                 goto done;
1929         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1930                         dynamic ? " (dynamic)" : "");
1931
1932         /* If we're using a queued driver, start the queue */
1933         if (master->transfer)
1934                 dev_info(dev, "master is unqueued, this is deprecated\n");
1935         else {
1936                 status = spi_master_initialize_queue(master);
1937                 if (status) {
1938                         device_del(&master->dev);
1939                         goto done;
1940                 }
1941         }
1942         /* add statistics */
1943         spin_lock_init(&master->statistics.lock);
1944
1945         mutex_lock(&board_lock);
1946         list_add_tail(&master->list, &spi_master_list);
1947         list_for_each_entry(bi, &board_list, list)
1948                 spi_match_master_to_boardinfo(master, &bi->board_info);
1949         mutex_unlock(&board_lock);
1950
1951         /* Register devices from the device tree and ACPI */
1952         of_register_spi_devices(master);
1953         acpi_register_spi_devices(master);
1954 done:
1955         return status;
1956 }
1957 EXPORT_SYMBOL_GPL(spi_register_master);
1958
1959 static void devm_spi_unregister(struct device *dev, void *res)
1960 {
1961         spi_unregister_master(*(struct spi_master **)res);
1962 }
1963
1964 /**
1965  * devm_spi_register_master - register managed SPI master controller
1966  * @dev:    device managing SPI master
1967  * @master: initialized master, originally from spi_alloc_master()
1968  * Context: can sleep
1969  *
1970  * Register a SPI master as with spi_register_master() which will
1971  * automatically be unregistered when @dev is unbound from its driver.
1972  *
1973  * Return: zero on success, else a negative error code.
1974  */
1975 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1976 {
1977         struct spi_master **ptr;
1978         int ret;
1979
1980         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1981         if (!ptr)
1982                 return -ENOMEM;
1983
1984         ret = spi_register_master(master);
1985         if (!ret) {
1986                 *ptr = master;
1987                 devres_add(dev, ptr);
1988         } else {
1989                 devres_free(ptr);
1990         }
1991
1992         return ret;
1993 }
1994 EXPORT_SYMBOL_GPL(devm_spi_register_master);
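/*
 * Sketch of the managed registration path (illustrative; the foo_* names
 * are hypothetical): with devm_spi_register_master() the unregistration
 * is handled by devres when the parent device is unbound, so the driver
 * needs no explicit cleanup call in its remove() path.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->num_chipselect = 1;
 *		master->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_master(&pdev->dev, master);
 *	}
 */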
1995
1996 static int __unregister(struct device *dev, void *null)
1997 {
1998         spi_unregister_device(to_spi_device(dev));
1999         return 0;
2000 }
2001
2002 /**
2003  * spi_unregister_master - unregister SPI master controller
2004  * @master: the master being unregistered
2005  * Context: can sleep
2006  *
2007  * This call is used only by SPI master controller drivers, which are the
2008  * only ones directly touching chip registers.
2009  *
2010  * This must be called from context that can sleep.
2011  */
2012 void spi_unregister_master(struct spi_master *master)
2013 {
2014         int dummy;
2015
2016         if (master->queued) {
2017                 if (spi_destroy_queue(master))
2018                         dev_err(&master->dev, "queue remove failed\n");
2019         }
2020
2021         mutex_lock(&board_lock);
2022         list_del(&master->list);
2023         mutex_unlock(&board_lock);
2024
2025         dummy = device_for_each_child(&master->dev, NULL, __unregister);
2026         device_unregister(&master->dev);
2027 }
2028 EXPORT_SYMBOL_GPL(spi_unregister_master);
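/*
 * Teardown sketch (illustrative; foo_* is hypothetical, and the probe()
 * path is assumed to have stored the master as drvdata): a driver that
 * registered with spi_register_master() undoes it from its remove()
 * hook, which also unregisters the child spi_device instances created
 * by the DT/ACPI enumeration above.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct spi_master *master = platform_get_drvdata(pdev);
 *
 *		spi_unregister_master(master);
 *		return 0;
 *	}
 */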
2029
2030 int spi_master_suspend(struct spi_master *master)
2031 {
2032         int ret;
2033
2034         /* Basically no-ops for non-queued masters */
2035         if (!master->queued)
2036                 return 0;
2037
2038         ret = spi_stop_queue(master);
2039         if (ret)
2040                 dev_err(&master->dev, "queue stop failed\n");
2041
2042         return ret;
2043 }
2044 EXPORT_SYMBOL_GPL(spi_master_suspend);
2045
2046 int spi_master_resume(struct spi_master *master)
2047 {
2048         int ret;
2049
2050         if (!master->queued)
2051                 return 0;
2052
2053         ret = spi_start_queue(master);
2054         if (ret)
2055                 dev_err(&master->dev, "queue restart failed\n");
2056
2057         return ret;
2058 }
2059 EXPORT_SYMBOL_GPL(spi_master_resume);
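/*
 * Sketch of how controller drivers typically wrap the two helpers above
 * in their dev_pm_ops (illustrative; foo_* is hypothetical and drvdata is
 * assumed to hold the spi_master):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = spi_master_suspend(master);
 *		if (ret)
 *			return ret;
 *		...	// driver-specific: gate clocks, save state
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		...	// driver-specific: restore state, ungate clocks
 *		return spi_master_resume(dev_get_drvdata(dev));
 *	}
 */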
2060
2061 static int __spi_master_match(struct device *dev, const void *data)
2062 {
2063         struct spi_master *m;
2064         const u16 *bus_num = data;
2065
2066         m = container_of(dev, struct spi_master, dev);
2067         return m->bus_num == *bus_num;
2068 }
2069
2070 /**
2071  * spi_busnum_to_master - look up master associated with bus_num
2072  * @bus_num: the master's bus number
2073  * Context: can sleep
2074  *
2075  * This call may be used with devices that are registered after
2076  * arch init time.  It returns a refcounted pointer to the relevant
2077  * spi_master (which the caller must release), or NULL if there is
2078  * no such master registered.
2079  *
2080  * Return: the SPI master structure on success, else NULL.
2081  */
2082 struct spi_master *spi_busnum_to_master(u16 bus_num)
2083 {
2084         struct device           *dev;
2085         struct spi_master       *master = NULL;
2086
2087         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2088                                 __spi_master_match);
2089         if (dev)
2090                 master = container_of(dev, struct spi_master, dev);
2091         /* reference got in class_find_device */
2092         return master;
2093 }
2094 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
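/*
 * Usage sketch: the reference taken by class_find_device() must be
 * dropped by the caller once the master is no longer needed, e.g. with
 * spi_master_put() (bus number 0 below is just an example).
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		...	// use the master
 *		spi_master_put(master);
 *	}
 */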
2095
2096 /*-------------------------------------------------------------------------*/
2097
2098 /* Core methods for SPI resource management */
2099
2100 /**
2101  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2102  *                 during the processing of a spi_message while using
2103  *                 spi_transfer_one
2104  * @spi:     the spi device for which we allocate memory
2105  * @release: the release code to execute for this resource
2106  * @size:    size to alloc and return
2107  * @gfp:     GFP allocation flags
2108  *
2109  * Return: the pointer to the allocated data, or NULL in case of an allocation failure
2110  *
2111  * This may get enhanced in the future to allocate from a memory pool
2112  * of the @spi_device or @spi_master to avoid repeated allocations.
2113  */
2114 void *spi_res_alloc(struct spi_device *spi,
2115                     spi_res_release_t release,
2116                     size_t size, gfp_t gfp)
2117 {
2118         struct spi_res *sres;
2119
2120         sres = kzalloc(sizeof(*sres) + size, gfp);
2121         if (!sres)
2122                 return NULL;
2123
2124         INIT_LIST_HEAD(&sres->entry);
2125         sres->release = release;
2126
2127         return sres->data;
2128 }
2129 EXPORT_SYMBOL_GPL(spi_res_alloc);
2130
2131 /**
2132  * spi_res_free - free an spi resource
2133  * @res: pointer to the custom data of a resource
2134  *
2135  */
2136 void spi_res_free(void *res)
2137 {
2138         struct spi_res *sres = container_of(res, struct spi_res, data);
2139
2140         if (!res)
2141                 return;
2142
2143         WARN_ON(!list_empty(&sres->entry));
2144         kfree(sres);
2145 }
2146 EXPORT_SYMBOL_GPL(spi_res_free);
2147
2148 /**
2149  * spi_res_add - add a spi_res to the spi_message
2150  * @message: the spi message
2151  * @res:     the spi_resource
2152  */
2153 void spi_res_add(struct spi_message *message, void *res)
2154 {
2155         struct spi_res *sres = container_of(res, struct spi_res, data);
2156
2157         WARN_ON(!list_empty(&sres->entry));
2158         list_add_tail(&sres->entry, &message->resources);
2159 }
2160 EXPORT_SYMBOL_GPL(spi_res_add);
2161
2162 /**
2163  * spi_res_release - release all spi resources for this message
2164  * @master:  the @spi_master
2165  * @message: the @spi_message
2166  */
2167 void spi_res_release(struct spi_master *master,
2168                      struct spi_message *message)
2169 {
2170         struct spi_res *res;
2171
2172         while (!list_empty(&message->resources)) {
2173                 res = list_last_entry(&message->resources,
2174                                       struct spi_res, entry);
2175
2176                 if (res->release)
2177                         res->release(master, message, res->data);
2178
2179                 list_del(&res->entry);
2180
2181                 kfree(res);
2182         }
2183 }
2184 EXPORT_SYMBOL_GPL(spi_res_release);
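/*
 * Life-cycle sketch for the spi_res helpers above (illustrative;
 * my_release, my_fixup and struct my_state are hypothetical): resources
 * attached to a message with spi_res_add() are torn down automatically
 * when the core calls spi_res_release() after the message has finished.
 *
 *	static void my_release(struct spi_master *master,
 *			       struct spi_message *msg, void *res)
 *	{
 *		...	// undo whatever my_fixup set up, using *res
 *	}
 *
 *	static int my_fixup(struct spi_message *msg)
 *	{
 *		struct my_state *st;
 *
 *		st = spi_res_alloc(msg->spi, my_release, sizeof(*st),
 *				   GFP_KERNEL);
 *		if (!st)
 *			return -ENOMEM;
 *		...	// modify the message, record the changes in *st
 *		spi_res_add(msg, st);
 *		return 0;
 *	}
 */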
2185
2186 /*-------------------------------------------------------------------------*/
2187
2188 /* Core methods for spi_message alterations */
2189
2190 static void __spi_replace_transfers_release(struct spi_master *master,
2191                                             struct spi_message *msg,
2192                                             void *res)
2193 {
2194         struct spi_replaced_transfers *rxfer = res;
2195         size_t i;
2196
2197         /* call extra callback if requested */
2198         if (rxfer->release)
2199                 rxfer->release(master, msg, res);
2200
2201         /* insert replaced transfers back into the message */
2202         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2203
2204         /* remove the formerly inserted entries */
2205         for (i = 0; i < rxfer->inserted; i++)
2206                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2207 }
2208
2209 /**
2210  * spi_replace_transfers - replace transfers with several transfers
2211  *                         and register change with spi_message.resources
2212  * @msg:           the spi_message we work upon
2213  * @xfer_first:    the first spi_transfer we want to replace
2214  * @remove:        number of transfers to remove
2215  * @insert:        the number of transfers we want to insert instead
2216  * @release:       extra release code necessary in some circumstances
2217  * @extradatasize: extra data to allocate (with alignment guarantees
2218  *                 of struct @spi_transfer)
2219  * @gfp:           gfp flags
2220  *
2221  * Return: pointer to @spi_replaced_transfers,
2222  *         or an ERR_PTR() in case of errors.
2223  */
2224 struct spi_replaced_transfers *spi_replace_transfers(
2225         struct spi_message *msg,
2226         struct spi_transfer *xfer_first,
2227         size_t remove,
2228         size_t insert,
2229         spi_replaced_release_t release,
2230         size_t extradatasize,
2231         gfp_t gfp)
2232 {
2233         struct spi_replaced_transfers *rxfer;
2234         struct spi_transfer *xfer;
2235         size_t i;
2236
2237         /* allocate the structure using spi_res */
2238         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2239                               insert * sizeof(struct spi_transfer)
2240                               + sizeof(struct spi_replaced_transfers)
2241                               + extradatasize,
2242                               gfp);
2243         if (!rxfer)
2244                 return ERR_PTR(-ENOMEM);
2245
2246         /* the release code to invoke before running the generic release */
2247         rxfer->release = release;
2248
2249         /* assign extradata */
2250         if (extradatasize)
2251                 rxfer->extradata =
2252                         &rxfer->inserted_transfers[insert];
2253
2254         /* init the replaced_transfers list */
2255         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2256
2257         /* assign the list_entry after which we should reinsert
2258          * the @replaced_transfers - it may be spi_message.transfers!
2259          */
2260         rxfer->replaced_after = xfer_first->transfer_list.prev;
2261
2262         /* remove the requested number of transfers */
2263         for (i = 0; i < remove; i++) {
2264                 /* if the entry after replaced_after is msg->transfers
2265                  * then we have been requested to remove more transfers
2266                  * than are in the list
2267                  */
2268                 if (rxfer->replaced_after->next == &msg->transfers) {
2269                         dev_err(&msg->spi->dev,
2270                                 "requested to remove more spi_transfers than are available\n");
2271                         /* insert replaced transfers back into the message */
2272                         list_splice(&rxfer->replaced_transfers,
2273                                     rxfer->replaced_after);
2274
2275                         /* free the spi_replace_transfer structure */
2276                         spi_res_free(rxfer);
2277
2278                         /* and return with an error */
2279                         return ERR_PTR(-EINVAL);
2280                 }
2281
2282                 /* remove the entry after replaced_after from list of
2283                  * transfers and add it to list of replaced_transfers
2284                  */
2285                 list_move_tail(rxfer->replaced_after->next,
2286                                &rxfer->replaced_transfers);
2287         }
2288
2289         /* create copies of the given xfer with identical settings,
2290          * based on the first transfer that is to be removed
2291          */
2292         for (i = 0; i < insert; i++) {
2293                 /* we need to run in reverse order */
2294                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2295
2296                 /* copy all spi_transfer data */
2297                 memcpy(xfer, xfer_first, sizeof(*xfer));
2298
2299                 /* add to list */
2300                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2301
2302                 /* clear cs_change and delay_usecs for all but the last */
2303                 if (i) {
2304                         xfer->cs_change = false;
2305                         xfer->delay_usecs = 0;
2306                 }
2307         }
2308
2309         /* set up inserted */
2310         rxfer->inserted = insert;
2311
2312         /* and register it with spi_res/spi_message */
2313         spi_res_add(msg, rxfer);
2314
2315         return rxfer;
2316 }
2317 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2318
2319 static int __spi_split_transfer_maxsize(struct spi_master *master,
2320                                         struct spi_message *msg,
2321                                         struct spi_transfer **xferp,
2322                                         size_t maxsize,
2323                                         gfp_t gfp)
2324 {
2325         struct spi_transfer *xfer = *xferp, *xfers;
2326         struct spi_replaced_transfers *srt;
2327         size_t offset;
2328         size_t count, i;
2329
2330         /* warn once about the fact that we are splitting a transfer */
2331         dev_warn_once(&msg->spi->dev,
2332                       "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2333                       xfer->len, maxsize);
2334
2335         /* calculate how many we have to replace */
2336         count = DIV_ROUND_UP(xfer->len, maxsize);
2337
2338         /* create replacement */
2339         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2340         if (IS_ERR(srt))
2341                 return PTR_ERR(srt);
2342         xfers = srt->inserted_transfers;
2343
2344         /* now handle each of those newly inserted spi_transfers
2345          * note that the replacement spi_transfers are all preset
2346          * to the same values as *xferp, so tx_buf, rx_buf and len
2347          * are all identical (as well as most others),
2348          * so we just have to fix up len and the pointers.
2349          *
2350          * this also includes support for the deprecated
2351          * spi_message.is_dma_mapped interface
2352          */
2353
2354         /* the first transfer just needs the length modified, so we
2355          * run it outside the loop
2356          */
2357         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2358
2359         /* all the others need rx_buf/tx_buf also set */
2360         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2361                 /* update rx_buf, tx_buf and dma */
2362                 if (xfers[i].rx_buf)
2363                         xfers[i].rx_buf += offset;
2364                 if (xfers[i].rx_dma)
2365                         xfers[i].rx_dma += offset;
2366                 if (xfers[i].tx_buf)
2367                         xfers[i].tx_buf += offset;
2368                 if (xfers[i].tx_dma)
2369                         xfers[i].tx_dma += offset;
2370
2371                 /* update length */
2372                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2373         }
2374
2375         /* we set up xferp to the last entry we have inserted,
2376          * so that we skip those already split transfers
2377          */
2378         *xferp = &xfers[count - 1];
2379
2380         /* increment statistics counters */
2381         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2382                                        transfers_split_maxsize);
2383         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2384                                        transfers_split_maxsize);
2385
2386         return 0;
2387 }
2388
2389 /**
2390  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2391  *                              when an individual transfer exceeds a
2392  *                              certain size
2393  * @master:    the @spi_master for this transfer
2394  * @msg:   the @spi_message to transform
2395  * @maxsize:  the maximum size an individual spi_transfer may have before it is split
2396  * @gfp: GFP allocation flags
2397  *
2398  * Return: status of transformation
2399  */
2400 int spi_split_transfers_maxsize(struct spi_master *master,
2401                                 struct spi_message *msg,
2402                                 size_t maxsize,
2403                                 gfp_t gfp)
2404 {
2405         struct spi_transfer *xfer;
2406         int ret;
2407
2408         /* iterate over the transfer_list,
2409          * but note that xfer is advanced to the last transfer inserted
2410          * to avoid checking sizes again unnecessarily (also xfer may
2411          * potentially belong to a different list by the time the
2412          * replacement has happened).
2413          */
2414         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2415                 if (xfer->len > maxsize) {
2416                         ret = __spi_split_transfer_maxsize(
2417                                 master, msg, &xfer, maxsize, gfp);
2418                         if (ret)
2419                                 return ret;
2420                 }
2421         }
2422
2423         return 0;
2424 }
2425 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
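/*
 * Illustrative call site (an assumption about usage, not code from this
 * file): a controller with a hardware limit on transfer length can split
 * oversized transfers before executing a message, for example from its
 * prepare_message() callback.  FOO_MAX_XFER_LEN is hypothetical.
 *
 *	static int foo_prepare_message(struct spi_master *master,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg,
 *						   FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 */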
2426
2427 /*-------------------------------------------------------------------------*/
2428
2429 /* Core methods for SPI master protocol drivers.  Some of the
2430  * other core methods are currently defined as inline functions.
2431  */
2432
2433 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2434 {
2435         if (master->bits_per_word_mask) {
2436                 /* Only 32 bits fit in the mask */
2437                 if (bits_per_word > 32)
2438                         return -EINVAL;
2439                 if (!(master->bits_per_word_mask &
2440                                 SPI_BPW_MASK(bits_per_word)))
2441                         return -EINVAL;
2442         }
2443
2444         return 0;
2445 }
2446
2447 /**
2448  * spi_setup - setup SPI mode and clock rate
2449  * @spi: the device whose settings are being modified
2450  * Context: can sleep, and no requests are queued to the device
2451  *
2452  * SPI protocol drivers may need to update the transfer mode if the
2453  * device doesn't work with its default.  They may likewise need
2454  * to update clock rates or word sizes from initial values.  This function
2455  * changes those settings, and must be called from a context that can sleep.
2456  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2457  * effect the next time the device is selected and data is transferred to
2458  * or from it.  When this function returns, the spi device is deselected.
2459  *
2460  * Note that this call will fail if the protocol driver specifies an option
2461  * that the underlying controller or its driver does not support.  For
2462  * example, not all hardware supports wire transfers using nine bit words,
2463  * LSB-first wire encoding, or active-high chipselects.
2464  *
2465  * Return: zero on success, else a negative error code.
2466  */
2467 int spi_setup(struct spi_device *spi)
2468 {
2469         unsigned        bad_bits, ugly_bits;
2470         int             status;
2471
2472         /* check mode to prevent DUAL and QUAD from being set at the same time
2473          */
2474         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2475                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2476                 dev_err(&spi->dev,
2477                 "setup: can not select dual and quad at the same time\n");
2478                 return -EINVAL;
2479         }
2480         /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2481          */
2482         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2483                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2484                 return -EINVAL;
2485         /* help drivers fail *cleanly* when they need options
2486          * that aren't supported with their current master
2487          */
2488         bad_bits = spi->mode & ~spi->master->mode_bits;
2489         ugly_bits = bad_bits &
2490                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2491         if (ugly_bits) {
2492                 dev_warn(&spi->dev,
2493                          "setup: ignoring unsupported mode bits %x\n",
2494                          ugly_bits);
2495                 spi->mode &= ~ugly_bits;
2496                 bad_bits &= ~ugly_bits;
2497         }
2498         if (bad_bits) {
2499                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2500                         bad_bits);
2501                 return -EINVAL;
2502         }
2503
2504         if (!spi->bits_per_word)
2505                 spi->bits_per_word = 8;
2506
2507         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2508         if (status)
2509                 return status;
2510
2511         if (!spi->max_speed_hz)
2512                 spi->max_speed_hz = spi->master->max_speed_hz;
2513
2514         if (spi->master->setup)
2515                 status = spi->master->setup(spi);
2516
2517         spi_set_cs(spi, false);
2518
2519         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2520                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2521                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2522                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2523                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2524                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2525                         spi->bits_per_word, spi->max_speed_hz,
2526                         status);
2527
2528         return status;
2529 }
2530 EXPORT_SYMBOL_GPL(spi_setup);
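/*
 * Usage sketch for a protocol driver (illustrative values): adjust the
 * per-device settings and call spi_setup() before queueing transfers so
 * that unsupported options fail cleanly here rather than mid-message.
 *
 *	spi->mode |= SPI_CPOL | SPI_CPHA;	// SPI mode 3
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */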
2531
2532 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2533 {
2534         struct spi_master *master = spi->master;
2535         struct spi_transfer *xfer;
2536         int w_size;
2537
2538         if (list_empty(&message->transfers))
2539                 return -EINVAL;
2540
2541         /* Half-duplex links include original MicroWire, and ones with
2542          * only one data pin like SPI_3WIRE (switches direction) or where
2543          * either MOSI or MISO is missing.  They can also be caused by
2544          * software limitations.
2545          */
2546         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2547                         || (spi->mode & SPI_3WIRE)) {
2548                 unsigned flags = master->flags;
2549
2550                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2551                         if (xfer->rx_buf && xfer->tx_buf)
2552                                 return -EINVAL;
2553                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2554                                 return -EINVAL;
2555                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2556                                 return -EINVAL;
2557                 }
2558         }
2559
2560         /*
2561          * Set transfer bits_per_word and max speed as spi device default if
2562          * it is not set for this transfer.
2563          * Set transfer tx_nbits and rx_nbits as single transfer default
2564          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2565          */
2566         message->frame_length = 0;
2567         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2568                 message->frame_length += xfer->len;
2569                 if (!xfer->bits_per_word)
2570                         xfer->bits_per_word = spi->bits_per_word;
2571
2572                 if (!xfer->speed_hz)
2573                         xfer->speed_hz = spi->max_speed_hz;
2574                 if (!xfer->speed_hz)
2575                         xfer->speed_hz = master->max_speed_hz;
2576
2577                 if (master->max_speed_hz &&
2578                     xfer->speed_hz > master->max_speed_hz)
2579                         xfer->speed_hz = master->max_speed_hz;
2580
2581                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2582                         return -EINVAL;
2583
2584                 /*
2585                  * SPI transfer length should be multiple of SPI word size
2586                  * where SPI word size should be power-of-two multiple
2587                  */
2588                 if (xfer->bits_per_word <= 8)
2589                         w_size = 1;
2590                 else if (xfer->bits_per_word <= 16)
2591                         w_size = 2;
2592                 else
2593                         w_size = 4;
2594
2595                 /* No partial transfers accepted */
2596                 if (xfer->len % w_size)
2597                         return -EINVAL;
2598
2599                 if (xfer->speed_hz && master->min_speed_hz &&
2600                     xfer->speed_hz < master->min_speed_hz)
2601                         return -EINVAL;
2602
2603                 if (xfer->tx_buf && !xfer->tx_nbits)
2604                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2605                 if (xfer->rx_buf && !xfer->rx_nbits)
2606                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2607                 /* check transfer tx/rx_nbits:
2608                  * 1. check the value matches one of single, dual and quad
2609                  * 2. check tx/rx_nbits match the mode in spi_device
2610                  */
2611                 if (xfer->tx_buf) {
2612                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2613                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2614                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2615                                 return -EINVAL;
2616                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2617                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2618                                 return -EINVAL;
2619                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2620                                 !(spi->mode & SPI_TX_QUAD))
2621                                 return -EINVAL;
2622                 }
2623                 /* check transfer rx_nbits */
2624                 if (xfer->rx_buf) {
2625                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2626                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2627                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2628                                 return -EINVAL;
2629                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2630                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2631                                 return -EINVAL;
2632                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2633                                 !(spi->mode & SPI_RX_QUAD))
2634                                 return -EINVAL;
2635                 }
2636         }
2637
2638         message->status = -EINPROGRESS;
2639
2640         return 0;
2641 }
2642
2643 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2644 {
2645         struct spi_master *master = spi->master;
2646
2647         message->spi = spi;
2648
2649         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2650         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2651
2652         trace_spi_message_submit(message);
2653
2654         return master->transfer(spi, message);
2655 }
2656
2657 /**
2658  * spi_async - asynchronous SPI transfer
2659  * @spi: device with which data will be exchanged
2660  * @message: describes the data transfers, including completion callback
2661  * Context: any (irqs may be blocked, etc)
2662  *
2663  * This call may be used in_irq and other contexts which can't sleep,
2664  * as well as from task contexts which can sleep.
2665  *
2666  * The completion callback is invoked in a context which can't sleep.
2667  * Before that invocation, the value of message->status is undefined.
2668  * When the callback is issued, message->status holds either zero (to
2669  * indicate complete success) or a negative error code.  After that
2670  * callback returns, the driver which issued the transfer request may
2671  * deallocate the associated memory; it's no longer in use by any SPI
2672  * core or controller driver code.
2673  *
2674  * Note that although all messages to a spi_device are handled in
2675  * FIFO order, messages may go to different devices in other orders.
2676  * Some device might be higher priority, or have various "hard" access
2677  * time requirements, for example.
2678  *
2679  * On detection of any fault during the transfer, processing of
2680  * the entire message is aborted, and the device is deselected.
2681  * Until returning from the associated message completion callback,
2682  * no other spi_message queued to that device will be processed.
2683  * (This rule applies equally to all the synchronous transfer calls,
2684  * which are wrappers around this core asynchronous primitive.)
2685  *
2686  * Return: zero on success, else a negative error code.
2687  */
2688 int spi_async(struct spi_device *spi, struct spi_message *message)
2689 {
2690         struct spi_master *master = spi->master;
2691         int ret;
2692         unsigned long flags;
2693
2694         ret = __spi_validate(spi, message);
2695         if (ret != 0)
2696                 return ret;
2697
2698         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2699
2700         if (master->bus_lock_flag)
2701                 ret = -EBUSY;
2702         else
2703                 ret = __spi_async(spi, message);
2704
2705         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2706
2707         return ret;
2708 }
2709 EXPORT_SYMBOL_GPL(spi_async);
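/*
 * Minimal asynchronous submission sketch (illustrative; struct my_dev and
 * my_complete are hypothetical, and md->xfer is assumed to be already
 * filled in).  The completion callback runs in a context that cannot
 * sleep, as noted above.
 *
 *	static void my_complete(void *context)
 *	{
 *		struct my_dev *md = context;
 *
 *		...	// inspect md->msg.status, schedule follow-up work
 *	}
 *
 *	spi_message_init(&md->msg);
 *	spi_message_add_tail(&md->xfer, &md->msg);
 *	md->msg.complete = my_complete;
 *	md->msg.context = md;
 *
 *	ret = spi_async(spi, &md->msg);
 */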
2710
2711 /**
2712  * spi_async_locked - version of spi_async with exclusive bus usage
2713  * @spi: device with which data will be exchanged
2714  * @message: describes the data transfers, including completion callback
2715  * Context: any (irqs may be blocked, etc)
2716  *
2717  * This call may be used in_irq and other contexts which can't sleep,
2718  * as well as from task contexts which can sleep.
2719  *
2720  * The completion callback is invoked in a context which can't sleep.
2721  * Before that invocation, the value of message->status is undefined.
2722  * When the callback is issued, message->status holds either zero (to
2723  * indicate complete success) or a negative error code.  After that
2724  * callback returns, the driver which issued the transfer request may
2725  * deallocate the associated memory; it's no longer in use by any SPI
2726  * core or controller driver code.
2727  *
2728  * Note that although all messages to a spi_device are handled in
2729  * FIFO order, messages may go to different devices in other orders.
2730  * Some device might be higher priority, or have various "hard" access
2731  * time requirements, for example.
2732  *
2733  * On detection of any fault during the transfer, processing of
2734  * the entire message is aborted, and the device is deselected.
2735  * Until returning from the associated message completion callback,
2736  * no other spi_message queued to that device will be processed.
2737  * (This rule applies equally to all the synchronous transfer calls,
2738  * which are wrappers around this core asynchronous primitive.)
2739  *
2740  * Return: zero on success, else a negative error code.
2741  */
2742 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2743 {
2744         struct spi_master *master = spi->master;
2745         int ret;
2746         unsigned long flags;
2747
2748         ret = __spi_validate(spi, message);
2749         if (ret != 0)
2750                 return ret;
2751
2752         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2753
2754         ret = __spi_async(spi, message);
2755
2756         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2757
2758         return ret;
2759
2760 }
2761 EXPORT_SYMBOL_GPL(spi_async_locked);
2762
2763
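/*
 * spi_flash_read - issue an accelerated flash read through the
 * controller's optional spi_flash_read() hook.
 *
 * Validates the requested opcode/address/data bus widths against the
 * device's mode bits, takes the runtime PM reference plus the bus and io
 * mutexes, DMA-maps the receive buffer when the controller has an rx DMA
 * channel, and then delegates to master->spi_flash_read().
 */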
2764 int spi_flash_read(struct spi_device *spi,
2765                    struct spi_flash_read_message *msg)
2766
2767 {
2768         struct spi_master *master = spi->master;
2769         struct device *rx_dev = NULL;
2770         int ret;
2771
2772         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2773              msg->addr_nbits == SPI_NBITS_DUAL) &&
2774             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2775                 return -EINVAL;
2776         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2777              msg->addr_nbits == SPI_NBITS_QUAD) &&
2778             !(spi->mode & SPI_TX_QUAD))
2779                 return -EINVAL;
2780         if (msg->data_nbits == SPI_NBITS_DUAL &&
2781             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2782                 return -EINVAL;
2783         if (msg->data_nbits == SPI_NBITS_QUAD &&
2784             !(spi->mode &  SPI_RX_QUAD))
2785                 return -EINVAL;
2786
2787         if (master->auto_runtime_pm) {
2788                 ret = pm_runtime_get_sync(master->dev.parent);
2789                 if (ret < 0) {
2790                         dev_err(&master->dev, "Failed to power device: %d\n",
2791                                 ret);
2792                         return ret;
2793                 }
2794         }
2795
2796         mutex_lock(&master->bus_lock_mutex);
2797         mutex_lock(&master->io_mutex);
2798         if (master->dma_rx) {
2799                 rx_dev = master->dma_rx->device->dev;
2800                 ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
2801                                   msg->buf, msg->len,
2802                                   DMA_FROM_DEVICE);
2803                 if (!ret)
2804                         msg->cur_msg_mapped = true;
2805         }
2806         ret = master->spi_flash_read(spi, msg);
2807         if (msg->cur_msg_mapped)
2808                 spi_unmap_buf(master, rx_dev, &msg->rx_sg,
2809                               DMA_FROM_DEVICE);
2810         mutex_unlock(&master->io_mutex);
2811         mutex_unlock(&master->bus_lock_mutex);
2812
2813         if (master->auto_runtime_pm)
2814                 pm_runtime_put(master->dev.parent);
2815
2816         return ret;
2817 }
2818 EXPORT_SYMBOL_GPL(spi_flash_read);
2819
2820 /*-------------------------------------------------------------------------*/
2821
2822 /* Utility methods for SPI master protocol drivers, layered on
2823  * top of the core.  Some other utility methods are defined as
2824  * inline functions.
2825  */
2826
2827 static void spi_complete(void *arg)
2828 {
2829         complete(arg);
2830 }
2831
2832 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2833 {
2834         DECLARE_COMPLETION_ONSTACK(done);
2835         int status;
2836         struct spi_master *master = spi->master;
2837         unsigned long flags;
2838
2839         status = __spi_validate(spi, message);
2840         if (status != 0)
2841                 return status;
2842
2843         message->complete = spi_complete;
2844         message->context = &done;
2845         message->spi = spi;
2846
2847         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2848         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2849
2850         /* If we're not using the legacy transfer method then we will
2851          * try to transfer in the calling context, so special-case it.
2852          * This code would be less tricky if we could remove the
2853          * support for driver-implemented message queues.
2854          */
2855         if (master->transfer == spi_queued_transfer) {
2856                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2857
2858                 trace_spi_message_submit(message);
2859
2860                 status = __spi_queued_transfer(spi, message, false);
2861
2862                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2863         } else {
2864                 status = spi_async_locked(spi, message);
2865         }
2866
2867         if (status == 0) {
2868                 /* Push out the messages in the calling context if we
2869                  * can.
2870                  */
2871                 if (master->transfer == spi_queued_transfer) {
2872                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2873                                                        spi_sync_immediate);
2874                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2875                                                        spi_sync_immediate);
2876                         __spi_pump_messages(master, false);
2877                 }
2878
2879                 wait_for_completion(&done);
2880                 status = message->status;
2881         }
2882         message->context = NULL;
2883         return status;
2884 }
2885
2886 /**
2887  * spi_sync - blocking/synchronous SPI data transfers
2888  * @spi: device with which data will be exchanged
2889  * @message: describes the data transfers
2890  * Context: can sleep
2891  *
2892  * This call may only be used from a context that may sleep.  The sleep
2893  * is non-interruptible, and has no timeout.  Low-overhead controller
2894  * drivers may DMA directly into and out of the message buffers.
2895  *
2896  * Note that the SPI device's chip select is active during the message,
2897  * and then is normally disabled between messages.  Drivers for some
2898  * frequently-used devices may want to minimize costs of selecting a chip,
2899  * by leaving it selected in anticipation that the next message will go
2900  * to the same chip.  (That may increase power usage.)
2901  *
2902  * Also, the caller is guaranteeing that the memory associated with the
2903  * message will not be freed before this call returns.
2904  *
2905  * Return: zero on success, else a negative error code.
2906  */
2907 int spi_sync(struct spi_device *spi, struct spi_message *message)
2908 {
2909         int ret;
2910
2911         mutex_lock(&spi->master->bus_lock_mutex);
2912         ret = __spi_sync(spi, message);
2913         mutex_unlock(&spi->master->bus_lock_mutex);
2914
2915         return ret;
2916 }
2917 EXPORT_SYMBOL_GPL(spi_sync);
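/*
 * Illustrative sketch only (not part of this file): a typical spi_sync()
 * caller builds a message from one or more transfers on the stack and then
 * blocks until the controller has finished them.  The device pointer and
 * buffers are assumed to come from the calling driver.
 *
 *	static int example_spi_xfer(struct spi_device *spi,
 *				    const void *tx, void *rx, size_t len)
 *	{
 *		struct spi_transfer t = {
 *			.tx_buf	= tx,
 *			.rx_buf	= rx,
 *			.len	= len,
 *		};
 *		struct spi_message m;
 *
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t, &m);
 *		return spi_sync(spi, &m);
 *	}
 */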
2918
2919 /**
2920  * spi_sync_locked - version of spi_sync with exclusive bus usage
2921  * @spi: device with which data will be exchanged
2922  * @message: describes the data transfers
2923  * Context: can sleep
2924  *
2925  * This call may only be used from a context that may sleep.  The sleep
2926  * is non-interruptible, and has no timeout.  Low-overhead controller
2927  * drivers may DMA directly into and out of the message buffers.
2928  *
2929  * This call should be used by drivers that require exclusive access to the
2930  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2931  * be released by a spi_bus_unlock call when the exclusive access is over.
2932  *
2933  * Return: zero on success, else a negative error code.
2934  */
2935 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2936 {
2937         return __spi_sync(spi, message);
2938 }
2939 EXPORT_SYMBOL_GPL(spi_sync_locked);
2940
2941 /**
2942  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2943  * @master: SPI bus master that should be locked for exclusive bus access
2944  * Context: can sleep
2945  *
2946  * This call may only be used from a context that may sleep.  The sleep
2947  * is non-interruptible, and has no timeout.
2948  *
2949  * This call should be used by drivers that require exclusive access to the
2950  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2951  * exclusive access is over. Data transfer must be done by spi_sync_locked
2952  * and spi_async_locked calls when the SPI bus lock is held.
2953  *
2954  * Return: always zero.
2955  */
2956 int spi_bus_lock(struct spi_master *master)
2957 {
2958         unsigned long flags;
2959
2960         mutex_lock(&master->bus_lock_mutex);
2961
2962         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2963         master->bus_lock_flag = 1;
2964         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2965
2966         /* mutex remains locked until spi_bus_unlock is called */
2967
2968         return 0;
2969 }
2970 EXPORT_SYMBOL_GPL(spi_bus_lock);
2971
2972 /**
2973  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2974  * @master: SPI bus master that was locked for exclusive bus access
2975  * Context: can sleep
2976  *
2977  * This call may only be used from a context that may sleep.  The sleep
2978  * is non-interruptible, and has no timeout.
2979  *
2980  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2981  * call.
2982  *
2983  * Return: always zero.
2984  */
2985 int spi_bus_unlock(struct spi_master *master)
2986 {
2987         master->bus_lock_flag = 0;
2988
2989         mutex_unlock(&master->bus_lock_mutex);
2990
2991         return 0;
2992 }
2993 EXPORT_SYMBOL_GPL(spi_bus_unlock);
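/*
 * Illustrative sketch only (not part of this file): a driver that must issue
 * several messages back to back, with no other client interleaving traffic
 * on the bus, takes the bus lock, uses the *_locked transfer calls, and then
 * releases the lock.  The messages m1 and m2 are assumed to have been built
 * by the caller.
 *
 *	static int example_atomic_sequence(struct spi_device *spi,
 *					   struct spi_message *m1,
 *					   struct spi_message *m2)
 *	{
 *		int ret;
 *
 *		spi_bus_lock(spi->master);
 *		ret = spi_sync_locked(spi, m1);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, m2);
 *		spi_bus_unlock(spi->master);
 *		return ret;
 *	}
 */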
2994
2995 /* portable code must never pass more than 32 bytes */
2996 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2997
2998 static u8       *buf;
2999
3000 /**
3001  * spi_write_then_read - SPI synchronous write followed by read
3002  * @spi: device with which data will be exchanged
3003  * @txbuf: data to be written (need not be dma-safe)
3004  * @n_tx: size of txbuf, in bytes
3005  * @rxbuf: buffer into which data will be read (need not be dma-safe)
3006  * @n_rx: size of rxbuf, in bytes
3007  * Context: can sleep
3008  *
3009  * This performs a half duplex MicroWire style transaction with the
3010  * device, sending txbuf and then reading rxbuf.  The return value
3011  * is zero for success, else a negative errno status code.
3012  * This call may only be used from a context that may sleep.
3013  *
3014  * Parameters to this routine are always copied using a small buffer;
3015  * portable code should never use this for more than 32 bytes.
3016  * Performance-sensitive or bulk transfer code should instead use
3017  * spi_{async,sync}() calls with dma-safe buffers.
3018  *
3019  * Return: zero on success, else a negative error code.
3020  */
3021 int spi_write_then_read(struct spi_device *spi,
3022                 const void *txbuf, unsigned n_tx,
3023                 void *rxbuf, unsigned n_rx)
3024 {
3025         static DEFINE_MUTEX(lock);
3026
3027         int                     status;
3028         struct spi_message      message;
3029         struct spi_transfer     x[2];
3030         u8                      *local_buf;
3031
3032         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
3033          * copying here (it is purely a convenience), but we can
3034          * keep heap costs out of the hot path unless someone else is
3035          * using the pre-allocated buffer or the transfer is too large.
3036          */
3037         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3038                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3039                                     GFP_KERNEL | GFP_DMA);
3040                 if (!local_buf)
3041                         return -ENOMEM;
3042         } else {
3043                 local_buf = buf;
3044         }
3045
3046         spi_message_init(&message);
3047         memset(x, 0, sizeof(x));
3048         if (n_tx) {
3049                 x[0].len = n_tx;
3050                 spi_message_add_tail(&x[0], &message);
3051         }
3052         if (n_rx) {
3053                 x[1].len = n_rx;
3054                 spi_message_add_tail(&x[1], &message);
3055         }
3056
3057         memcpy(local_buf, txbuf, n_tx);
3058         x[0].tx_buf = local_buf;
3059         x[1].rx_buf = local_buf + n_tx;
3060
3061         /* do the i/o */
3062         status = spi_sync(spi, &message);
3063         if (status == 0)
3064                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3065
3066         if (x[0].tx_buf == buf)
3067                 mutex_unlock(&lock);
3068         else
3069                 kfree(local_buf);
3070
3071         return status;
3072 }
3073 EXPORT_SYMBOL_GPL(spi_write_then_read);
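/*
 * Illustrative sketch only (not part of this file): reading a few bytes back
 * after a one-byte command is the classic use of this helper.  The command
 * value below is a placeholder.
 *
 *	static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
 *	{
 *		const u8 cmd = 0x9f;
 *
 *		return spi_write_then_read(spi, &cmd, 1, id, len);
 *	}
 */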
3074
3075 /*-------------------------------------------------------------------------*/
3076
3077 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3078 static int __spi_of_device_match(struct device *dev, void *data)
3079 {
3080         return dev->of_node == data;
3081 }
3082
3083 /* must call put_device() when done with the returned spi_device */
3084 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3085 {
3086         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3087                                                 __spi_of_device_match);
3088         return dev ? to_spi_device(dev) : NULL;
3089 }
3090
3091 static int __spi_of_master_match(struct device *dev, const void *data)
3092 {
3093         return dev->of_node == data;
3094 }
3095
3096 /* The spi masters are not on the spi_bus, so we must look them up another way */
3097 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3098 {
3099         struct device *dev;
3100
3101         dev = class_find_device(&spi_master_class, NULL, node,
3102                                 __spi_of_master_match);
3103         if (!dev)
3104                 return NULL;
3105
3106         /* reference was taken in class_find_device */
3107         return container_of(dev, struct spi_master, dev);
3108 }
3109
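/* Keep SPI device registration in sync with dynamic devicetree changes,
 * e.g. overlays that add or remove nodes under an SPI master.
 */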
3110 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3111                          void *arg)
3112 {
3113         struct of_reconfig_data *rd = arg;
3114         struct spi_master *master;
3115         struct spi_device *spi;
3116
3117         switch (of_reconfig_get_state_change(action, arg)) {
3118         case OF_RECONFIG_CHANGE_ADD:
3119                 master = of_find_spi_master_by_node(rd->dn->parent);
3120                 if (master == NULL)
3121                         return NOTIFY_OK;       /* not for us */
3122
3123                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3124                         put_device(&master->dev);
3125                         return NOTIFY_OK;
3126                 }
3127
3128                 spi = of_register_spi_device(master, rd->dn);
3129                 put_device(&master->dev);
3130
3131                 if (IS_ERR(spi)) {
3132                         pr_err("%s: failed to create for '%s'\n",
3133                                         __func__, rd->dn->full_name);
3134                         return notifier_from_errno(PTR_ERR(spi));
3135                 }
3136                 break;
3137
3138         case OF_RECONFIG_CHANGE_REMOVE:
3139                 /* already depopulated? */
3140                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3141                         return NOTIFY_OK;
3142
3143                 /* find our device by node */
3144                 spi = of_find_spi_device_by_node(rd->dn);
3145                 if (spi == NULL)
3146                         return NOTIFY_OK;       /* no? not meant for us */
3147
3148                 /* unregister takes one ref away */
3149                 spi_unregister_device(spi);
3150
3151         /* and drop the reference taken by the find */
3152                 put_device(&spi->dev);
3153                 break;
3154         }
3155
3156         return NOTIFY_OK;
3157 }
3158
3159 static struct notifier_block spi_of_notifier = {
3160         .notifier_call = of_spi_notify,
3161 };
3162 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3163 extern struct notifier_block spi_of_notifier;
3164 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3165
3166 #if IS_ENABLED(CONFIG_ACPI)
3167 static int spi_acpi_master_match(struct device *dev, const void *data)
3168 {
3169         return ACPI_COMPANION(dev->parent) == data;
3170 }
3171
3172 static int spi_acpi_device_match(struct device *dev, void *data)
3173 {
3174         return ACPI_COMPANION(dev) == data;
3175 }
3176
3177 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
3178 {
3179         struct device *dev;
3180
3181         dev = class_find_device(&spi_master_class, NULL, adev,
3182                                 spi_acpi_master_match);
3183         if (!dev)
3184                 return NULL;
3185
3186         return container_of(dev, struct spi_master, dev);
3187 }
3188
3189 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3190 {
3191         struct device *dev;
3192
3193         dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3194
3195         return dev ? to_spi_device(dev) : NULL;
3196 }
3197
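/* Keep SPI device registration in sync with ACPI hot add/remove events. */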
3198 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3199                            void *arg)
3200 {
3201         struct acpi_device *adev = arg;
3202         struct spi_master *master;
3203         struct spi_device *spi;
3204
3205         switch (value) {
3206         case ACPI_RECONFIG_DEVICE_ADD:
3207                 master = acpi_spi_find_master_by_adev(adev->parent);
3208                 if (!master)
3209                         break;
3210
3211                 acpi_register_spi_device(master, adev);
3212                 put_device(&master->dev);
3213                 break;
3214         case ACPI_RECONFIG_DEVICE_REMOVE:
3215                 if (!acpi_device_enumerated(adev))
3216                         break;
3217
3218                 spi = acpi_spi_find_device_by_adev(adev);
3219                 if (!spi)
3220                         break;
3221
3222                 spi_unregister_device(spi);
3223                 put_device(&spi->dev);
3224                 break;
3225         }
3226
3227         return NOTIFY_OK;
3228 }
3229
3230 static struct notifier_block spi_acpi_notifier = {
3231         .notifier_call = acpi_spi_notify,
3232 };
3233 #else
3234 extern struct notifier_block spi_acpi_notifier;
3235 #endif
3236
3237 static int __init spi_init(void)
3238 {
3239         int     status;
3240
3241         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3242         if (!buf) {
3243                 status = -ENOMEM;
3244                 goto err0;
3245         }
3246
3247         status = bus_register(&spi_bus_type);
3248         if (status < 0)
3249                 goto err1;
3250
3251         status = class_register(&spi_master_class);
3252         if (status < 0)
3253                 goto err2;
3254
3255         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3256                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3257         if (IS_ENABLED(CONFIG_ACPI))
3258                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3259
3260         return 0;
3261
3262 err2:
3263         bus_unregister(&spi_bus_type);
3264 err1:
3265         kfree(buf);
3266         buf = NULL;
3267 err0:
3268         return status;
3269 }
3270
3271 /* board_info is normally registered in arch_initcall(),
3272  * but even essential drivers wait till later.
3273  *
3274  * REVISIT only boardinfo really needs static linking.  The rest (device and
3275  * driver registration) _could_ be dynamically linked (modular) ... costs
3276  * include needing to have boardinfo data structures be much more public.
3277  */
3278 postcore_initcall(spi_init);
3279