1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/cache.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mutex.h>
25 #include <linux/of_device.h>
26 #include <linux/of_irq.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/slab.h>
29 #include <linux/mod_devicetable.h>
30 #include <linux/spi/spi.h>
31 #include <linux/of_gpio.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_domain.h>
34 #include <linux/export.h>
35 #include <linux/sched/rt.h>
36 #include <linux/delay.h>
37 #include <linux/kthread.h>
38 #include <linux/ioport.h>
39 #include <linux/acpi.h>
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/spi.h>
43
44 static void spidev_release(struct device *dev)
45 {
46         struct spi_device       *spi = to_spi_device(dev);
47
48         /* spi masters may clean up for released devices */
49         if (spi->master->cleanup)
50                 spi->master->cleanup(spi);
51
52         spi_master_put(spi->master);
53         kfree(spi);
54 }
55
56 static ssize_t
57 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
58 {
59         const struct spi_device *spi = to_spi_device(dev);
60         int len;
61
62         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
63         if (len != -ENODEV)
64                 return len;
65
66         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
67 }
68 static DEVICE_ATTR_RO(modalias);
69
70 #define SPI_STATISTICS_ATTRS(field, file)                               \
71 static ssize_t spi_master_##field##_show(struct device *dev,            \
72                                          struct device_attribute *attr, \
73                                          char *buf)                     \
74 {                                                                       \
75         struct spi_master *master = container_of(dev,                   \
76                                                  struct spi_master, dev); \
77         return spi_statistics_##field##_show(&master->statistics, buf); \
78 }                                                                       \
79 static struct device_attribute dev_attr_spi_master_##field = {          \
80         .attr = { .name = file, .mode = S_IRUGO },                      \
81         .show = spi_master_##field##_show,                              \
82 };                                                                      \
83 static ssize_t spi_device_##field##_show(struct device *dev,            \
84                                          struct device_attribute *attr, \
85                                         char *buf)                      \
86 {                                                                       \
87         struct spi_device *spi = to_spi_device(dev);                    \
88         return spi_statistics_##field##_show(&spi->statistics, buf);    \
89 }                                                                       \
90 static struct device_attribute dev_attr_spi_device_##field = {          \
91         .attr = { .name = file, .mode = S_IRUGO },                      \
92         .show = spi_device_##field##_show,                              \
93 }
94
95 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
96 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
97                                             char *buf)                  \
98 {                                                                       \
99         unsigned long flags;                                            \
100         ssize_t len;                                                    \
101         spin_lock_irqsave(&stat->lock, flags);                          \
102         len = sprintf(buf, format_string, stat->field);                 \
103         spin_unlock_irqrestore(&stat->lock, flags);                     \
104         return len;                                                     \
105 }                                                                       \
106 SPI_STATISTICS_ATTRS(name, file)
107
108 #define SPI_STATISTICS_SHOW(field, format_string)                       \
109         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
110                                  field, format_string)
111
112 SPI_STATISTICS_SHOW(messages, "%lu");
113 SPI_STATISTICS_SHOW(transfers, "%lu");
114 SPI_STATISTICS_SHOW(errors, "%lu");
115 SPI_STATISTICS_SHOW(timedout, "%lu");
116
117 SPI_STATISTICS_SHOW(spi_sync, "%lu");
118 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
119 SPI_STATISTICS_SHOW(spi_async, "%lu");
120
121 SPI_STATISTICS_SHOW(bytes, "%llu");
122 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
123 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
124
125 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
126         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
127                                  "transfer_bytes_histo_" number,        \
128                                  transfer_bytes_histo[index],  "%lu")
129 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
130 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
146
147 static struct attribute *spi_dev_attrs[] = {
148         &dev_attr_modalias.attr,
149         NULL,
150 };
151
152 static const struct attribute_group spi_dev_group = {
153         .attrs  = spi_dev_attrs,
154 };
155
156 static struct attribute *spi_device_statistics_attrs[] = {
157         &dev_attr_spi_device_messages.attr,
158         &dev_attr_spi_device_transfers.attr,
159         &dev_attr_spi_device_errors.attr,
160         &dev_attr_spi_device_timedout.attr,
161         &dev_attr_spi_device_spi_sync.attr,
162         &dev_attr_spi_device_spi_sync_immediate.attr,
163         &dev_attr_spi_device_spi_async.attr,
164         &dev_attr_spi_device_bytes.attr,
165         &dev_attr_spi_device_bytes_rx.attr,
166         &dev_attr_spi_device_bytes_tx.attr,
167         &dev_attr_spi_device_transfer_bytes_histo0.attr,
168         &dev_attr_spi_device_transfer_bytes_histo1.attr,
169         &dev_attr_spi_device_transfer_bytes_histo2.attr,
170         &dev_attr_spi_device_transfer_bytes_histo3.attr,
171         &dev_attr_spi_device_transfer_bytes_histo4.attr,
172         &dev_attr_spi_device_transfer_bytes_histo5.attr,
173         &dev_attr_spi_device_transfer_bytes_histo6.attr,
174         &dev_attr_spi_device_transfer_bytes_histo7.attr,
175         &dev_attr_spi_device_transfer_bytes_histo8.attr,
176         &dev_attr_spi_device_transfer_bytes_histo9.attr,
177         &dev_attr_spi_device_transfer_bytes_histo10.attr,
178         &dev_attr_spi_device_transfer_bytes_histo11.attr,
179         &dev_attr_spi_device_transfer_bytes_histo12.attr,
180         &dev_attr_spi_device_transfer_bytes_histo13.attr,
181         &dev_attr_spi_device_transfer_bytes_histo14.attr,
182         &dev_attr_spi_device_transfer_bytes_histo15.attr,
183         &dev_attr_spi_device_transfer_bytes_histo16.attr,
184         NULL,
185 };
186
187 static const struct attribute_group spi_device_statistics_group = {
188         .name  = "statistics",
189         .attrs  = spi_device_statistics_attrs,
190 };
191
192 static const struct attribute_group *spi_dev_groups[] = {
193         &spi_dev_group,
194         &spi_device_statistics_group,
195         NULL,
196 };
197
198 static struct attribute *spi_master_statistics_attrs[] = {
199         &dev_attr_spi_master_messages.attr,
200         &dev_attr_spi_master_transfers.attr,
201         &dev_attr_spi_master_errors.attr,
202         &dev_attr_spi_master_timedout.attr,
203         &dev_attr_spi_master_spi_sync.attr,
204         &dev_attr_spi_master_spi_sync_immediate.attr,
205         &dev_attr_spi_master_spi_async.attr,
206         &dev_attr_spi_master_bytes.attr,
207         &dev_attr_spi_master_bytes_rx.attr,
208         &dev_attr_spi_master_bytes_tx.attr,
209         &dev_attr_spi_master_transfer_bytes_histo0.attr,
210         &dev_attr_spi_master_transfer_bytes_histo1.attr,
211         &dev_attr_spi_master_transfer_bytes_histo2.attr,
212         &dev_attr_spi_master_transfer_bytes_histo3.attr,
213         &dev_attr_spi_master_transfer_bytes_histo4.attr,
214         &dev_attr_spi_master_transfer_bytes_histo5.attr,
215         &dev_attr_spi_master_transfer_bytes_histo6.attr,
216         &dev_attr_spi_master_transfer_bytes_histo7.attr,
217         &dev_attr_spi_master_transfer_bytes_histo8.attr,
218         &dev_attr_spi_master_transfer_bytes_histo9.attr,
219         &dev_attr_spi_master_transfer_bytes_histo10.attr,
220         &dev_attr_spi_master_transfer_bytes_histo11.attr,
221         &dev_attr_spi_master_transfer_bytes_histo12.attr,
222         &dev_attr_spi_master_transfer_bytes_histo13.attr,
223         &dev_attr_spi_master_transfer_bytes_histo14.attr,
224         &dev_attr_spi_master_transfer_bytes_histo15.attr,
225         &dev_attr_spi_master_transfer_bytes_histo16.attr,
226         NULL,
227 };
228
229 static const struct attribute_group spi_master_statistics_group = {
230         .name  = "statistics",
231         .attrs  = spi_master_statistics_attrs,
232 };
233
234 static const struct attribute_group *spi_master_groups[] = {
235         &spi_master_statistics_group,
236         NULL,
237 };
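
/*
 * Illustrative sketch (not part of this file): the statistics groups above
 * are exposed under a "statistics" directory in sysfs.  Assuming a master
 * registered as spi0 with a child device at chip select 0, the counters
 * would typically be readable as:
 *
 *	/sys/class/spi_master/spi0/statistics/messages
 *	/sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 */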
238
239 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
240                                        struct spi_transfer *xfer,
241                                        struct spi_master *master)
242 {
243         unsigned long flags;
244         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
245
246         if (l2len < 0)
247                 l2len = 0;
248
249         spin_lock_irqsave(&stats->lock, flags);
250
251         stats->transfers++;
252         stats->transfer_bytes_histo[l2len]++;
253
254         stats->bytes += xfer->len;
255         if ((xfer->tx_buf) &&
256             (xfer->tx_buf != master->dummy_tx))
257                 stats->bytes_tx += xfer->len;
258         if ((xfer->rx_buf) &&
259             (xfer->rx_buf != master->dummy_rx))
260                 stats->bytes_rx += xfer->len;
261
262         spin_unlock_irqrestore(&stats->lock, flags);
263 }
264 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
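
/*
 * Worked example for the histogram bucketing above (illustrative): a 100
 * byte transfer gives fls(100) = 7, so l2len = 6 and the "64-127" bucket
 * is incremented; a zero length transfer gives l2len = -1, which is
 * clamped to 0, i.e. the "0-1" bucket.
 */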
265
266 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
267  * and the sysfs version makes coldplug work too.
268  */
269
270 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
271                                                 const struct spi_device *sdev)
272 {
273         while (id->name[0]) {
274                 if (!strcmp(sdev->modalias, id->name))
275                         return id;
276                 id++;
277         }
278         return NULL;
279 }
280
281 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
282 {
283         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
284
285         return spi_match_id(sdrv->id_table, sdev);
286 }
287 EXPORT_SYMBOL_GPL(spi_get_device_id);
288
289 static int spi_match_device(struct device *dev, struct device_driver *drv)
290 {
291         const struct spi_device *spi = to_spi_device(dev);
292         const struct spi_driver *sdrv = to_spi_driver(drv);
293
294         /* Attempt an OF style match */
295         if (of_driver_match_device(dev, drv))
296                 return 1;
297
298         /* Then try ACPI */
299         if (acpi_driver_match_device(dev, drv))
300                 return 1;
301
302         if (sdrv->id_table)
303                 return !!spi_match_id(sdrv->id_table, spi);
304
305         return strcmp(spi->modalias, drv->name) == 0;
306 }
307
308 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
309 {
310         const struct spi_device         *spi = to_spi_device(dev);
311         int rc;
312
313         rc = acpi_device_uevent_modalias(dev, env);
314         if (rc != -ENODEV)
315                 return rc;
316
317         add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
318         return 0;
319 }
320
321 struct bus_type spi_bus_type = {
322         .name           = "spi",
323         .dev_groups     = spi_dev_groups,
324         .match          = spi_match_device,
325         .uevent         = spi_uevent,
326 };
327 EXPORT_SYMBOL_GPL(spi_bus_type);
328
329
330 static int spi_drv_probe(struct device *dev)
331 {
332         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
333         struct spi_device               *spi = to_spi_device(dev);
334         int ret;
335
336         ret = of_clk_set_defaults(dev->of_node, false);
337         if (ret)
338                 return ret;
339
340         if (dev->of_node) {
341                 spi->irq = of_irq_get(dev->of_node, 0);
342                 if (spi->irq == -EPROBE_DEFER)
343                         return -EPROBE_DEFER;
344                 if (spi->irq < 0)
345                         spi->irq = 0;
346         }
347
348         ret = dev_pm_domain_attach(dev, true);
349         if (ret != -EPROBE_DEFER) {
350                 ret = sdrv->probe(spi);
351                 if (ret)
352                         dev_pm_domain_detach(dev, true);
353         }
354
355         return ret;
356 }
357
358 static int spi_drv_remove(struct device *dev)
359 {
360         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
361         int ret;
362
363         ret = sdrv->remove(to_spi_device(dev));
364         dev_pm_domain_detach(dev, true);
365
366         return ret;
367 }
368
369 static void spi_drv_shutdown(struct device *dev)
370 {
371         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
372
373         sdrv->shutdown(to_spi_device(dev));
374 }
375
376 /**
377  * __spi_register_driver - register a SPI driver
378  * @owner: owner module of the driver to register
379  * @sdrv: the driver to register
380  * Context: can sleep
381  *
382  * Return: zero on success, else a negative error code.
383  */
384 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
385 {
386         sdrv->driver.owner = owner;
387         sdrv->driver.bus = &spi_bus_type;
388         if (sdrv->probe)
389                 sdrv->driver.probe = spi_drv_probe;
390         if (sdrv->remove)
391                 sdrv->driver.remove = spi_drv_remove;
392         if (sdrv->shutdown)
393                 sdrv->driver.shutdown = spi_drv_shutdown;
394         return driver_register(&sdrv->driver);
395 }
396 EXPORT_SYMBOL_GPL(__spi_register_driver);
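
/*
 * Illustrative sketch (not part of this file): client drivers normally do
 * not call __spi_register_driver() directly but use the
 * spi_register_driver()/module_spi_driver() helpers, roughly like:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * "foo", foo_probe() and foo_remove() are hypothetical names used only
 * for this example.
 */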
397
398 /*-------------------------------------------------------------------------*/
399
400 /* SPI devices should normally not be created by SPI device drivers; that
401  * would make them board-specific.  Similarly with SPI master drivers.
402  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
403  * with other readonly (flashable) information about mainboard devices.
404  */
405
406 struct boardinfo {
407         struct list_head        list;
408         struct spi_board_info   board_info;
409 };
410
411 static LIST_HEAD(board_list);
412 static LIST_HEAD(spi_master_list);
413
414 /*
415  * Used to protect add/del operations for the board_info list and
416  * spi_master list, and their matching process
417  */
418 static DEFINE_MUTEX(board_lock);
419
420 /**
421  * spi_alloc_device - Allocate a new SPI device
422  * @master: Controller to which device is connected
423  * Context: can sleep
424  *
425  * Allows a driver to allocate and initialize a spi_device without
426  * registering it immediately.  This allows a driver to directly
427  * fill the spi_device with device parameters before calling
428  * spi_add_device() on it.
429  *
430  * The caller is responsible for calling spi_add_device() on the returned
431  * spi_device structure to add it to the SPI master.  If the caller
432  * needs to discard the spi_device without adding it, then it should
433  * call spi_dev_put() on it.
434  *
435  * Return: a pointer to the new device, or NULL.
436  */
437 struct spi_device *spi_alloc_device(struct spi_master *master)
438 {
439         struct spi_device       *spi;
440
441         if (!spi_master_get(master))
442                 return NULL;
443
444         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
445         if (!spi) {
446                 spi_master_put(master);
447                 return NULL;
448         }
449
450         spi->master = master;
451         spi->dev.parent = &master->dev;
452         spi->dev.bus = &spi_bus_type;
453         spi->dev.release = spidev_release;
454         spi->cs_gpio = -ENOENT;
455
456         spin_lock_init(&spi->statistics.lock);
457
458         device_initialize(&spi->dev);
459         return spi;
460 }
461 EXPORT_SYMBOL_GPL(spi_alloc_device);
462
463 static void spi_dev_set_name(struct spi_device *spi)
464 {
465         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
466
467         if (adev) {
468                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
469                 return;
470         }
471
472         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
473                      spi->chip_select);
474 }
475
476 static int spi_dev_check(struct device *dev, void *data)
477 {
478         struct spi_device *spi = to_spi_device(dev);
479         struct spi_device *new_spi = data;
480
481         if (spi->master == new_spi->master &&
482             spi->chip_select == new_spi->chip_select)
483                 return -EBUSY;
484         return 0;
485 }
486
487 /**
488  * spi_add_device - Add spi_device allocated with spi_alloc_device
489  * @spi: spi_device to register
490  *
491  * Companion function to spi_alloc_device.  Devices allocated with
492  * spi_alloc_device can be added onto the spi bus with this function.
493  *
494  * Return: 0 on success; negative errno on failure
495  */
496 int spi_add_device(struct spi_device *spi)
497 {
498         static DEFINE_MUTEX(spi_add_lock);
499         struct spi_master *master = spi->master;
500         struct device *dev = master->dev.parent;
501         int status;
502
503         /* Chipselects are numbered 0..max; validate. */
504         if (spi->chip_select >= master->num_chipselect) {
505                 dev_err(dev, "cs%d >= max %d\n",
506                         spi->chip_select,
507                         master->num_chipselect);
508                 return -EINVAL;
509         }
510
511         /* Set the bus ID string */
512         spi_dev_set_name(spi);
513
514         /* We need to make sure there's no other device with this
515          * chipselect **BEFORE** we call setup(), else we'll trash
516          * its configuration.  Lock against concurrent add() calls.
517          */
518         mutex_lock(&spi_add_lock);
519
520         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
521         if (status) {
522                 dev_err(dev, "chipselect %d already in use\n",
523                                 spi->chip_select);
524                 goto done;
525         }
526
527         if (master->cs_gpios)
528                 spi->cs_gpio = master->cs_gpios[spi->chip_select];
529
530         /* Drivers may modify this initial i/o setup, but will
531          * normally rely on the device being set up.  Devices
532          * using SPI_CS_HIGH can't coexist well otherwise...
533          */
534         status = spi_setup(spi);
535         if (status < 0) {
536                 dev_err(dev, "can't setup %s, status %d\n",
537                                 dev_name(&spi->dev), status);
538                 goto done;
539         }
540
541         /* Device may be bound to an active driver when this returns */
542         status = device_add(&spi->dev);
543         if (status < 0)
544                 dev_err(dev, "can't add %s, status %d\n",
545                                 dev_name(&spi->dev), status);
546         else
547                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
548
549 done:
550         mutex_unlock(&spi_add_lock);
551         return status;
552 }
553 EXPORT_SYMBOL_GPL(spi_add_device);
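
/*
 * Illustrative sketch of the two-step allocate/add flow described above
 * (hypothetical "foo" device, not part of this file):
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 *
 * Note that a device which was never added is discarded with
 * spi_dev_put(), not spi_unregister_device().
 */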
554
555 /**
556  * spi_new_device - instantiate one new SPI device
557  * @master: Controller to which device is connected
558  * @chip: Describes the SPI device
559  * Context: can sleep
560  *
561  * On typical mainboards, this is purely internal, and it's not needed
562  * after board init creates the hard-wired devices.  Some development
563  * platforms may not be able to use spi_register_board_info though, and
564  * this is exported so that for example a USB or parport based adapter
565  * driver could add devices (which it would learn about out-of-band).
566  *
567  * Return: the new device, or NULL.
568  */
569 struct spi_device *spi_new_device(struct spi_master *master,
570                                   struct spi_board_info *chip)
571 {
572         struct spi_device       *proxy;
573         int                     status;
574
575         /* NOTE:  caller did any chip->bus_num checks necessary.
576          *
577          * Also, unless we change the return value convention to use
578          * error-or-pointer (not NULL-or-pointer), troubleshootability
579          * suggests syslogged diagnostics are best here (ugh).
580          */
581
582         proxy = spi_alloc_device(master);
583         if (!proxy)
584                 return NULL;
585
586         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
587
588         proxy->chip_select = chip->chip_select;
589         proxy->max_speed_hz = chip->max_speed_hz;
590         proxy->mode = chip->mode;
591         proxy->irq = chip->irq;
592         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
593         proxy->dev.platform_data = (void *) chip->platform_data;
594         proxy->controller_data = chip->controller_data;
595         proxy->controller_state = NULL;
596
597         status = spi_add_device(proxy);
598         if (status < 0) {
599                 spi_dev_put(proxy);
600                 return NULL;
601         }
602
603         return proxy;
604 }
605 EXPORT_SYMBOL_GPL(spi_new_device);
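
/*
 * Illustrative sketch (hypothetical adapter driver, not part of this
 * file): a driver that learns about an attached chip at runtime can
 * describe it in a spi_board_info and hand it to spi_new_device():
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *	};
 *	struct spi_device *dev = spi_new_device(master, &chip);
 *
 * As noted above, a NULL return has to be treated as the (unspecified)
 * failure, with details left to the syslog.
 */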
606
607 /**
608  * spi_unregister_device - unregister a single SPI device
609  * @spi: spi_device to unregister
610  *
611  * Start making the passed SPI device vanish. Normally this would be handled
612  * by spi_unregister_master().
613  */
614 void spi_unregister_device(struct spi_device *spi)
615 {
616         if (!spi)
617                 return;
618
619         if (spi->dev.of_node)
620                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
621         device_unregister(&spi->dev);
622 }
623 EXPORT_SYMBOL_GPL(spi_unregister_device);
624
625 static void spi_match_master_to_boardinfo(struct spi_master *master,
626                                 struct spi_board_info *bi)
627 {
628         struct spi_device *dev;
629
630         if (master->bus_num != bi->bus_num)
631                 return;
632
633         dev = spi_new_device(master, bi);
634         if (!dev)
635                 dev_err(master->dev.parent, "can't create new device for %s\n",
636                         bi->modalias);
637 }
638
639 /**
640  * spi_register_board_info - register SPI devices for a given board
641  * @info: array of chip descriptors
642  * @n: how many descriptors are provided
643  * Context: can sleep
644  *
645  * Board-specific early init code calls this (probably during arch_initcall)
646  * with segments of the SPI device table.  Any device nodes are created later,
647  * after the relevant parent SPI controller (bus_num) is defined.  We keep
648  * this table of devices forever, so that reloading a controller driver will
649  * not make Linux forget about these hard-wired devices.
650  *
651  * Other code can also call this, e.g. a particular add-on board might provide
652  * SPI devices through its expansion connector, so code initializing that board
653  * would naturally declare its SPI devices.
654  *
655  * The board info passed can safely be __initdata ... but be careful of
656  * any embedded pointers (platform_data, etc), they're copied as-is.
657  *
658  * Return: zero on success, else a negative error code.
659  */
660 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
661 {
662         struct boardinfo *bi;
663         int i;
664
665         if (!n)
666                 return -EINVAL;
667
668         bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
669         if (!bi)
670                 return -ENOMEM;
671
672         for (i = 0; i < n; i++, bi++, info++) {
673                 struct spi_master *master;
674
675                 memcpy(&bi->board_info, info, sizeof(*info));
676                 mutex_lock(&board_lock);
677                 list_add_tail(&bi->list, &board_list);
678                 list_for_each_entry(master, &spi_master_list, list)
679                         spi_match_master_to_boardinfo(master, &bi->board_info);
680                 mutex_unlock(&board_lock);
681         }
682
683         return 0;
684 }
685
686 /*-------------------------------------------------------------------------*/
687
688 static void spi_set_cs(struct spi_device *spi, bool enable)
689 {
690         if (spi->mode & SPI_CS_HIGH)
691                 enable = !enable;
692
693         if (gpio_is_valid(spi->cs_gpio))
694                 gpio_set_value(spi->cs_gpio, !enable);
695         else if (spi->master->set_cs)
696                 spi->master->set_cs(spi, !enable);
697 }
698
699 #ifdef CONFIG_HAS_DMA
700 static int spi_map_buf(struct spi_master *master, struct device *dev,
701                        struct sg_table *sgt, void *buf, size_t len,
702                        enum dma_data_direction dir)
703 {
704         const bool vmalloced_buf = is_vmalloc_addr(buf);
705         unsigned int max_seg_size = dma_get_max_seg_size(dev);
706         int desc_len;
707         int sgs;
708         struct page *vm_page;
709         void *sg_buf;
710         size_t min;
711         int i, ret;
712
713         if (vmalloced_buf) {
714                 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
715                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
716         } else {
717                 desc_len = min_t(int, max_seg_size, master->max_dma_len);
718                 sgs = DIV_ROUND_UP(len, desc_len);
719         }
720
721         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
722         if (ret != 0)
723                 return ret;
724
725         for (i = 0; i < sgs; i++) {
726
727                 if (vmalloced_buf) {
728                         min = min_t(size_t,
729                                     len, desc_len - offset_in_page(buf));
730                         vm_page = vmalloc_to_page(buf);
731                         if (!vm_page) {
732                                 sg_free_table(sgt);
733                                 return -ENOMEM;
734                         }
735                         sg_set_page(&sgt->sgl[i], vm_page,
736                                     min, offset_in_page(buf));
737                 } else {
738                         min = min_t(size_t, len, desc_len);
739                         sg_buf = buf;
740                         sg_set_buf(&sgt->sgl[i], sg_buf, min);
741                 }
742
743                 buf += min;
744                 len -= min;
745         }
746
747         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
748         if (!ret)
749                 ret = -ENOMEM;
750         if (ret < 0) {
751                 sg_free_table(sgt);
752                 return ret;
753         }
754
755         sgt->nents = ret;
756
757         return 0;
758 }
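
/*
 * Worked example for the segment split above (illustrative, assuming a
 * 4096 byte PAGE_SIZE and a device max segment size of at least that):
 * mapping a 6000 byte vmalloc'ed buffer that starts 512 bytes into a page
 * gives sgs = DIV_ROUND_UP(6000 + 512, 4096) = 2 scatterlist entries, the
 * first covering the remaining 3584 bytes of that page and the second the
 * 2416 bytes spilling into the next page.
 */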
759
760 static void spi_unmap_buf(struct spi_master *master, struct device *dev,
761                           struct sg_table *sgt, enum dma_data_direction dir)
762 {
763         if (sgt->orig_nents) {
764                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
765                 sg_free_table(sgt);
766         }
767 }
768
769 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
770 {
771         struct device *tx_dev, *rx_dev;
772         struct spi_transfer *xfer;
773         int ret;
774
775         if (!master->can_dma)
776                 return 0;
777
778         if (master->dma_tx)
779                 tx_dev = master->dma_tx->device->dev;
780         else
781                 tx_dev = &master->dev;
782
783         if (master->dma_rx)
784                 rx_dev = master->dma_rx->device->dev;
785         else
786                 rx_dev = &master->dev;
787
788         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
789                 if (!master->can_dma(master, msg->spi, xfer))
790                         continue;
791
792                 if (xfer->tx_buf != NULL) {
793                         ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
794                                           (void *)xfer->tx_buf, xfer->len,
795                                           DMA_TO_DEVICE);
796                         if (ret != 0)
797                                 return ret;
798                 }
799
800                 if (xfer->rx_buf != NULL) {
801                         ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
802                                           xfer->rx_buf, xfer->len,
803                                           DMA_FROM_DEVICE);
804                         if (ret != 0) {
805                                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
806                                               DMA_TO_DEVICE);
807                                 return ret;
808                         }
809                 }
810         }
811
812         master->cur_msg_mapped = true;
813
814         return 0;
815 }
816
817 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
818 {
819         struct spi_transfer *xfer;
820         struct device *tx_dev, *rx_dev;
821
822         if (!master->cur_msg_mapped || !master->can_dma)
823                 return 0;
824
825         if (master->dma_tx)
826                 tx_dev = master->dma_tx->device->dev;
827         else
828                 tx_dev = &master->dev;
829
830         if (master->dma_rx)
831                 rx_dev = master->dma_rx->device->dev;
832         else
833                 rx_dev = &master->dev;
834
835         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
836                 if (!master->can_dma(master, msg->spi, xfer))
837                         continue;
838
839                 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
840                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
841         }
842
843         return 0;
844 }
845 #else /* !CONFIG_HAS_DMA */
846 static inline int __spi_map_msg(struct spi_master *master,
847                                 struct spi_message *msg)
848 {
849         return 0;
850 }
851
852 static inline int __spi_unmap_msg(struct spi_master *master,
853                                   struct spi_message *msg)
854 {
855         return 0;
856 }
857 #endif /* !CONFIG_HAS_DMA */
858
859 static inline int spi_unmap_msg(struct spi_master *master,
860                                 struct spi_message *msg)
861 {
862         struct spi_transfer *xfer;
863
864         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
865                 /*
866                  * Restore tx_buf and rx_buf to their original NULL values if they
867                  * were replaced with the dummy buffers by spi_map_msg().
868                  */
869                 if (xfer->tx_buf == master->dummy_tx)
870                         xfer->tx_buf = NULL;
871                 if (xfer->rx_buf == master->dummy_rx)
872                         xfer->rx_buf = NULL;
873         }
874
875         return __spi_unmap_msg(master, msg);
876 }
877
878 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
879 {
880         struct spi_transfer *xfer;
881         void *tmp;
882         unsigned int max_tx, max_rx;
883
884         if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
885                 max_tx = 0;
886                 max_rx = 0;
887
888                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
889                         if ((master->flags & SPI_MASTER_MUST_TX) &&
890                             !xfer->tx_buf)
891                                 max_tx = max(xfer->len, max_tx);
892                         if ((master->flags & SPI_MASTER_MUST_RX) &&
893                             !xfer->rx_buf)
894                                 max_rx = max(xfer->len, max_rx);
895                 }
896
897                 if (max_tx) {
898                         tmp = krealloc(master->dummy_tx, max_tx,
899                                        GFP_KERNEL | GFP_DMA);
900                         if (!tmp)
901                                 return -ENOMEM;
902                         master->dummy_tx = tmp;
903                         memset(tmp, 0, max_tx);
904                 }
905
906                 if (max_rx) {
907                         tmp = krealloc(master->dummy_rx, max_rx,
908                                        GFP_KERNEL | GFP_DMA);
909                         if (!tmp)
910                                 return -ENOMEM;
911                         master->dummy_rx = tmp;
912                 }
913
914                 if (max_tx || max_rx) {
915                         list_for_each_entry(xfer, &msg->transfers,
916                                             transfer_list) {
917                                 if (!xfer->tx_buf)
918                                         xfer->tx_buf = master->dummy_tx;
919                                 if (!xfer->rx_buf)
920                                         xfer->rx_buf = master->dummy_rx;
921                         }
922                 }
923         }
924
925         return __spi_map_msg(master, msg);
926 }
927
928 /*
929  * spi_transfer_one_message - Default implementation of transfer_one_message()
930  *
931  * This is a standard implementation of transfer_one_message() for
932  * drivers which implement a transfer_one() operation.  It provides
933  * standard handling of delays and chip select management.
934  */
935 static int spi_transfer_one_message(struct spi_master *master,
936                                     struct spi_message *msg)
937 {
938         struct spi_transfer *xfer;
939         bool keep_cs = false;
940         int ret = 0;
941         unsigned long ms = 1;
942         struct spi_statistics *statm = &master->statistics;
943         struct spi_statistics *stats = &msg->spi->statistics;
944
945         spi_set_cs(msg->spi, true);
946
947         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
948         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
949
950         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
951                 trace_spi_transfer_start(msg, xfer);
952
953                 spi_statistics_add_transfer_stats(statm, xfer, master);
954                 spi_statistics_add_transfer_stats(stats, xfer, master);
955
956                 if (xfer->tx_buf || xfer->rx_buf) {
957                         reinit_completion(&master->xfer_completion);
958
959                         ret = master->transfer_one(master, msg->spi, xfer);
960                         if (ret < 0) {
961                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
962                                                                errors);
963                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
964                                                                errors);
965                                 dev_err(&msg->spi->dev,
966                                         "SPI transfer failed: %d\n", ret);
967                                 goto out;
968                         }
969
970                         if (ret > 0) {
971                                 ret = 0;
972                                 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
973                                 ms += ms + 100; /* some tolerance */
974
975                                 ms = wait_for_completion_timeout(&master->xfer_completion,
976                                                                  msecs_to_jiffies(ms));
977                         }
978
979                         if (ms == 0) {
980                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
981                                                                timedout);
982                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
983                                                                timedout);
984                                 dev_err(&msg->spi->dev,
985                                         "SPI transfer timed out\n");
986                                 msg->status = -ETIMEDOUT;
987                         }
988                 } else {
989                         if (xfer->len)
990                                 dev_err(&msg->spi->dev,
991                                         "Bufferless transfer has length %u\n",
992                                         xfer->len);
993                 }
994
995                 trace_spi_transfer_stop(msg, xfer);
996
997                 if (msg->status != -EINPROGRESS)
998                         goto out;
999
1000                 if (xfer->delay_usecs)
1001                         udelay(xfer->delay_usecs);
1002
1003                 if (xfer->cs_change) {
1004                         if (list_is_last(&xfer->transfer_list,
1005                                          &msg->transfers)) {
1006                                 keep_cs = true;
1007                         } else {
1008                                 spi_set_cs(msg->spi, false);
1009                                 udelay(10);
1010                                 spi_set_cs(msg->spi, true);
1011                         }
1012                 }
1013
1014                 msg->actual_length += xfer->len;
1015         }
1016
1017 out:
1018         if (ret != 0 || !keep_cs)
1019                 spi_set_cs(msg->spi, false);
1020
1021         if (msg->status == -EINPROGRESS)
1022                 msg->status = ret;
1023
1024         if (msg->status && master->handle_err)
1025                 master->handle_err(master, msg);
1026
1027         spi_finalize_current_message(master);
1028
1029         return ret;
1030 }
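
/*
 * Worked example for the timeout above (illustrative): a 4096 byte
 * transfer at 1 MHz gives ms = 4096 * 8 * 1000 / 1000000 = 32, and the
 * tolerance line turns that into 32 + 32 + 100 = 164 ms before the
 * transfer is declared timed out.
 */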
1031
1032 /**
1033  * spi_finalize_current_transfer - report completion of a transfer
1034  * @master: the master reporting completion
1035  *
1036  * Called by SPI drivers using the core transfer_one_message()
1037  * implementation to notify it that the current interrupt driven
1038  * transfer has finished and the next one may be scheduled.
1039  */
1040 void spi_finalize_current_transfer(struct spi_master *master)
1041 {
1042         complete(&master->xfer_completion);
1043 }
1044 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
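
/*
 * Illustrative sketch (hypothetical controller driver, not part of this
 * file): a driver relying on the default transfer_one_message()
 * implements transfer_one(), returns a positive value when the transfer
 * will complete asynchronously, and signals completion from its
 * interrupt handler:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_transfer(master, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 *
 * foo_transfer_one(), foo_start_transfer() and foo_irq() are hypothetical
 * names; returning 1 makes the core wait on master->xfer_completion.
 */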
1045
1046 /**
1047  * __spi_pump_messages - function which processes spi message queue
1048  * @master: master to process queue for
1049  * @in_kthread: true if we are in the context of the message pump thread
1050  *
1051  * This function checks if there is any spi message in the queue that
1052  * needs processing and if so calls out to the driver to initialize hardware
1053  * and transfer each message.
1054  *
1055  * Note that it is called both from the kthread itself and also from
1056  * inside spi_sync(); the queue extraction handling at the top of the
1057  * function should deal with this safely.
1058  */
1059 static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1060 {
1061         unsigned long flags;
1062         bool was_busy = false;
1063         int ret;
1064
1065         /* Lock queue */
1066         spin_lock_irqsave(&master->queue_lock, flags);
1067
1068         /* Make sure we are not already running a message */
1069         if (master->cur_msg) {
1070                 spin_unlock_irqrestore(&master->queue_lock, flags);
1071                 return;
1072         }
1073
1074         /* If another context is idling the device then defer */
1075         if (master->idling) {
1076                 queue_kthread_work(&master->kworker, &master->pump_messages);
1077                 spin_unlock_irqrestore(&master->queue_lock, flags);
1078                 return;
1079         }
1080
1081         /* Check if the queue is idle */
1082         if (list_empty(&master->queue) || !master->running) {
1083                 if (!master->busy) {
1084                         spin_unlock_irqrestore(&master->queue_lock, flags);
1085                         return;
1086                 }
1087
1088                 /* Only do teardown in the thread */
1089                 if (!in_kthread) {
1090                         queue_kthread_work(&master->kworker,
1091                                            &master->pump_messages);
1092                         spin_unlock_irqrestore(&master->queue_lock, flags);
1093                         return;
1094                 }
1095
1096                 master->busy = false;
1097                 master->idling = true;
1098                 spin_unlock_irqrestore(&master->queue_lock, flags);
1099
1100                 kfree(master->dummy_rx);
1101                 master->dummy_rx = NULL;
1102                 kfree(master->dummy_tx);
1103                 master->dummy_tx = NULL;
1104                 if (master->unprepare_transfer_hardware &&
1105                     master->unprepare_transfer_hardware(master))
1106                         dev_err(&master->dev,
1107                                 "failed to unprepare transfer hardware\n");
1108                 if (master->auto_runtime_pm) {
1109                         pm_runtime_mark_last_busy(master->dev.parent);
1110                         pm_runtime_put_autosuspend(master->dev.parent);
1111                 }
1112                 trace_spi_master_idle(master);
1113
1114                 spin_lock_irqsave(&master->queue_lock, flags);
1115                 master->idling = false;
1116                 spin_unlock_irqrestore(&master->queue_lock, flags);
1117                 return;
1118         }
1119
1120         /* Extract head of queue */
1121         master->cur_msg =
1122                 list_first_entry(&master->queue, struct spi_message, queue);
1123
1124         list_del_init(&master->cur_msg->queue);
1125         if (master->busy)
1126                 was_busy = true;
1127         else
1128                 master->busy = true;
1129         spin_unlock_irqrestore(&master->queue_lock, flags);
1130
1131         if (!was_busy && master->auto_runtime_pm) {
1132                 ret = pm_runtime_get_sync(master->dev.parent);
1133                 if (ret < 0) {
1134                         dev_err(&master->dev, "Failed to power device: %d\n",
1135                                 ret);
1136                         return;
1137                 }
1138         }
1139
1140         if (!was_busy)
1141                 trace_spi_master_busy(master);
1142
1143         if (!was_busy && master->prepare_transfer_hardware) {
1144                 ret = master->prepare_transfer_hardware(master);
1145                 if (ret) {
1146                         dev_err(&master->dev,
1147                                 "failed to prepare transfer hardware\n");
1148
1149                         if (master->auto_runtime_pm)
1150                                 pm_runtime_put(master->dev.parent);
1151                         return;
1152                 }
1153         }
1154
1155         mutex_lock(&master->bus_lock_mutex);
1156         trace_spi_message_start(master->cur_msg);
1157
1158         if (master->prepare_message) {
1159                 ret = master->prepare_message(master, master->cur_msg);
1160                 if (ret) {
1161                         dev_err(&master->dev,
1162                                 "failed to prepare message: %d\n", ret);
1163                         master->cur_msg->status = ret;
1164                         spi_finalize_current_message(master);
1165                         mutex_unlock(&master->bus_lock_mutex);
1166                         return;
1167                 }
1168                 master->cur_msg_prepared = true;
1169         }
1170
1171         ret = spi_map_msg(master, master->cur_msg);
1172         if (ret) {
1173                 master->cur_msg->status = ret;
1174                 spi_finalize_current_message(master);
1175                 mutex_unlock(&master->bus_lock_mutex);
1176                 return;
1177         }
1178
1179         ret = master->transfer_one_message(master, master->cur_msg);
1180         if (ret) {
1181                 dev_err(&master->dev,
1182                         "failed to transfer one message from queue\n");
1183                 mutex_unlock(&master->bus_lock_mutex);
1184                 return;
1185         }
1186         mutex_unlock(&master->bus_lock_mutex);
1187
1188         /* Prod the scheduler in case transfer_one() was busy waiting */
1189         cond_resched();
1190 }
1191
1192 /**
1193  * spi_pump_messages - kthread work function which processes spi message queue
1194  * @work: pointer to kthread work struct contained in the master struct
1195  */
1196 static void spi_pump_messages(struct kthread_work *work)
1197 {
1198         struct spi_master *master =
1199                 container_of(work, struct spi_master, pump_messages);
1200
1201         __spi_pump_messages(master, true);
1202 }
1203
1204 static int spi_init_queue(struct spi_master *master)
1205 {
1206         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1207
1208         master->running = false;
1209         master->busy = false;
1210
1211         init_kthread_worker(&master->kworker);
1212         master->kworker_task = kthread_run(kthread_worker_fn,
1213                                            &master->kworker, "%s",
1214                                            dev_name(&master->dev));
1215         if (IS_ERR(master->kworker_task)) {
1216                 dev_err(&master->dev, "failed to create message pump task\n");
1217                 return PTR_ERR(master->kworker_task);
1218         }
1219         init_kthread_work(&master->pump_messages, spi_pump_messages);
1220
1221         /*
1222          * Master config will indicate if this controller should run the
1223          * message pump with high (realtime) priority to reduce the transfer
1224          * latency on the bus by minimising the delay between a transfer
1225          * request and the scheduling of the message pump thread. Without this
1226          * setting the message pump thread will remain at default priority.
1227          */
1228         if (master->rt) {
1229                 dev_info(&master->dev,
1230                         "will run message pump with realtime priority\n");
1231                 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1232         }
1233
1234         return 0;
1235 }
1236
1237 /**
1238  * spi_get_next_queued_message() - called by driver to check for queued
1239  * messages
1240  * @master: the master to check for queued messages
1241  *
1242  * If there are more messages in the queue, the next message is returned from
1243  * this call.
1244  *
1245  * Return: the next message in the queue, else NULL if the queue is empty.
1246  */
1247 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1248 {
1249         struct spi_message *next;
1250         unsigned long flags;
1251
1252         /* get a pointer to the next message, if any */
1253         spin_lock_irqsave(&master->queue_lock, flags);
1254         next = list_first_entry_or_null(&master->queue, struct spi_message,
1255                                         queue);
1256         spin_unlock_irqrestore(&master->queue_lock, flags);
1257
1258         return next;
1259 }
1260 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1261
1262 /**
1263  * spi_finalize_current_message() - the current message is complete
1264  * @master: the master to return the message to
1265  *
1266  * Called by the driver to notify the core that the message in the front of the
1267  * queue is complete and can be removed from the queue.
1268  */
1269 void spi_finalize_current_message(struct spi_master *master)
1270 {
1271         struct spi_message *mesg;
1272         unsigned long flags;
1273         int ret;
1274
1275         spin_lock_irqsave(&master->queue_lock, flags);
1276         mesg = master->cur_msg;
1277         spin_unlock_irqrestore(&master->queue_lock, flags);
1278
1279         spi_unmap_msg(master, mesg);
1280
1281         if (master->cur_msg_prepared && master->unprepare_message) {
1282                 ret = master->unprepare_message(master, mesg);
1283                 if (ret) {
1284                         dev_err(&master->dev,
1285                                 "failed to unprepare message: %d\n", ret);
1286                 }
1287         }
1288
1289         spin_lock_irqsave(&master->queue_lock, flags);
1290         master->cur_msg = NULL;
1291         master->cur_msg_prepared = false;
1292         queue_kthread_work(&master->kworker, &master->pump_messages);
1293         spin_unlock_irqrestore(&master->queue_lock, flags);
1294
1295         trace_spi_message_done(mesg);
1296
1297         mesg->state = NULL;
1298         if (mesg->complete)
1299                 mesg->complete(mesg->context);
1300 }
1301 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1302
1303 static int spi_start_queue(struct spi_master *master)
1304 {
1305         unsigned long flags;
1306
1307         spin_lock_irqsave(&master->queue_lock, flags);
1308
1309         if (master->running || master->busy) {
1310                 spin_unlock_irqrestore(&master->queue_lock, flags);
1311                 return -EBUSY;
1312         }
1313
1314         master->running = true;
1315         master->cur_msg = NULL;
1316         spin_unlock_irqrestore(&master->queue_lock, flags);
1317
1318         queue_kthread_work(&master->kworker, &master->pump_messages);
1319
1320         return 0;
1321 }
1322
1323 static int spi_stop_queue(struct spi_master *master)
1324 {
1325         unsigned long flags;
1326         unsigned limit = 500;
1327         int ret = 0;
1328
1329         spin_lock_irqsave(&master->queue_lock, flags);
1330
1331         /*
1332          * This is a bit lame, but is optimized for the common execution path.
1333          * A wait_queue on the master->busy could be used, but then the common
1334          * execution path (pump_messages) would be required to call wake_up or
1335          * friends on every SPI message. Do this instead.
1336          */
1337         while ((!list_empty(&master->queue) || master->busy) && limit--) {
1338                 spin_unlock_irqrestore(&master->queue_lock, flags);
1339                 usleep_range(10000, 11000);
1340                 spin_lock_irqsave(&master->queue_lock, flags);
1341         }
1342
1343         if (!list_empty(&master->queue) || master->busy)
1344                 ret = -EBUSY;
1345         else
1346                 master->running = false;
1347
1348         spin_unlock_irqrestore(&master->queue_lock, flags);
1349
1350         if (ret) {
1351                 dev_warn(&master->dev,
1352                          "could not stop message queue\n");
1353                 return ret;
1354         }
1355         return ret;
1356 }
1357
1358 static int spi_destroy_queue(struct spi_master *master)
1359 {
1360         int ret;
1361
1362         ret = spi_stop_queue(master);
1363
1364         /*
1365          * flush_kthread_worker will block until all work is done.
1366          * If the reason that stop_queue timed out is that the work will never
1367          * finish, then it does no good to call flush/stop thread, so
1368          * return anyway.
1369          */
1370         if (ret) {
1371                 dev_err(&master->dev, "problem destroying queue\n");
1372                 return ret;
1373         }
1374
1375         flush_kthread_worker(&master->kworker);
1376         kthread_stop(master->kworker_task);
1377
1378         return 0;
1379 }
1380
1381 static int __spi_queued_transfer(struct spi_device *spi,
1382                                  struct spi_message *msg,
1383                                  bool need_pump)
1384 {
1385         struct spi_master *master = spi->master;
1386         unsigned long flags;
1387
1388         spin_lock_irqsave(&master->queue_lock, flags);
1389
1390         if (!master->running) {
1391                 spin_unlock_irqrestore(&master->queue_lock, flags);
1392                 return -ESHUTDOWN;
1393         }
1394         msg->actual_length = 0;
1395         msg->status = -EINPROGRESS;
1396
1397         list_add_tail(&msg->queue, &master->queue);
1398         if (!master->busy && need_pump)
1399                 queue_kthread_work(&master->kworker, &master->pump_messages);
1400
1401         spin_unlock_irqrestore(&master->queue_lock, flags);
1402         return 0;
1403 }
1404
1405 /**
1406  * spi_queued_transfer - transfer function for queued transfers
1407  * @spi: spi device which is requesting transfer
1408  * @msg: spi message which is to be handled and queued to the driver queue
1409  *
1410  * Return: zero on success, else a negative error code.
1411  */
1412 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1413 {
1414         return __spi_queued_transfer(spi, msg, true);
1415 }
1416
1417 static int spi_master_initialize_queue(struct spi_master *master)
1418 {
1419         int ret;
1420
1421         master->transfer = spi_queued_transfer;
1422         if (!master->transfer_one_message)
1423                 master->transfer_one_message = spi_transfer_one_message;
1424
1425         /* Initialize and start queue */
1426         ret = spi_init_queue(master);
1427         if (ret) {
1428                 dev_err(&master->dev, "problem initializing queue\n");
1429                 goto err_init_queue;
1430         }
1431         master->queued = true;
1432         ret = spi_start_queue(master);
1433         if (ret) {
1434                 dev_err(&master->dev, "problem starting queue\n");
1435                 goto err_start_queue;
1436         }
1437
1438         return 0;
1439
1440 err_start_queue:
1441         spi_destroy_queue(master);
1442 err_init_queue:
1443         return ret;
1444 }
1445
1446 /*-------------------------------------------------------------------------*/
1447
1448 #if defined(CONFIG_OF)
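
/*
 * Illustrative device tree fragment (hypothetical, not part of this file)
 * showing the properties parsed below for a child node of an SPI
 * controller:
 *
 *	foo@0 {
 *		compatible = "vendor,foo";
 *		reg = <0>;
 *		spi-max-frequency = <1000000>;
 *		spi-cpha;
 *		spi-tx-bus-width = <2>;
 *	};
 */
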
1449 static struct spi_device *
1450 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1451 {
1452         struct spi_device *spi;
1453         int rc;
1454         u32 value;
1455
1456         /* Alloc an spi_device */
1457         spi = spi_alloc_device(master);
1458         if (!spi) {
1459                 dev_err(&master->dev, "spi_device alloc error for %s\n",
1460                         nc->full_name);
1461                 rc = -ENOMEM;
1462                 goto err_out;
1463         }
1464
1465         /* Select device driver */
1466         rc = of_modalias_node(nc, spi->modalias,
1467                                 sizeof(spi->modalias));
1468         if (rc < 0) {
1469                 dev_err(&master->dev, "cannot find modalias for %s\n",
1470                         nc->full_name);
1471                 goto err_out;
1472         }
1473
1474         /* Device address */
1475         rc = of_property_read_u32(nc, "reg", &value);
1476         if (rc) {
1477                 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1478                         nc->full_name, rc);
1479                 goto err_out;
1480         }
1481         spi->chip_select = value;
1482
1483         /* Mode (clock phase/polarity/etc.) */
1484         if (of_find_property(nc, "spi-cpha", NULL))
1485                 spi->mode |= SPI_CPHA;
1486         if (of_find_property(nc, "spi-cpol", NULL))
1487                 spi->mode |= SPI_CPOL;
1488         if (of_find_property(nc, "spi-cs-high", NULL))
1489                 spi->mode |= SPI_CS_HIGH;
1490         if (of_find_property(nc, "spi-3wire", NULL))
1491                 spi->mode |= SPI_3WIRE;
1492         if (of_find_property(nc, "spi-lsb-first", NULL))
1493                 spi->mode |= SPI_LSB_FIRST;
1494
1495         /* Device DUAL/QUAD mode */
1496         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1497                 switch (value) {
1498                 case 1:
1499                         break;
1500                 case 2:
1501                         spi->mode |= SPI_TX_DUAL;
1502                         break;
1503                 case 4:
1504                         spi->mode |= SPI_TX_QUAD;
1505                         break;
1506                 default:
1507                         dev_warn(&master->dev,
1508                                 "spi-tx-bus-width %d not supported\n",
1509                                 value);
1510                         break;
1511                 }
1512         }
1513
1514         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1515                 switch (value) {
1516                 case 1:
1517                         break;
1518                 case 2:
1519                         spi->mode |= SPI_RX_DUAL;
1520                         break;
1521                 case 4:
1522                         spi->mode |= SPI_RX_QUAD;
1523                         break;
1524                 default:
1525                         dev_warn(&master->dev,
1526                                 "spi-rx-bus-width %d not supported\n",
1527                                 value);
1528                         break;
1529                 }
1530         }
1531
1532         /* Device speed */
1533         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1534         if (rc) {
1535                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1536                         nc->full_name, rc);
1537                 goto err_out;
1538         }
1539         spi->max_speed_hz = value;
1540
1541         /* Store a pointer to the node in the device structure */
1542         of_node_get(nc);
1543         spi->dev.of_node = nc;
1544
1545         /* Register the new device */
1546         rc = spi_add_device(spi);
1547         if (rc) {
1548                 dev_err(&master->dev, "spi_device register error %s\n",
1549                         nc->full_name);
1550                 goto err_out;
1551         }
1552
1553         return spi;
1554
1555 err_out:
1556         spi_dev_put(spi);
1557         return ERR_PTR(rc);
1558 }
1559
1560 /**
1561  * of_register_spi_devices() - Register child devices onto the SPI bus
1562  * @master:     Pointer to spi_master device
1563  *
1564  * Registers an spi_device for each child node of master node which has a 'reg'
1565  * property.
1566  */
1567 static void of_register_spi_devices(struct spi_master *master)
1568 {
1569         struct spi_device *spi;
1570         struct device_node *nc;
1571
1572         if (!master->dev.of_node)
1573                 return;
1574
1575         for_each_available_child_of_node(master->dev.of_node, nc) {
1576                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1577                         continue;
1578                 spi = of_register_spi_device(master, nc);
1579                 if (IS_ERR(spi))
1580                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1581                                 nc->full_name);
1582         }
1583 }
1584 #else
1585 static void of_register_spi_devices(struct spi_master *master) { }
1586 #endif
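
/*
 * Illustrative sketch only (not part of this file): a device tree child node
 * of the kind of_register_spi_device() above parses.  The label, node name
 * and "jedec,spi-nor" compatible are hypothetical; the property names
 * (reg, spi-max-frequency, spi-cpol/spi-cpha, spi-*-bus-width) are the ones
 * handled in the code above.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select
 *			spi-max-frequency = <20000000>;
 *			spi-cpol;
 *			spi-cpha;
 *			spi-tx-bus-width = <2>;
 *			spi-rx-bus-width = <2>;
 *		};
 *	};
 */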
1587
1588 #ifdef CONFIG_ACPI
1589 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1590 {
1591         struct spi_device *spi = data;
1592
1593         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1594                 struct acpi_resource_spi_serialbus *sb;
1595
1596                 sb = &ares->data.spi_serial_bus;
1597                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1598                         spi->chip_select = sb->device_selection;
1599                         spi->max_speed_hz = sb->connection_speed;
1600
1601                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1602                                 spi->mode |= SPI_CPHA;
1603                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1604                                 spi->mode |= SPI_CPOL;
1605                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1606                                 spi->mode |= SPI_CS_HIGH;
1607                 }
1608         } else if (spi->irq < 0) {
1609                 struct resource r;
1610
1611                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1612                         spi->irq = r.start;
1613         }
1614
1615         /* Always tell the ACPI core to skip this resource */
1616         return 1;
1617 }
1618
1619 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1620                                        void *data, void **return_value)
1621 {
1622         struct spi_master *master = data;
1623         struct list_head resource_list;
1624         struct acpi_device *adev;
1625         struct spi_device *spi;
1626         int ret;
1627
1628         if (acpi_bus_get_device(handle, &adev))
1629                 return AE_OK;
1630         if (acpi_bus_get_status(adev) || !adev->status.present)
1631                 return AE_OK;
1632
1633         spi = spi_alloc_device(master);
1634         if (!spi) {
1635                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1636                         dev_name(&adev->dev));
1637                 return AE_NO_MEMORY;
1638         }
1639
1640         ACPI_COMPANION_SET(&spi->dev, adev);
1641         spi->irq = -1;
1642
1643         INIT_LIST_HEAD(&resource_list);
1644         ret = acpi_dev_get_resources(adev, &resource_list,
1645                                      acpi_spi_add_resource, spi);
1646         acpi_dev_free_resource_list(&resource_list);
1647
1648         if (ret < 0 || !spi->max_speed_hz) {
1649                 spi_dev_put(spi);
1650                 return AE_OK;
1651         }
1652
1653         if (spi->irq < 0)
1654                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1655
1656         adev->power.flags.ignore_parent = true;
1657         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1658         if (spi_add_device(spi)) {
1659                 adev->power.flags.ignore_parent = false;
1660                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1661                         dev_name(&adev->dev));
1662                 spi_dev_put(spi);
1663         }
1664
1665         return AE_OK;
1666 }
1667
1668 static void acpi_register_spi_devices(struct spi_master *master)
1669 {
1670         acpi_status status;
1671         acpi_handle handle;
1672
1673         handle = ACPI_HANDLE(master->dev.parent);
1674         if (!handle)
1675                 return;
1676
1677         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1678                                      acpi_spi_add_device, NULL,
1679                                      master, NULL);
1680         if (ACPI_FAILURE(status))
1681                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1682 }
1683 #else
1684 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1685 #endif /* CONFIG_ACPI */
1686
1687 static void spi_master_release(struct device *dev)
1688 {
1689         struct spi_master *master;
1690
1691         master = container_of(dev, struct spi_master, dev);
1692         kfree(master);
1693 }
1694
1695 static struct class spi_master_class = {
1696         .name           = "spi_master",
1697         .owner          = THIS_MODULE,
1698         .dev_release    = spi_master_release,
1699         .dev_groups     = spi_master_groups,
1700 };
1701
1702
1703 /**
1704  * spi_alloc_master - allocate SPI master controller
1705  * @dev: the controller, possibly using the platform_bus
1706  * @size: how much zeroed driver-private data to allocate; the pointer to this
1707  *      memory is in the driver_data field of the returned device,
1708  *      accessible with spi_master_get_devdata().
1709  * Context: can sleep
1710  *
1711  * This call is used only by SPI master controller drivers, which are the
1712  * only ones directly touching chip registers.  It's how they allocate
1713  * an spi_master structure, prior to calling spi_register_master().
1714  *
1715  * This must be called from context that can sleep.
1716  *
1717  * The caller is responsible for assigning the bus number and initializing
1718  * the master's methods before calling spi_register_master(); and (after errors
1719  * adding the device) calling spi_master_put() to prevent a memory leak.
1720  *
1721  * Return: the SPI master structure on success, else NULL.
1722  */
1723 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1724 {
1725         struct spi_master       *master;
1726
1727         if (!dev)
1728                 return NULL;
1729
1730         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1731         if (!master)
1732                 return NULL;
1733
1734         device_initialize(&master->dev);
1735         master->bus_num = -1;
1736         master->num_chipselect = 1;
1737         master->dev.class = &spi_master_class;
1738         master->dev.parent = dev;
1739         spi_master_set_devdata(master, &master[1]);
1740
1741         return master;
1742 }
1743 EXPORT_SYMBOL_GPL(spi_alloc_master);
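
/*
 * Illustrative sketch only: typical use of spi_alloc_master() from a
 * controller driver's probe().  "struct my_ctlr" and "pdev" are hypothetical.
 *
 *	struct my_ctlr *priv;
 *	struct spi_master *master;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *
 * The driver-private area follows the spi_master in the same allocation,
 * so a single spi_master_put() releases both.
 */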
1744
1745 #ifdef CONFIG_OF
1746 static int of_spi_register_master(struct spi_master *master)
1747 {
1748         int nb, i, *cs;
1749         struct device_node *np = master->dev.of_node;
1750
1751         if (!np)
1752                 return 0;
1753
1754         nb = of_gpio_named_count(np, "cs-gpios");
1755         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1756
1757         /* Return error only for an incorrectly formed cs-gpios property */
1758         if (nb == 0 || nb == -ENOENT)
1759                 return 0;
1760         else if (nb < 0)
1761                 return nb;
1762
1763         cs = devm_kzalloc(&master->dev,
1764                           sizeof(int) * master->num_chipselect,
1765                           GFP_KERNEL);
1766         master->cs_gpios = cs;
1767
1768         if (!master->cs_gpios)
1769                 return -ENOMEM;
1770
1771         for (i = 0; i < master->num_chipselect; i++)
1772                 cs[i] = -ENOENT;
1773
1774         for (i = 0; i < nb; i++)
1775                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1776
1777         return 0;
1778 }
1779 #else
1780 static int of_spi_register_master(struct spi_master *master)
1781 {
1782         return 0;
1783 }
1784 #endif
1785
1786 /**
1787  * spi_register_master - register SPI master controller
1788  * @master: initialized master, originally from spi_alloc_master()
1789  * Context: can sleep
1790  *
1791  * SPI master controllers connect to their drivers using some non-SPI bus,
1792  * such as the platform bus.  The final stage of probe() in that code
1793  * includes calling spi_register_master() to hook up to this SPI bus glue.
1794  *
1795  * SPI controllers use board specific (often SOC specific) bus numbers,
1796  * and board-specific addressing for SPI devices combines those numbers
1797  * with chip select numbers.  Since SPI does not directly support dynamic
1798  * device identification, boards need configuration tables telling which
1799  * chip is at which address.
1800  *
1801  * This must be called from context that can sleep.  It returns zero on
1802  * success, else a negative error code (dropping the master's refcount).
1803  * After a successful return, the caller is responsible for calling
1804  * spi_unregister_master().
1805  *
1806  * Return: zero on success, else a negative error code.
1807  */
1808 int spi_register_master(struct spi_master *master)
1809 {
1810         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1811         struct device           *dev = master->dev.parent;
1812         struct boardinfo        *bi;
1813         int                     status = -ENODEV;
1814         int                     dynamic = 0;
1815
1816         if (!dev)
1817                 return -ENODEV;
1818
1819         status = of_spi_register_master(master);
1820         if (status)
1821                 return status;
1822
1823         /* even if it's just one always-selected device, there must
1824          * be at least one chipselect
1825          */
1826         if (master->num_chipselect == 0)
1827                 return -EINVAL;
1828
1829         if ((master->bus_num < 0) && master->dev.of_node)
1830                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1831
1832         /* convention:  dynamically assigned bus IDs count down from the max */
1833         if (master->bus_num < 0) {
1834                 /* FIXME switch to an IDR based scheme, something like
1835                  * I2C now uses, so we can't run out of "dynamic" IDs
1836                  */
1837                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1838                 dynamic = 1;
1839         }
1840
1841         INIT_LIST_HEAD(&master->queue);
1842         spin_lock_init(&master->queue_lock);
1843         spin_lock_init(&master->bus_lock_spinlock);
1844         mutex_init(&master->bus_lock_mutex);
1845         master->bus_lock_flag = 0;
1846         init_completion(&master->xfer_completion);
1847         if (!master->max_dma_len)
1848                 master->max_dma_len = INT_MAX;
1849
1850         /* register the device, then userspace will see it.
1851          * registration fails if the bus ID is in use.
1852          */
1853         dev_set_name(&master->dev, "spi%u", master->bus_num);
1854         status = device_add(&master->dev);
1855         if (status < 0)
1856                 goto done;
1857         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1858                         dynamic ? " (dynamic)" : "");
1859
1860         /* If we're using a queued driver, start the queue */
1861         if (master->transfer)
1862                 dev_info(dev, "master is unqueued, this is deprecated\n");
1863         else {
1864                 status = spi_master_initialize_queue(master);
1865                 if (status) {
1866                         device_del(&master->dev);
1867                         goto done;
1868                 }
1869         }
1870         /* add statistics */
1871         spin_lock_init(&master->statistics.lock);
1872
1873         mutex_lock(&board_lock);
1874         list_add_tail(&master->list, &spi_master_list);
1875         list_for_each_entry(bi, &board_list, list)
1876                 spi_match_master_to_boardinfo(master, &bi->board_info);
1877         mutex_unlock(&board_lock);
1878
1879         /* Register devices from the device tree and ACPI */
1880         of_register_spi_devices(master);
1881         acpi_register_spi_devices(master);
1882 done:
1883         return status;
1884 }
1885 EXPORT_SYMBOL_GPL(spi_register_master);
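
/*
 * Illustrative sketch only: fields a controller driver typically fills in
 * before registration.  The callbacks and limits shown are hypothetical;
 * see spi_alloc_master() above for the allocation step.
 *
 *	master->dev.of_node = pdev->dev.of_node;
 *	master->num_chipselect = 2;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
 *	master->max_speed_hz = 50000000;
 *	master->setup = my_setup;
 *	master->transfer_one_message = my_transfer_one_message;
 *
 *	status = spi_register_master(master);
 */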
1886
1887 static void devm_spi_unregister(struct device *dev, void *res)
1888 {
1889         spi_unregister_master(*(struct spi_master **)res);
1890 }
1891
1892 /**
1893  * devm_spi_register_master - register managed SPI master controller
1894  * @dev:    device managing SPI master
1895  * @master: initialized master, originally from spi_alloc_master()
1896  * Context: can sleep
1897  *
1898  * Register an SPI master as with spi_register_master(); the master will
1899  * automatically be unregistered when @dev is detached.
1900  *
1901  * Return: zero on success, else a negative error code.
1902  */
1903 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1904 {
1905         struct spi_master **ptr;
1906         int ret;
1907
1908         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1909         if (!ptr)
1910                 return -ENOMEM;
1911
1912         ret = spi_register_master(master);
1913         if (!ret) {
1914                 *ptr = master;
1915                 devres_add(dev, ptr);
1916         } else {
1917                 devres_free(ptr);
1918         }
1919
1920         return ret;
1921 }
1922 EXPORT_SYMBOL_GPL(devm_spi_register_master);
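
/*
 * Illustrative sketch only: the managed variant ties the master's lifetime
 * to the probing device, so remove() needs no explicit
 * spi_unregister_master() call.  "pdev" is hypothetical.
 *
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		return ret;
 */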
1923
1924 static int __unregister(struct device *dev, void *null)
1925 {
1926         spi_unregister_device(to_spi_device(dev));
1927         return 0;
1928 }
1929
1930 /**
1931  * spi_unregister_master - unregister SPI master controller
1932  * @master: the master being unregistered
1933  * Context: can sleep
1934  *
1935  * This call is used only by SPI master controller drivers, which are the
1936  * only ones directly touching chip registers.
1937  *
1938  * This must be called from context that can sleep.
1939  */
1940 void spi_unregister_master(struct spi_master *master)
1941 {
1942         int dummy;
1943
1944         if (master->queued) {
1945                 if (spi_destroy_queue(master))
1946                         dev_err(&master->dev, "queue remove failed\n");
1947         }
1948
1949         mutex_lock(&board_lock);
1950         list_del(&master->list);
1951         mutex_unlock(&board_lock);
1952
1953         dummy = device_for_each_child(&master->dev, NULL, __unregister);
1954         device_unregister(&master->dev);
1955 }
1956 EXPORT_SYMBOL_GPL(spi_unregister_master);
1957
1958 int spi_master_suspend(struct spi_master *master)
1959 {
1960         int ret;
1961
1962         /* Basically a no-op for non-queued masters */
1963         if (!master->queued)
1964                 return 0;
1965
1966         ret = spi_stop_queue(master);
1967         if (ret)
1968                 dev_err(&master->dev, "queue stop failed\n");
1969
1970         return ret;
1971 }
1972 EXPORT_SYMBOL_GPL(spi_master_suspend);
1973
1974 int spi_master_resume(struct spi_master *master)
1975 {
1976         int ret;
1977
1978         if (!master->queued)
1979                 return 0;
1980
1981         ret = spi_start_queue(master);
1982         if (ret)
1983                 dev_err(&master->dev, "queue restart failed\n");
1984
1985         return ret;
1986 }
1987 EXPORT_SYMBOL_GPL(spi_master_resume);
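
/*
 * Illustrative sketch only: wiring the queue suspend/resume helpers into a
 * controller driver's PM callbacks, assuming the driver stored the master
 * as its drvdata.  Function names are hypothetical.
 *
 *	static int my_spi_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_suspend(master);
 *	}
 *
 *	static int my_spi_resume(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_resume(master);
 *	}
 */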
1988
1989 static int __spi_master_match(struct device *dev, const void *data)
1990 {
1991         struct spi_master *m;
1992         const u16 *bus_num = data;
1993
1994         m = container_of(dev, struct spi_master, dev);
1995         return m->bus_num == *bus_num;
1996 }
1997
1998 /**
1999  * spi_busnum_to_master - look up master associated with bus_num
2000  * @bus_num: the master's bus number
2001  * Context: can sleep
2002  *
2003  * This call may be used with devices that are registered after
2004  * arch init time.  It returns a refcounted pointer to the relevant
2005  * spi_master (which the caller must release), or NULL if there is
2006  * no such master registered.
2007  *
2008  * Return: the SPI master structure on success, else NULL.
2009  */
2010 struct spi_master *spi_busnum_to_master(u16 bus_num)
2011 {
2012         struct device           *dev;
2013         struct spi_master       *master = NULL;
2014
2015         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2016                                 __spi_master_match);
2017         if (dev)
2018                 master = container_of(dev, struct spi_master, dev);
2019         /* reference was taken in class_find_device() */
2020         return master;
2021 }
2022 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
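
/*
 * Illustrative sketch only: looking up bus 1 and then dropping the reference
 * taken by class_find_device() once the master is no longer needed.
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		... use master ...
 *		spi_master_put(master);
 *	}
 */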
2023
2024
2025 /*-------------------------------------------------------------------------*/
2026
2027 /* Core methods for SPI master protocol drivers.  Some of the
2028  * other core methods are currently defined as inline functions.
2029  */
2030
2031 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2032 {
2033         if (master->bits_per_word_mask) {
2034                 /* Only 32 bits fit in the mask */
2035                 if (bits_per_word > 32)
2036                         return -EINVAL;
2037                 if (!(master->bits_per_word_mask &
2038                                 SPI_BPW_MASK(bits_per_word)))
2039                         return -EINVAL;
2040         }
2041
2042         return 0;
2043 }
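
/*
 * bits_per_word_mask is built by controller drivers using the SPI_BPW_MASK()
 * and SPI_BPW_RANGE_MASK() helpers from <linux/spi/spi.h>.  An illustrative
 * sketch with hypothetical values:
 *
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
 *
 * or, for a few discrete widths:
 *
 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 */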
2044
2045 /**
2046  * spi_setup - setup SPI mode and clock rate
2047  * @spi: the device whose settings are being modified
2048  * Context: can sleep, and no requests are queued to the device
2049  *
2050  * SPI protocol drivers may need to update the transfer mode if the
2051  * device doesn't work with its default.  They may likewise need
2052  * to update clock rates or word sizes from initial values.  This function
2053  * changes those settings, and must be called from a context that can sleep.
2054  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2055  * effect the next time the device is selected and data is transferred to
2056  * or from it.  When this function returns, the spi device is deselected.
2057  *
2058  * Note that this call will fail if the protocol driver specifies an option
2059  * that the underlying controller or its driver does not support.  For
2060  * example, not all hardware supports wire transfers using nine bit words,
2061  * LSB-first wire encoding, or active-high chipselects.
2062  *
2063  * Return: zero on success, else a negative error code.
2064  */
2065 int spi_setup(struct spi_device *spi)
2066 {
2067         unsigned        bad_bits, ugly_bits;
2068         int             status;
2069
2070         /* check mode to prevent DUAL and QUAD from being set at the same time
2071          */
2072         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2073                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2074                 dev_err(&spi->dev,
2075                 "setup: can not select dual and quad at the same time\n");
2076                 return -EINVAL;
2077         }
2078         /* in SPI_3WIRE mode, DUAL and QUAD modes are not allowed
2079          */
2080         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2081                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2082                 return -EINVAL;
2083         /* help drivers fail *cleanly* when they need options
2084          * that aren't supported with their current master
2085          */
2086         bad_bits = spi->mode & ~spi->master->mode_bits;
2087         ugly_bits = bad_bits &
2088                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2089         if (ugly_bits) {
2090                 dev_warn(&spi->dev,
2091                          "setup: ignoring unsupported mode bits %x\n",
2092                          ugly_bits);
2093                 spi->mode &= ~ugly_bits;
2094                 bad_bits &= ~ugly_bits;
2095         }
2096         if (bad_bits) {
2097                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2098                         bad_bits);
2099                 return -EINVAL;
2100         }
2101
2102         if (!spi->bits_per_word)
2103                 spi->bits_per_word = 8;
2104
2105         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2106         if (status)
2107                 return status;
2108
2109         if (!spi->max_speed_hz)
2110                 spi->max_speed_hz = spi->master->max_speed_hz;
2111
2112         if (spi->master->setup)
2113                 status = spi->master->setup(spi);
2114
2115         spi_set_cs(spi, false);
2116
2117         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2118                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2119                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2120                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2121                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2122                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2123                         spi->bits_per_word, spi->max_speed_hz,
2124                         status);
2125
2126         return status;
2127 }
2128 EXPORT_SYMBOL_GPL(spi_setup);
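
/*
 * Illustrative sketch only: a protocol driver adjusting its settings in
 * probe().  The values shown are hypothetical.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
 */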
2129
2130 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2131 {
2132         struct spi_master *master = spi->master;
2133         struct spi_transfer *xfer;
2134         int w_size;
2135
2136         if (list_empty(&message->transfers))
2137                 return -EINVAL;
2138
2139         /* Half-duplex links include original MicroWire, and ones with
2140          * only one data pin like SPI_3WIRE (switches direction) or where
2141          * either MOSI or MISO is missing.  They can also be caused by
2142          * software limitations.
2143          */
2144         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2145                         || (spi->mode & SPI_3WIRE)) {
2146                 unsigned flags = master->flags;
2147
2148                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2149                         if (xfer->rx_buf && xfer->tx_buf)
2150                                 return -EINVAL;
2151                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2152                                 return -EINVAL;
2153                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2154                                 return -EINVAL;
2155                 }
2156         }
2157
2158         /*
2159          * Set transfer bits_per_word and max speed as spi device default if
2160          * it is not set for this transfer.
2161          * Set transfer tx_nbits and rx_nbits as single transfer default
2162          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2163          */
2164         message->frame_length = 0;
2165         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2166                 message->frame_length += xfer->len;
2167                 if (!xfer->bits_per_word)
2168                         xfer->bits_per_word = spi->bits_per_word;
2169
2170                 if (!xfer->speed_hz)
2171                         xfer->speed_hz = spi->max_speed_hz;
2172                 if (!xfer->speed_hz)
2173                         xfer->speed_hz = master->max_speed_hz;
2174
2175                 if (master->max_speed_hz &&
2176                     xfer->speed_hz > master->max_speed_hz)
2177                         xfer->speed_hz = master->max_speed_hz;
2178
2179                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2180                         return -EINVAL;
2181
2182                 /*
2183                  * SPI transfer length should be multiple of SPI word size
2184                  * where SPI word size should be power-of-two multiple
2185                  */
2186                 if (xfer->bits_per_word <= 8)
2187                         w_size = 1;
2188                 else if (xfer->bits_per_word <= 16)
2189                         w_size = 2;
2190                 else
2191                         w_size = 4;
2192
2193                 /* No partial transfers accepted */
2194                 if (xfer->len % w_size)
2195                         return -EINVAL;
2196
2197                 if (xfer->speed_hz && master->min_speed_hz &&
2198                     xfer->speed_hz < master->min_speed_hz)
2199                         return -EINVAL;
2200
2201                 if (xfer->tx_buf && !xfer->tx_nbits)
2202                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2203                 if (xfer->rx_buf && !xfer->rx_nbits)
2204                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2205                 /* check transfer tx/rx_nbits:
2206                  * 1. check the value matches one of single, dual and quad
2207                  * 2. check tx/rx_nbits match the mode in spi_device
2208                  */
2209                 if (xfer->tx_buf) {
2210                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2211                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2212                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2213                                 return -EINVAL;
2214                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2215                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2216                                 return -EINVAL;
2217                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2218                                 !(spi->mode & SPI_TX_QUAD))
2219                                 return -EINVAL;
2220                 }
2221                 /* check transfer rx_nbits */
2222                 if (xfer->rx_buf) {
2223                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2224                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2225                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2226                                 return -EINVAL;
2227                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2228                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2229                                 return -EINVAL;
2230                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2231                                 !(spi->mode & SPI_RX_QUAD))
2232                                 return -EINVAL;
2233                 }
2234         }
2235
2236         message->status = -EINPROGRESS;
2237
2238         return 0;
2239 }
2240
2241 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2242 {
2243         struct spi_master *master = spi->master;
2244
2245         message->spi = spi;
2246
2247         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2248         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2249
2250         trace_spi_message_submit(message);
2251
2252         return master->transfer(spi, message);
2253 }
2254
2255 /**
2256  * spi_async - asynchronous SPI transfer
2257  * @spi: device with which data will be exchanged
2258  * @message: describes the data transfers, including completion callback
2259  * Context: any (irqs may be blocked, etc)
2260  *
2261  * This call may be used in_irq and other contexts which can't sleep,
2262  * as well as from task contexts which can sleep.
2263  *
2264  * The completion callback is invoked in a context which can't sleep.
2265  * Before that invocation, the value of message->status is undefined.
2266  * When the callback is issued, message->status holds either zero (to
2267  * indicate complete success) or a negative error code.  After that
2268  * callback returns, the driver which issued the transfer request may
2269  * deallocate the associated memory; it's no longer in use by any SPI
2270  * core or controller driver code.
2271  *
2272  * Note that although all messages to a spi_device are handled in
2273  * FIFO order, messages may go to different devices in other orders.
2274  * Some device might be higher priority, or have various "hard" access
2275  * time requirements, for example.
2276  *
2277  * On detection of any fault during the transfer, processing of
2278  * the entire message is aborted, and the device is deselected.
2279  * Until returning from the associated message completion callback,
2280  * no other spi_message queued to that device will be processed.
2281  * (This rule applies equally to all the synchronous transfer calls,
2282  * which are wrappers around this core asynchronous primitive.)
2283  *
2284  * Return: zero on success, else a negative error code.
2285  */
2286 int spi_async(struct spi_device *spi, struct spi_message *message)
2287 {
2288         struct spi_master *master = spi->master;
2289         int ret;
2290         unsigned long flags;
2291
2292         ret = __spi_validate(spi, message);
2293         if (ret != 0)
2294                 return ret;
2295
2296         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2297
2298         if (master->bus_lock_flag)
2299                 ret = -EBUSY;
2300         else
2301                 ret = __spi_async(spi, message);
2302
2303         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2304
2305         return ret;
2306 }
2307 EXPORT_SYMBOL_GPL(spi_async);
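
/*
 * Illustrative sketch only: submitting one transfer asynchronously with a
 * completion callback.  "my_done", "xfer" and "msg" are hypothetical; the
 * callback runs in a context that cannot sleep, and the message and buffers
 * must stay valid until it has been called.
 *
 *	static void my_done(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = my_done;
 *	msg.context = &done;
 *
 *	ret = spi_async(spi, &msg);
 *	if (!ret)
 *		wait_for_completion(&done);
 */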
2308
2309 /**
2310  * spi_async_locked - version of spi_async with exclusive bus usage
2311  * @spi: device with which data will be exchanged
2312  * @message: describes the data transfers, including completion callback
2313  * Context: any (irqs may be blocked, etc)
2314  *
2315  * This call may be used in_irq and other contexts which can't sleep,
2316  * as well as from task contexts which can sleep.
2317  *
2318  * The completion callback is invoked in a context which can't sleep.
2319  * Before that invocation, the value of message->status is undefined.
2320  * When the callback is issued, message->status holds either zero (to
2321  * indicate complete success) or a negative error code.  After that
2322  * callback returns, the driver which issued the transfer request may
2323  * deallocate the associated memory; it's no longer in use by any SPI
2324  * core or controller driver code.
2325  *
2326  * Note that although all messages to a spi_device are handled in
2327  * FIFO order, messages may go to different devices in other orders.
2328  * Some device might be higher priority, or have various "hard" access
2329  * time requirements, for example.
2330  *
2331  * On detection of any fault during the transfer, processing of
2332  * the entire message is aborted, and the device is deselected.
2333  * Until returning from the associated message completion callback,
2334  * no other spi_message queued to that device will be processed.
2335  * (This rule applies equally to all the synchronous transfer calls,
2336  * which are wrappers around this core asynchronous primitive.)
2337  *
2338  * Return: zero on success, else a negative error code.
2339  */
2340 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2341 {
2342         struct spi_master *master = spi->master;
2343         int ret;
2344         unsigned long flags;
2345
2346         ret = __spi_validate(spi, message);
2347         if (ret != 0)
2348                 return ret;
2349
2350         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2351
2352         ret = __spi_async(spi, message);
2353
2354         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2355
2356         return ret;
2357
2358 }
2359 EXPORT_SYMBOL_GPL(spi_async_locked);
2360
2361
2362 int spi_flash_read(struct spi_device *spi,
2363                    struct spi_flash_read_message *msg)
2364
2365 {
2366         struct spi_master *master = spi->master;
2367         int ret;
2368
2369         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2370              msg->addr_nbits == SPI_NBITS_DUAL) &&
2371             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2372                 return -EINVAL;
2373         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2374              msg->addr_nbits == SPI_NBITS_QUAD) &&
2375             !(spi->mode & SPI_TX_QUAD))
2376                 return -EINVAL;
2377         if (msg->data_nbits == SPI_NBITS_DUAL &&
2378             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2379                 return -EINVAL;
2380         if (msg->data_nbits == SPI_NBITS_QUAD &&
2381             !(spi->mode &  SPI_RX_QUAD))
2382                 return -EINVAL;
2383
2384         if (master->auto_runtime_pm) {
2385                 ret = pm_runtime_get_sync(master->dev.parent);
2386                 if (ret < 0) {
2387                         dev_err(&master->dev, "Failed to power device: %d\n",
2388                                 ret);
2389                         return ret;
2390                 }
2391         }
2392         mutex_lock(&master->bus_lock_mutex);
2393         ret = master->spi_flash_read(spi, msg);
2394         mutex_unlock(&master->bus_lock_mutex);
2395         if (master->auto_runtime_pm)
2396                 pm_runtime_put(master->dev.parent);
2397
2398         return ret;
2399 }
2400 EXPORT_SYMBOL_GPL(spi_flash_read);
2401
2402 /*-------------------------------------------------------------------------*/
2403
2404 /* Utility methods for SPI master protocol drivers, layered on
2405  * top of the core.  Some other utility methods are defined as
2406  * inline functions.
2407  */
2408
2409 static void spi_complete(void *arg)
2410 {
2411         complete(arg);
2412 }
2413
2414 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2415                       int bus_locked)
2416 {
2417         DECLARE_COMPLETION_ONSTACK(done);
2418         int status;
2419         struct spi_master *master = spi->master;
2420         unsigned long flags;
2421
2422         status = __spi_validate(spi, message);
2423         if (status != 0)
2424                 return status;
2425
2426         message->complete = spi_complete;
2427         message->context = &done;
2428         message->spi = spi;
2429
2430         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2431         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2432
2433         if (!bus_locked)
2434                 mutex_lock(&master->bus_lock_mutex);
2435
2436         /* If we're not using the legacy transfer method then we will
2437          * try to transfer in the calling context, so special-case it here.
2438          * This code would be less tricky if we could remove the
2439          * support for driver implemented message queues.
2440          */
2441         if (master->transfer == spi_queued_transfer) {
2442                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2443
2444                 trace_spi_message_submit(message);
2445
2446                 status = __spi_queued_transfer(spi, message, false);
2447
2448                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2449         } else {
2450                 status = spi_async_locked(spi, message);
2451         }
2452
2453         if (!bus_locked)
2454                 mutex_unlock(&master->bus_lock_mutex);
2455
2456         if (status == 0) {
2457                 /* Push out the messages in the calling context if we
2458                  * can.
2459                  */
2460                 if (master->transfer == spi_queued_transfer) {
2461                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2462                                                        spi_sync_immediate);
2463                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2464                                                        spi_sync_immediate);
2465                         __spi_pump_messages(master, false);
2466                 }
2467
2468                 wait_for_completion(&done);
2469                 status = message->status;
2470         }
2471         message->context = NULL;
2472         return status;
2473 }
2474
2475 /**
2476  * spi_sync - blocking/synchronous SPI data transfers
2477  * @spi: device with which data will be exchanged
2478  * @message: describes the data transfers
2479  * Context: can sleep
2480  *
2481  * This call may only be used from a context that may sleep.  The sleep
2482  * is non-interruptible, and has no timeout.  Low-overhead controller
2483  * drivers may DMA directly into and out of the message buffers.
2484  *
2485  * Note that the SPI device's chip select is active during the message,
2486  * and then is normally disabled between messages.  Drivers for some
2487  * frequently-used devices may want to minimize costs of selecting a chip,
2488  * by leaving it selected in anticipation that the next message will go
2489  * to the same chip.  (That may increase power usage.)
2490  *
2491  * Also, the caller is guaranteeing that the memory associated with the
2492  * message will not be freed before this call returns.
2493  *
2494  * Return: zero on success, else a negative error code.
2495  */
2496 int spi_sync(struct spi_device *spi, struct spi_message *message)
2497 {
2498         return __spi_sync(spi, message, 0);
2499 }
2500 EXPORT_SYMBOL_GPL(spi_sync);
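
/*
 * Illustrative sketch only: a single full-duplex transfer using spi_sync().
 * "tx", "rx" and "len" are hypothetical; the buffers should be DMA-safe
 * (e.g. kmalloc'd), per the note above about drivers DMAing directly into
 * the message buffers.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len	= len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */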
2501
2502 /**
2503  * spi_sync_locked - version of spi_sync with exclusive bus usage
2504  * @spi: device with which data will be exchanged
2505  * @message: describes the data transfers
2506  * Context: can sleep
2507  *
2508  * This call may only be used from a context that may sleep.  The sleep
2509  * is non-interruptible, and has no timeout.  Low-overhead controller
2510  * drivers may DMA directly into and out of the message buffers.
2511  *
2512  * This call should be used by drivers that require exclusive access to the
2513  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2514  * be released by a spi_bus_unlock call when the exclusive access is over.
2515  *
2516  * Return: zero on success, else a negative error code.
2517  */
2518 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2519 {
2520         return __spi_sync(spi, message, 1);
2521 }
2522 EXPORT_SYMBOL_GPL(spi_sync_locked);
2523
2524 /**
2525  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2526  * @master: SPI bus master that should be locked for exclusive bus access
2527  * Context: can sleep
2528  *
2529  * This call may only be used from a context that may sleep.  The sleep
2530  * is non-interruptible, and has no timeout.
2531  *
2532  * This call should be used by drivers that require exclusive access to the
2533  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2534  * exclusive access is over. Data transfer must be done by spi_sync_locked
2535  * and spi_async_locked calls when the SPI bus lock is held.
2536  *
2537  * Return: always zero.
2538  */
2539 int spi_bus_lock(struct spi_master *master)
2540 {
2541         unsigned long flags;
2542
2543         mutex_lock(&master->bus_lock_mutex);
2544
2545         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2546         master->bus_lock_flag = 1;
2547         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2548
2549         /* mutex remains locked until spi_bus_unlock is called */
2550
2551         return 0;
2552 }
2553 EXPORT_SYMBOL_GPL(spi_bus_lock);
2554
2555 /**
2556  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2557  * @master: SPI bus master that was locked for exclusive bus access
2558  * Context: can sleep
2559  *
2560  * This call may only be used from a context that may sleep.  The sleep
2561  * is non-interruptible, and has no timeout.
2562  *
2563  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2564  * call.
2565  *
2566  * Return: always zero.
2567  */
2568 int spi_bus_unlock(struct spi_master *master)
2569 {
2570         master->bus_lock_flag = 0;
2571
2572         mutex_unlock(&master->bus_lock_mutex);
2573
2574         return 0;
2575 }
2576 EXPORT_SYMBOL_GPL(spi_bus_unlock);
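
/*
 * Illustrative sketch only: grouping two messages so that no other device on
 * the bus can be addressed between them.  "msg1" and "msg2" are hypothetical.
 *
 *	spi_bus_lock(master);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(master);
 */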
2577
2578 /* portable code must never pass more than 32 bytes */
2579 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2580
2581 static u8       *buf;
2582
2583 /**
2584  * spi_write_then_read - SPI synchronous write followed by read
2585  * @spi: device with which data will be exchanged
2586  * @txbuf: data to be written (need not be dma-safe)
2587  * @n_tx: size of txbuf, in bytes
2588  * @rxbuf: buffer into which data will be read (need not be dma-safe)
2589  * @n_rx: size of rxbuf, in bytes
2590  * Context: can sleep
2591  *
2592  * This performs a half duplex MicroWire style transaction with the
2593  * device, sending txbuf and then reading rxbuf.  The return value
2594  * is zero for success, else a negative errno status code.
2595  * This call may only be used from a context that may sleep.
2596  *
2597  * Parameters to this routine are always copied using a small buffer;
2598  * portable code should never use this for more than 32 bytes.
2599  * Performance-sensitive or bulk transfer code should instead use
2600  * spi_{async,sync}() calls with dma-safe buffers.
2601  *
2602  * Return: zero on success, else a negative error code.
2603  */
2604 int spi_write_then_read(struct spi_device *spi,
2605                 const void *txbuf, unsigned n_tx,
2606                 void *rxbuf, unsigned n_rx)
2607 {
2608         static DEFINE_MUTEX(lock);
2609
2610         int                     status;
2611         struct spi_message      message;
2612         struct spi_transfer     x[2];
2613         u8                      *local_buf;
2614
2615         /* Use preallocated DMA-safe buffer if we can.  We can't avoid
2616          * copying here (purely as a convenience), but we can
2617          * keep heap costs out of the hot path unless someone else is
2618          * using the pre-allocated buffer or the transfer is too large.
2619          */
2620         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2621                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2622                                     GFP_KERNEL | GFP_DMA);
2623                 if (!local_buf)
2624                         return -ENOMEM;
2625         } else {
2626                 local_buf = buf;
2627         }
2628
2629         spi_message_init(&message);
2630         memset(x, 0, sizeof(x));
2631         if (n_tx) {
2632                 x[0].len = n_tx;
2633                 spi_message_add_tail(&x[0], &message);
2634         }
2635         if (n_rx) {
2636                 x[1].len = n_rx;
2637                 spi_message_add_tail(&x[1], &message);
2638         }
2639
2640         memcpy(local_buf, txbuf, n_tx);
2641         x[0].tx_buf = local_buf;
2642         x[1].rx_buf = local_buf + n_tx;
2643
2644         /* do the i/o */
2645         status = spi_sync(spi, &message);
2646         if (status == 0)
2647                 memcpy(rxbuf, x[1].rx_buf, n_rx);
2648
2649         if (x[0].tx_buf == buf)
2650                 mutex_unlock(&lock);
2651         else
2652                 kfree(local_buf);
2653
2654         return status;
2655 }
2656 EXPORT_SYMBOL_GPL(spi_write_then_read);
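
/*
 * Illustrative sketch only: reading a small register; the opcode value is
 * hypothetical.  Both buffers are copied internally, so they need not be
 * DMA-safe, but portable code should keep the combined size at or below
 * 32 bytes, per the note above.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *
 *	status = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */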
2657
2658 /*-------------------------------------------------------------------------*/
2659
2660 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
2661 static int __spi_of_device_match(struct device *dev, void *data)
2662 {
2663         return dev->of_node == data;
2664 }
2665
2666 /* caller must call put_device() when done with the returned spi_device */
2667 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2668 {
2669         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2670                                                 __spi_of_device_match);
2671         return dev ? to_spi_device(dev) : NULL;
2672 }
2673
2674 static int __spi_of_master_match(struct device *dev, const void *data)
2675 {
2676         return dev->of_node == data;
2677 }
2678
2679 /* SPI masters are not on the spi_bus, so we look them up another way */
2680 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2681 {
2682         struct device *dev;
2683
2684         dev = class_find_device(&spi_master_class, NULL, node,
2685                                 __spi_of_master_match);
2686         if (!dev)
2687                 return NULL;
2688
2689         /* reference was taken in class_find_device() */
2690         return container_of(dev, struct spi_master, dev);
2691 }
2692
2693 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2694                          void *arg)
2695 {
2696         struct of_reconfig_data *rd = arg;
2697         struct spi_master *master;
2698         struct spi_device *spi;
2699
2700         switch (of_reconfig_get_state_change(action, arg)) {
2701         case OF_RECONFIG_CHANGE_ADD:
2702                 master = of_find_spi_master_by_node(rd->dn->parent);
2703                 if (master == NULL)
2704                         return NOTIFY_OK;       /* not for us */
2705
2706                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
2707                         put_device(&master->dev);
2708                         return NOTIFY_OK;
2709                 }
2710
2711                 spi = of_register_spi_device(master, rd->dn);
2712                 put_device(&master->dev);
2713
2714                 if (IS_ERR(spi)) {
2715                         pr_err("%s: failed to create for '%s'\n",
2716                                         __func__, rd->dn->full_name);
2717                         return notifier_from_errno(PTR_ERR(spi));
2718                 }
2719                 break;
2720
2721         case OF_RECONFIG_CHANGE_REMOVE:
2722                 /* already depopulated? */
2723                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
2724                         return NOTIFY_OK;
2725
2726                 /* find our device by node */
2727                 spi = of_find_spi_device_by_node(rd->dn);
2728                 if (spi == NULL)
2729                         return NOTIFY_OK;       /* no? not meant for us */
2730
2731                 /* unregister takes one ref away */
2732                 spi_unregister_device(spi);
2733
2734                 /* and drop the reference taken by the find */
2735                 put_device(&spi->dev);
2736                 break;
2737         }
2738
2739         return NOTIFY_OK;
2740 }
2741
2742 static struct notifier_block spi_of_notifier = {
2743         .notifier_call = of_spi_notify,
2744 };
2745 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2746 extern struct notifier_block spi_of_notifier;
2747 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2748
2749 static int __init spi_init(void)
2750 {
2751         int     status;
2752
2753         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2754         if (!buf) {
2755                 status = -ENOMEM;
2756                 goto err0;
2757         }
2758
2759         status = bus_register(&spi_bus_type);
2760         if (status < 0)
2761                 goto err1;
2762
2763         status = class_register(&spi_master_class);
2764         if (status < 0)
2765                 goto err2;
2766
2767         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2768                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2769
2770         return 0;
2771
2772 err2:
2773         bus_unregister(&spi_bus_type);
2774 err1:
2775         kfree(buf);
2776         buf = NULL;
2777 err0:
2778         return status;
2779 }
2780
2781 /* board_info is normally registered in arch_initcall(),
2782  * but even essential drivers wait till later.
2783  *
2784  * REVISIT only boardinfo really needs static linking. The rest (device and
2785  * driver registration) _could_ be dynamically linked (modular) ... costs
2786  * include needing to have boardinfo data structures be much more public.
2787  */
2788 postcore_initcall(spi_init);
2789