[cascardo/linux.git] / drivers / spi / spi.c
/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_master_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_master *master = container_of(dev,                   \
                                                 struct spi_master, dev); \
        return spi_statistics_##field##_show(&master->statistics, buf); \
}                                                                       \
static struct device_attribute dev_attr_spi_master_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_master_##field##_show,                              \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                         char *buf)                     \
{                                                                       \
        struct spi_device *spi = container_of(dev,                      \
                                              struct spi_device, dev);  \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = S_IRUGO },                      \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
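
/*
 * For orientation: a single SPI_STATISTICS_SHOW(messages, "%lu") above
 * expands, via SPI_STATISTICS_SHOW_NAME() and SPI_STATISTICS_ATTRS(), to
 * roughly the following (a sketch, not literal preprocessor output):
 *
 *      static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *                                                  char *buf)
 *      {
 *              ...take stat->lock, sprintf(buf, "%lu", stat->messages)...
 *      }
 *      static struct device_attribute dev_attr_spi_master_messages = {
 *              .attr = { .name = "messages", .mode = S_IRUGO },
 *              .show = spi_master_messages_show,
 *      };
 *      static struct device_attribute dev_attr_spi_device_messages = {
 *              .attr = { .name = "messages", .mode = S_IRUGO },
 *              .show = spi_device_messages_show,
 *      };
 *
 * i.e. one locked show helper plus one attribute each for the master and
 * the device, which the attribute arrays below collect into sysfs groups.
 */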

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_messages.attr,
        &dev_attr_spi_master_transfers.attr,
        &dev_attr_spi_master_errors.attr,
        &dev_attr_spi_master_timedout.attr,
        &dev_attr_spi_master_spi_sync.attr,
        &dev_attr_spi_master_spi_sync_immediate.attr,
        &dev_attr_spi_master_spi_async.attr,
        &dev_attr_spi_master_bytes.attr,
        &dev_attr_spi_master_bytes_rx.attr,
        &dev_attr_spi_master_bytes_tx.attr,
        NULL,
};

static const struct attribute_group spi_master_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_master_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != master->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != master->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
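
/*
 * These counters surface in sysfs under a "statistics" directory on both
 * the controller and each device.  For a hypothetical bus 0 with a device
 * on chip select 0, reads would look like:
 *
 *      cat /sys/class/spi_master/spi0/statistics/messages
 *      cat /sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 */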

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
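
/*
 * For example (illustrative only), a device whose modalias is "ads7846"
 * emits MODALIAS=spi:ads7846 on hotplug, since SPI_MODULE_PREFIX is "spi:";
 * "modprobe $MODALIAS" then resolves through the aliases generated from
 * the driver's MODULE_DEVICE_TABLE(spi, ...).  ACPI-enumerated devices
 * get an ACPI-style modalias instead.
 */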

static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(to_spi_device(dev));
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
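
/*
 * A minimal client driver registration, sketched with placeholder "foo"
 * names (not a real driver):
 *
 *      static int foo_probe(struct spi_device *spi)
 *      {
 *              return 0;
 *      }
 *
 *      static struct spi_driver foo_driver = {
 *              .driver = {
 *                      .name   = "foo",
 *                      .owner  = THIS_MODULE,
 *              },
 *              .probe  = foo_probe,
 *      };
 *      module_spi_driver(foo_driver);
 *
 * module_spi_driver() generates the module_init()/module_exit() glue that
 * calls spi_register_driver()/spi_unregister_driver().
 */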

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, along with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device       *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
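
/*
 * Typical pairing of the two calls above, with made-up values; error
 * handling trimmed to the essential spi_dev_put() on failure:
 *
 *      struct spi_device *spi = spi_alloc_device(master);
 *      int status;
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *      status = spi_add_device(spi);
 *      if (status < 0)
 *              spi_dev_put(spi);
 *      return status;
 */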

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /* NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
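
/*
 * A board file would typically call this from early init code; a sketch
 * with illustrative values:
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "foo",
 *                      .bus_num        = 0,
 *                      .chip_select    = 0,
 *                      .max_speed_hz   = 2000000,
 *                      .mode           = SPI_MODE_0,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */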

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpio >= 0)
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}
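
/*
 * Polarity, spelled out: the "enable" argument means "assert the chip
 * select", and after the SPI_CS_HIGH flip above, !enable is the logic
 * level actually driven (the same convention the set_cs() callback sees):
 *
 *      SPI_CS_HIGH     assert?         level driven
 *      no              yes             0  (active low)
 *      no              no              1
 *      yes             yes             1  (active high)
 *      yes             no              0
 */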

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
        const int sgs = DIV_ROUND_UP(len, desc_len);
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {
                min = min_t(size_t, len, desc_len);

                if (vmalloced_buf) {
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}
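
/*
 * Sizing example for the function above: a page-aligned 10000 byte
 * vmalloc'ed buffer with PAGE_SIZE == 4096 yields
 * sgs = DIV_ROUND_UP(10000, 4096) = 3 scatterlist entries of 4096, 4096
 * and 1808 bytes, each backed by vmalloc_to_page() for its chunk.
 */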

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        tx_dev = master->dma_tx->device->dev;
        rx_dev = master->dma_rx->device->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        tx_dev = master->dma_tx->device->dev;
        rx_dev = master->dma_rx->device->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * If tx_buf or rx_buf were originally NULL and spi_map_msg()
                 * pointed them at the dummy buffers, restore them to NULL.
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, master);
                spi_statistics_add_transfer_stats(stats, xfer, master);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
                                ms = xfer->len * 8 * 1000 / xfer->speed_hz;
                                ms += ms + 100; /* some tolerance */
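                                /*
                                 * Worked example: a 4096 byte transfer at
                                 * 1 MHz gives 4096 * 8 * 1000 / 1000000 =
                                 * 32 ms on the wire, so ms becomes
                                 * 32 + 32 + 100 = 164 ms for the timeout
                                 * below.
                                 */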

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               timedout);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               timedout);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
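
/*
 * A driver on the transfer_one() path typically starts the hardware and
 * returns a positive value, then signals completion from its interrupt
 * handler.  Sketch with hypothetical foo_* helpers:
 *
 *      static int foo_transfer_one(struct spi_master *master,
 *                                  struct spi_device *spi,
 *                                  struct spi_transfer *xfer)
 *      {
 *              foo_start_dma(master, xfer);
 *              return 1;       (positive: transfer still in flight,
 *                               the core waits on xfer_completion)
 *      }
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              struct spi_master *master = dev_id;
 *
 *              spi_finalize_current_transfer(master);
 *              return IRQ_HANDLED;
 *      }
 */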

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                queue_kthread_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        return;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                return;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                return;
        }
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        init_kthread_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        init_kthread_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread. Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                        "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        queue_kthread_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message. Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * flush_kthread_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        flush_kthread_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued onto the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
        return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
        int ret;

        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;

        /* Initialize and start queue */
        ret = spi_init_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
        master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
                goto err_start_queue;
        }

        return 0;

err_start_queue:
        spi_destroy_queue(master);
err_init_queue:
        return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
        struct spi_device *spi;
        int rc;
        u32 value;

        /* Alloc an spi_device */
        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "spi_device alloc error for %s\n",
                        nc->full_name);
                rc = -ENOMEM;
                goto err_out;
        }

        /* Select device driver */
        rc = of_modalias_node(nc, spi->modalias,
                                sizeof(spi->modalias));
        if (rc < 0) {
                dev_err(&master->dev, "cannot find modalias for %s\n",
                        nc->full_name);
                goto err_out;
        }

        /* Device address */
        rc = of_property_read_u32(nc, "reg", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->chip_select = value;

        /* Mode (clock phase/polarity/etc.) */
        if (of_find_property(nc, "spi-cpha", NULL))
                spi->mode |= SPI_CPHA;
        if (of_find_property(nc, "spi-cpol", NULL))
                spi->mode |= SPI_CPOL;
        if (of_find_property(nc, "spi-cs-high", NULL))
                spi->mode |= SPI_CS_HIGH;
        if (of_find_property(nc, "spi-3wire", NULL))
                spi->mode |= SPI_3WIRE;
        if (of_find_property(nc, "spi-lsb-first", NULL))
                spi->mode |= SPI_LSB_FIRST;

        /* Device DUAL/QUAD mode */
        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_TX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_TX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                "spi-tx-bus-width %d not supported\n",
                                value);
                        break;
                }
        }

        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_RX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_RX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                "spi-rx-bus-width %d not supported\n",
                                value);
                        break;
                }
        }

        /* Device speed */
        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->max_speed_hz = value;

        /* IRQ */
        spi->irq = irq_of_parse_and_map(nc, 0);

        /* Store a pointer to the node in the device structure */
        of_node_get(nc);
        spi->dev.of_node = nc;

        /* Register the new device */
        rc = spi_add_device(spi);
        if (rc) {
                dev_err(&master->dev, "spi_device register error %s\n",
                        nc->full_name);
                goto err_out;
        }

        return spi;

err_out:
        spi_dev_put(spi);
        return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:     Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
        struct spi_device *spi;
        struct device_node *nc;

        if (!master->dev.of_node)
                return;

        for_each_available_child_of_node(master->dev.of_node, nc) {
                spi = of_register_spi_device(master, nc);
                if (IS_ERR(spi))
                        dev_warn(&master->dev, "Failed to create SPI device for %s\n",
                                nc->full_name);
        }
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
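
/*
 * For reference, a devicetree fragment that the parsing above accepts
 * (values are illustrative):
 *
 *      &spi0 {
 *              flash@0 {
 *                      compatible = "jedec,spi-nor";
 *                      reg = <0>;                      // chip select
 *                      spi-max-frequency = <20000000>;
 *                      spi-cpha;
 *                      spi-cpol;
 *              };
 *      };
 */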

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
        struct spi_device *spi = data;

        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
                struct acpi_resource_spi_serialbus *sb;

                sb = &ares->data.spi_serial_bus;
                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
                        spi->chip_select = sb->device_selection;
                        spi->max_speed_hz = sb->connection_speed;

                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
                                spi->mode |= SPI_CPHA;
                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
                                spi->mode |= SPI_CPOL;
                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
                                spi->mode |= SPI_CS_HIGH;
                }
        } else if (spi->irq < 0) {
                struct resource r;

                if (acpi_dev_resource_interrupt(ares, 0, &r))
                        spi->irq = r.start;
        }

        /* Always tell the ACPI core to skip this resource */
        return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                                       void *data, void **return_value)
{
        struct spi_master *master = data;
        struct list_head resource_list;
        struct acpi_device *adev;
        struct spi_device *spi;
        int ret;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "failed to allocate SPI device for %s\n",
                        dev_name(&adev->dev));
                return AE_NO_MEMORY;
        }

        ACPI_COMPANION_SET(&spi->dev, adev);
        spi->irq = -1;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list,
                                     acpi_spi_add_resource, spi);
        acpi_dev_free_resource_list(&resource_list);

        if (ret < 0 || !spi->max_speed_hz) {
                spi_dev_put(spi);
                return AE_OK;
        }

        adev->power.flags.ignore_parent = true;
        strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
        if (spi_add_device(spi)) {
                adev->power.flags.ignore_parent = false;
                dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
                        dev_name(&adev->dev));
                spi_dev_put(spi);
        }

        return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
        acpi_status status;
        acpi_handle handle;

        handle = ACPI_HANDLE(master->dev.parent);
        if (!handle)
                return;

        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     acpi_spi_add_device, NULL,
                                     master, NULL);
        if (ACPI_FAILURE(status))
                dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
        struct spi_master *master;

        master = container_of(dev, struct spi_master, dev);
        kfree(master);
}

static struct class spi_master_class = {
        .name           = "spi_master",
        .owner          = THIS_MODULE,
        .dev_release    = spi_master_release,
        .dev_groups     = spi_master_groups,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *      memory is in the driver_data field of the returned device,
 *      accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
1589  * adding the device) calling spi_master_put() to prevent a memory
1590  * leak.
1591  */
1592 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1593 {
1594         struct spi_master       *master;
1595
1596         if (!dev)
1597                 return NULL;
1598
1599         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1600         if (!master)
1601                 return NULL;
1602
1603         device_initialize(&master->dev);
1604         master->bus_num = -1;
1605         master->num_chipselect = 1;
1606         master->dev.class = &spi_master_class;
1607         master->dev.parent = get_device(dev);
1608         spi_master_set_devdata(master, &master[1]);
1609
1610         return master;
1611 }
1612 EXPORT_SYMBOL_GPL(spi_alloc_master);
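
/*
 * Illustrative only: a sketch of probe() in a hypothetical controller
 * driver (foo_priv, foo_transfer_one and the sizes are made up):
 *
 *	struct spi_master *master;
 *	struct foo_priv *priv;
 *	int ret;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	master->num_chipselect = 4;
 *	master->transfer_one = foo_transfer_one;
 *	ret = spi_register_master(master);
 *	if (ret)
 *		spi_master_put(master);
 *	return ret;
 */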
1613
1614 #ifdef CONFIG_OF
1615 static int of_spi_register_master(struct spi_master *master)
1616 {
1617         int nb, i, *cs;
1618         struct device_node *np = master->dev.of_node;
1619
1620         if (!np)
1621                 return 0;
1622
1623         nb = of_gpio_named_count(np, "cs-gpios");
1624         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1625
1626         /* Return error only for an incorrectly formed cs-gpios property */
1627         if (nb == 0 || nb == -ENOENT)
1628                 return 0;
1629         else if (nb < 0)
1630                 return nb;
1631
1632         cs = devm_kzalloc(&master->dev,
1633                           sizeof(int) * master->num_chipselect,
1634                           GFP_KERNEL);
1635         master->cs_gpios = cs;
1636
1637         if (!master->cs_gpios)
1638                 return -ENOMEM;
1639
1640         for (i = 0; i < master->num_chipselect; i++)
1641                 cs[i] = -ENOENT;
1642
1643         for (i = 0; i < nb; i++)
1644                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1645
1646         return 0;
1647 }
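
/*
 * Illustrative only: a hypothetical controller node whose "cs-gpios"
 * property the code above copies into master->cs_gpios:
 *
 *	spi@f0000000 {
 *		compatible = "vendor,foo-spi";
 *		cs-gpios = <&gpio0 4 0>, <&gpio0 5 0>;
 *	};
 */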
1648 #else
1649 static int of_spi_register_master(struct spi_master *master)
1650 {
1651         return 0;
1652 }
1653 #endif
1654
1655 /**
1656  * spi_register_master - register SPI master controller
1657  * @master: initialized master, originally from spi_alloc_master()
1658  * Context: can sleep
1659  *
1660  * SPI master controllers connect to their drivers using some non-SPI bus,
1661  * such as the platform bus.  The final stage of probe() in that code
1662  * includes calling spi_register_master() to hook up to this SPI bus glue.
1663  *
1664  * SPI controllers use board specific (often SOC specific) bus numbers,
1665  * and board-specific addressing for SPI devices combines those numbers
1666  * with chip select numbers.  Since SPI does not directly support dynamic
1667  * device identification, boards need configuration tables telling which
1668  * chip is at which address.
1669  *
1670  * This must be called from context that can sleep.  It returns zero on
1671  * success, else a negative error code (dropping the master's refcount).
1672  * After a successful return, the caller is responsible for calling
1673  * spi_unregister_master().
1674  */
1675 int spi_register_master(struct spi_master *master)
1676 {
1677         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1678         struct device           *dev = master->dev.parent;
1679         struct boardinfo        *bi;
1680         int                     status = -ENODEV;
1681         int                     dynamic = 0;
1682
1683         if (!dev)
1684                 return -ENODEV;
1685
1686         status = of_spi_register_master(master);
1687         if (status)
1688                 return status;
1689
1690         /* even if it's just one always-selected device, there must
1691          * be at least one chipselect
1692          */
1693         if (master->num_chipselect == 0)
1694                 return -EINVAL;
1695
1696         if ((master->bus_num < 0) && master->dev.of_node)
1697                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1698
1699         /* convention:  dynamically assigned bus IDs count down from the max */
1700         if (master->bus_num < 0) {
1701                 /* FIXME switch to an IDR based scheme, something like
1702                  * I2C now uses, so we can't run out of "dynamic" IDs
1703                  */
1704                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1705                 dynamic = 1;
1706         }
1707
1708         INIT_LIST_HEAD(&master->queue);
1709         spin_lock_init(&master->queue_lock);
1710         spin_lock_init(&master->bus_lock_spinlock);
1711         mutex_init(&master->bus_lock_mutex);
1712         master->bus_lock_flag = 0;
1713         init_completion(&master->xfer_completion);
1714         if (!master->max_dma_len)
1715                 master->max_dma_len = INT_MAX;
1716
1717         /* register the device, then userspace will see it.
1718          * registration fails if the bus ID is in use.
1719          */
1720         dev_set_name(&master->dev, "spi%u", master->bus_num);
1721         status = device_add(&master->dev);
1722         if (status < 0)
1723                 goto done;
1724         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1725                         dynamic ? " (dynamic)" : "");
1726
1727         /* If we're using a queued driver, start the queue */
1728         if (master->transfer)
1729                 dev_info(dev, "master is unqueued, this is deprecated\n");
1730         else {
1731                 status = spi_master_initialize_queue(master);
1732                 if (status) {
1733                         device_del(&master->dev);
1734                         goto done;
1735                 }
1736         }
1737         /* add statistics */
1738         spin_lock_init(&master->statistics.lock);
1739
1740         mutex_lock(&board_lock);
1741         list_add_tail(&master->list, &spi_master_list);
1742         list_for_each_entry(bi, &board_list, list)
1743                 spi_match_master_to_boardinfo(master, &bi->board_info);
1744         mutex_unlock(&board_lock);
1745
1746         /* Register devices from the device tree and ACPI */
1747         of_register_spi_devices(master);
1748         acpi_register_spi_devices(master);
1749 done:
1750         return status;
1751 }
1752 EXPORT_SYMBOL_GPL(spi_register_master);
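
/*
 * Illustrative only: the kind of board configuration table referred to
 * above, registered early from board setup code; all names here are
 * hypothetical:
 *
 *	static struct spi_board_info foo_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(foo_spi_devices,
 *				ARRAY_SIZE(foo_spi_devices));
 */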
1753
1754 static void devm_spi_unregister(struct device *dev, void *res)
1755 {
1756         spi_unregister_master(*(struct spi_master **)res);
1757 }
1758
1759 /**
1760  * devm_spi_register_master - register managed SPI master controller
1761  * @dev:    device managing SPI master
1762  * @master: initialized master, originally from spi_alloc_master()
1763  * Context: can sleep
1764  *
1765  * Register an SPI master as with spi_register_master(), which will
1766  * automatically be unregistered when @dev is removed.
1767  */
1768 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1769 {
1770         struct spi_master **ptr;
1771         int ret;
1772
1773         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1774         if (!ptr)
1775                 return -ENOMEM;
1776
1777         ret = spi_register_master(master);
1778         if (!ret) {
1779                 *ptr = master;
1780                 devres_add(dev, ptr);
1781         } else {
1782                 devres_free(ptr);
1783         }
1784
1785         return ret;
1786 }
1787 EXPORT_SYMBOL_GPL(devm_spi_register_master);
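
/*
 * Illustrative only: with the managed variant a hypothetical probe()
 * needs no matching remove(); unregistration happens automatically when
 * the managing device goes away:
 *
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		spi_master_put(master);
 *	return ret;
 */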
1788
1789 static int __unregister(struct device *dev, void *null)
1790 {
1791         spi_unregister_device(to_spi_device(dev));
1792         return 0;
1793 }
1794
1795 /**
1796  * spi_unregister_master - unregister SPI master controller
1797  * @master: the master being unregistered
1798  * Context: can sleep
1799  *
1800  * This call is used only by SPI master controller drivers, which are the
1801  * only ones directly touching chip registers.
1802  *
1803  * This must be called from context that can sleep.
1804  */
1805 void spi_unregister_master(struct spi_master *master)
1806 {
1807         int dummy;
1808
1809         if (master->queued) {
1810                 if (spi_destroy_queue(master))
1811                         dev_err(&master->dev, "queue remove failed\n");
1812         }
1813
1814         mutex_lock(&board_lock);
1815         list_del(&master->list);
1816         mutex_unlock(&board_lock);
1817
1818         dummy = device_for_each_child(&master->dev, NULL, __unregister);
1819         device_unregister(&master->dev);
1820 }
1821 EXPORT_SYMBOL_GPL(spi_unregister_master);
1822
1823 int spi_master_suspend(struct spi_master *master)
1824 {
1825         int ret;
1826
1827         /* Basically no-ops for non-queued masters */
1828         if (!master->queued)
1829                 return 0;
1830
1831         ret = spi_stop_queue(master);
1832         if (ret)
1833                 dev_err(&master->dev, "queue stop failed\n");
1834
1835         return ret;
1836 }
1837 EXPORT_SYMBOL_GPL(spi_master_suspend);
1838
1839 int spi_master_resume(struct spi_master *master)
1840 {
1841         int ret;
1842
1843         if (!master->queued)
1844                 return 0;
1845
1846         ret = spi_start_queue(master);
1847         if (ret)
1848                 dev_err(&master->dev, "queue restart failed\n");
1849
1850         return ret;
1851 }
1852 EXPORT_SYMBOL_GPL(spi_master_resume);
1853
1854 static int __spi_master_match(struct device *dev, const void *data)
1855 {
1856         struct spi_master *m;
1857         const u16 *bus_num = data;
1858
1859         m = container_of(dev, struct spi_master, dev);
1860         return m->bus_num == *bus_num;
1861 }
1862
1863 /**
1864  * spi_busnum_to_master - look up master associated with bus_num
1865  * @bus_num: the master's bus number
1866  * Context: can sleep
1867  *
1868  * This call may be used with devices that are registered after
1869  * arch init time.  It returns a refcounted pointer to the relevant
1870  * spi_master (which the caller must release), or NULL if there is
1871  * no such master registered.
1872  */
1873 struct spi_master *spi_busnum_to_master(u16 bus_num)
1874 {
1875         struct device           *dev;
1876         struct spi_master       *master = NULL;
1877
1878         dev = class_find_device(&spi_master_class, NULL, &bus_num,
1879                                 __spi_master_match);
1880         if (dev)
1881                 master = container_of(dev, struct spi_master, dev);
1882         /* reference obtained in class_find_device() */
1883         return master;
1884 }
1885 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
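
/*
 * Illustrative only: looking up bus 0 and releasing the reference, as
 * the comment above requires:
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		dev_dbg(&master->dev, "bus 0 is registered\n");
 *		spi_master_put(master);
 *	}
 */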
1886
1887
1888 /*-------------------------------------------------------------------------*/
1889
1890 /* Core methods for SPI master protocol drivers.  Some of the
1891  * other core methods are currently defined as inline functions.
1892  */
1893
1894 /**
1895  * spi_setup - setup SPI mode and clock rate
1896  * @spi: the device whose settings are being modified
1897  * Context: can sleep, and no requests are queued to the device
1898  *
1899  * SPI protocol drivers may need to update the transfer mode if the
1900  * device doesn't work with its default.  They may likewise need
1901  * to update clock rates or word sizes from initial values.  This function
1902  * changes those settings, and must be called from a context that can sleep.
1903  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1904  * effect the next time the device is selected and data is transferred to
1905  * or from it.  When this function returns, the spi device is deselected.
1906  *
1907  * Note that this call will fail if the protocol driver specifies an option
1908  * that the underlying controller or its driver does not support.  For
1909  * example, not all hardware supports wire transfers using nine bit words,
1910  * LSB-first wire encoding, or active-high chipselects.
1911  */
1912 int spi_setup(struct spi_device *spi)
1913 {
1914         unsigned        bad_bits, ugly_bits;
1915         int             status = 0;
1916
1917         /* check mode to prevent DUAL and QUAD being set at the same time
1918          */
1919         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
1920                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
1921                 dev_err(&spi->dev,
1922                 "setup: cannot select dual and quad at the same time\n");
1923                 return -EINVAL;
1924         }
1925         /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
1926          */
1927         if ((spi->mode & SPI_3WIRE) && (spi->mode &
1928                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
1929                 return -EINVAL;
1930         /* help drivers fail *cleanly* when they need options
1931          * that aren't supported with their current master
1932          */
1933         bad_bits = spi->mode & ~spi->master->mode_bits;
1934         ugly_bits = bad_bits &
1935                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1936         if (ugly_bits) {
1937                 dev_warn(&spi->dev,
1938                          "setup: ignoring unsupported mode bits %x\n",
1939                          ugly_bits);
1940                 spi->mode &= ~ugly_bits;
1941                 bad_bits &= ~ugly_bits;
1942         }
1943         if (bad_bits) {
1944                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1945                         bad_bits);
1946                 return -EINVAL;
1947         }
1948
1949         if (!spi->bits_per_word)
1950                 spi->bits_per_word = 8;
1951
1952         if (!spi->max_speed_hz)
1953                 spi->max_speed_hz = spi->master->max_speed_hz;
1954
1955         spi_set_cs(spi, false);
1956
1957         if (spi->master->setup)
1958                 status = spi->master->setup(spi);
1959
1960         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
1961                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1962                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1963                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1964                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
1965                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
1966                         spi->bits_per_word, spi->max_speed_hz,
1967                         status);
1968
1969         return status;
1970 }
1971 EXPORT_SYMBOL_GPL(spi_setup);
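
/*
 * Illustrative only: a protocol driver overriding defaults for a
 * hypothetical mode 3, 16-bit chip before its first transfer:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */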
1972
1973 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
1974 {
1975         struct spi_master *master = spi->master;
1976         struct spi_transfer *xfer;
1977         int w_size;
1978
1979         if (list_empty(&message->transfers))
1980                 return -EINVAL;
1981
1982         /* Half-duplex links include original MicroWire, and ones with
1983          * only one data pin like SPI_3WIRE (switches direction) or where
1984          * either MOSI or MISO is missing.  They can also be caused by
1985          * software limitations.
1986          */
1987         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1988                         || (spi->mode & SPI_3WIRE)) {
1989                 unsigned flags = master->flags;
1990
1991                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
1992                         if (xfer->rx_buf && xfer->tx_buf)
1993                                 return -EINVAL;
1994                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1995                                 return -EINVAL;
1996                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1997                                 return -EINVAL;
1998                 }
1999         }
2000
2001         /*
2002          * Set transfer bits_per_word and max speed to the spi device
2003          * defaults if they are not set for this transfer.  Set transfer
2004          * tx_nbits and rx_nbits to the single-transfer default
2005          * (SPI_NBITS_SINGLE) if they are not set for this transfer.
2006          */
2007         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2008                 message->frame_length += xfer->len;
2009                 if (!xfer->bits_per_word)
2010                         xfer->bits_per_word = spi->bits_per_word;
2011
2012                 if (!xfer->speed_hz)
2013                         xfer->speed_hz = spi->max_speed_hz;
2014
2015                 if (master->max_speed_hz &&
2016                     xfer->speed_hz > master->max_speed_hz)
2017                         xfer->speed_hz = master->max_speed_hz;
2018
2019                 if (master->bits_per_word_mask) {
2020                         /* Only 32 bits fit in the mask */
2021                         if (xfer->bits_per_word > 32)
2022                                 return -EINVAL;
2023                         if (!(master->bits_per_word_mask &
2024                                         BIT(xfer->bits_per_word - 1)))
2025                                 return -EINVAL;
2026                 }
2027
2028                 /*
2029                  * SPI transfer length must be a multiple of the SPI word size,
2030                  * where the word size is rounded up to a power-of-two number of bytes
2031                  */
2032                 if (xfer->bits_per_word <= 8)
2033                         w_size = 1;
2034                 else if (xfer->bits_per_word <= 16)
2035                         w_size = 2;
2036                 else
2037                         w_size = 4;
2038
2039                 /* No partial transfers accepted */
2040                 if (xfer->len % w_size)
2041                         return -EINVAL;
2042
2043                 if (xfer->speed_hz && master->min_speed_hz &&
2044                     xfer->speed_hz < master->min_speed_hz)
2045                         return -EINVAL;
2046
2047                 if (xfer->tx_buf && !xfer->tx_nbits)
2048                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2049                 if (xfer->rx_buf && !xfer->rx_nbits)
2050                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2051                 /* check transfer tx/rx_nbits:
2052                  * 1. check the value matches one of single, dual and quad
2053                  * 2. check tx/rx_nbits match the mode in spi_device
2054                  */
2055                 if (xfer->tx_buf) {
2056                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2057                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2058                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2059                                 return -EINVAL;
2060                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2061                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2062                                 return -EINVAL;
2063                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2064                                 !(spi->mode & SPI_TX_QUAD))
2065                                 return -EINVAL;
2066                 }
2067                 /* check transfer rx_nbits */
2068                 if (xfer->rx_buf) {
2069                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2070                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2071                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2072                                 return -EINVAL;
2073                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2074                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2075                                 return -EINVAL;
2076                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2077                                 !(spi->mode & SPI_RX_QUAD))
2078                                 return -EINVAL;
2079                 }
2080         }
2081
2082         message->status = -EINPROGRESS;
2083
2084         return 0;
2085 }
2086
2087 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2088 {
2089         struct spi_master *master = spi->master;
2090
2091         message->spi = spi;
2092
2093         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2094         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2095
2096         trace_spi_message_submit(message);
2097
2098         return master->transfer(spi, message);
2099 }
2100
2101 /**
2102  * spi_async - asynchronous SPI transfer
2103  * @spi: device with which data will be exchanged
2104  * @message: describes the data transfers, including completion callback
2105  * Context: any (irqs may be blocked, etc)
2106  *
2107  * This call may be used in IRQ handlers and other contexts which can't sleep,
2108  * as well as from task contexts which can sleep.
2109  *
2110  * The completion callback is invoked in a context which can't sleep.
2111  * Before that invocation, the value of message->status is undefined.
2112  * When the callback is issued, message->status holds either zero (to
2113  * indicate complete success) or a negative error code.  After that
2114  * callback returns, the driver which issued the transfer request may
2115  * deallocate the associated memory; it's no longer in use by any SPI
2116  * core or controller driver code.
2117  *
2118  * Note that although all messages to a spi_device are handled in
2119  * FIFO order, messages may go to different devices in other orders.
2120  * Some device might be higher priority, or have various "hard" access
2121  * time requirements, for example.
2122  *
2123  * On detection of any fault during the transfer, processing of
2124  * the entire message is aborted, and the device is deselected.
2125  * Until returning from the associated message completion callback,
2126  * no other spi_message queued to that device will be processed.
2127  * (This rule applies equally to all the synchronous transfer calls,
2128  * which are wrappers around this core asynchronous primitive.)
2129  */
2130 int spi_async(struct spi_device *spi, struct spi_message *message)
2131 {
2132         struct spi_master *master = spi->master;
2133         int ret;
2134         unsigned long flags;
2135
2136         ret = __spi_validate(spi, message);
2137         if (ret != 0)
2138                 return ret;
2139
2140         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2141
2142         if (master->bus_lock_flag)
2143                 ret = -EBUSY;
2144         else
2145                 ret = __spi_async(spi, message);
2146
2147         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2148
2149         return ret;
2150 }
2151 EXPORT_SYMBOL_GPL(spi_async);
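
/*
 * Illustrative only: a sketch of submitting a message with a completion
 * callback; foo_priv and foo_complete() are hypothetical, and the
 * callback must not sleep since it may run in IRQ context:
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		schedule_work(&priv->work);
 *	}
 *
 *	spi_message_init(&priv->msg);
 *	spi_message_add_tail(&priv->xfer, &priv->msg);
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(spi, &priv->msg);
 */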
2152
2153 /**
2154  * spi_async_locked - version of spi_async with exclusive bus usage
2155  * @spi: device with which data will be exchanged
2156  * @message: describes the data transfers, including completion callback
2157  * Context: any (irqs may be blocked, etc)
2158  *
2159  * This call may be used in IRQ handlers and other contexts which can't sleep,
2160  * as well as from task contexts which can sleep.
2161  *
2162  * The completion callback is invoked in a context which can't sleep.
2163  * Before that invocation, the value of message->status is undefined.
2164  * When the callback is issued, message->status holds either zero (to
2165  * indicate complete success) or a negative error code.  After that
2166  * callback returns, the driver which issued the transfer request may
2167  * deallocate the associated memory; it's no longer in use by any SPI
2168  * core or controller driver code.
2169  *
2170  * Note that although all messages to a spi_device are handled in
2171  * FIFO order, messages may go to different devices in other orders.
2172  * Some device might be higher priority, or have various "hard" access
2173  * time requirements, for example.
2174  *
2175  * On detection of any fault during the transfer, processing of
2176  * the entire message is aborted, and the device is deselected.
2177  * Until returning from the associated message completion callback,
2178  * no other spi_message queued to that device will be processed.
2179  * (This rule applies equally to all the synchronous transfer calls,
2180  * which are wrappers around this core asynchronous primitive.)
2181  */
2182 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2183 {
2184         struct spi_master *master = spi->master;
2185         int ret;
2186         unsigned long flags;
2187
2188         ret = __spi_validate(spi, message);
2189         if (ret != 0)
2190                 return ret;
2191
2192         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2193
2194         ret = __spi_async(spi, message);
2195
2196         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2197
2198         return ret;
2199
2200 }
2201 EXPORT_SYMBOL_GPL(spi_async_locked);
2202
2203
2204 /*-------------------------------------------------------------------------*/
2205
2206 /* Utility methods for SPI master protocol drivers, layered on
2207  * top of the core.  Some other utility methods are defined as
2208  * inline functions.
2209  */
2210
2211 static void spi_complete(void *arg)
2212 {
2213         complete(arg);
2214 }
2215
2216 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2217                       int bus_locked)
2218 {
2219         DECLARE_COMPLETION_ONSTACK(done);
2220         int status;
2221         struct spi_master *master = spi->master;
2222         unsigned long flags;
2223
2224         status = __spi_validate(spi, message);
2225         if (status != 0)
2226                 return status;
2227
2228         message->complete = spi_complete;
2229         message->context = &done;
2230         message->spi = spi;
2231
2232         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2233         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2234
2235         if (!bus_locked)
2236                 mutex_lock(&master->bus_lock_mutex);
2237
2238         /* If we're not using the legacy transfer method then we will
2239          * try to transfer in the calling context, so special-case it here.
2240          * This code would be less tricky if we could remove the
2241          * support for driver implemented message queues.
2242          */
2243         if (master->transfer == spi_queued_transfer) {
2244                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2245
2246                 trace_spi_message_submit(message);
2247
2248                 status = __spi_queued_transfer(spi, message, false);
2249
2250                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2251         } else {
2252                 status = spi_async_locked(spi, message);
2253         }
2254
2255         if (!bus_locked)
2256                 mutex_unlock(&master->bus_lock_mutex);
2257
2258         if (status == 0) {
2259                 /* Push out the messages in the calling context if we
2260                  * can.
2261                  */
2262                 if (master->transfer == spi_queued_transfer) {
2263                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2264                                                        spi_sync_immediate);
2265                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2266                                                        spi_sync_immediate);
2267                         __spi_pump_messages(master, false);
2268                 }
2269
2270                 wait_for_completion(&done);
2271                 status = message->status;
2272         }
2273         message->context = NULL;
2274         return status;
2275 }
2276
2277 /**
2278  * spi_sync - blocking/synchronous SPI data transfers
2279  * @spi: device with which data will be exchanged
2280  * @message: describes the data transfers
2281  * Context: can sleep
2282  *
2283  * This call may only be used from a context that may sleep.  The sleep
2284  * is non-interruptible, and has no timeout.  Low-overhead controller
2285  * drivers may DMA directly into and out of the message buffers.
2286  *
2287  * Note that the SPI device's chip select is active during the message,
2288  * and then is normally disabled between messages.  Drivers for some
2289  * frequently-used devices may want to minimize costs of selecting a chip,
2290  * by leaving it selected in anticipation that the next message will go
2291  * to the same chip.  (That may increase power usage.)
2292  *
2293  * Also, the caller is guaranteeing that the memory associated with the
2294  * message will not be freed before this call returns.
2295  *
2296  * It returns zero on success, else a negative error code.
2297  */
2298 int spi_sync(struct spi_device *spi, struct spi_message *message)
2299 {
2300         return __spi_sync(spi, message, 0);
2301 }
2302 EXPORT_SYMBOL_GPL(spi_sync);
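
/*
 * Illustrative only: a minimal full-duplex exchange; tx and rx are
 * hypothetical caller-owned buffers that stay valid until spi_sync()
 * returns:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= sizeof(tx),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */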
2303
2304 /**
2305  * spi_sync_locked - version of spi_sync with exclusive bus usage
2306  * @spi: device with which data will be exchanged
2307  * @message: describes the data transfers
2308  * Context: can sleep
2309  *
2310  * This call may only be used from a context that may sleep.  The sleep
2311  * is non-interruptible, and has no timeout.  Low-overhead controller
2312  * drivers may DMA directly into and out of the message buffers.
2313  *
2314  * This call should be used by drivers that require exclusive access to the
2315  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2316  * be released by a spi_bus_unlock call when the exclusive access is over.
2317  *
2318  * It returns zero on success, else a negative error code.
2319  */
2320 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2321 {
2322         return __spi_sync(spi, message, 1);
2323 }
2324 EXPORT_SYMBOL_GPL(spi_sync_locked);
2325
2326 /**
2327  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2328  * @master: SPI bus master that should be locked for exclusive bus access
2329  * Context: can sleep
2330  *
2331  * This call may only be used from a context that may sleep.  The sleep
2332  * is non-interruptible, and has no timeout.
2333  *
2334  * This call should be used by drivers that require exclusive access to the
2335  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2336  * exclusive access is over. Data transfer must be done by spi_sync_locked
2337  * and spi_async_locked calls when the SPI bus lock is held.
2338  *
2339  * It returns zero on success, else a negative error code.
2340  */
2341 int spi_bus_lock(struct spi_master *master)
2342 {
2343         unsigned long flags;
2344
2345         mutex_lock(&master->bus_lock_mutex);
2346
2347         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2348         master->bus_lock_flag = 1;
2349         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2350
2351         /* mutex remains locked until spi_bus_unlock is called */
2352
2353         return 0;
2354 }
2355 EXPORT_SYMBOL_GPL(spi_bus_lock);
2356
2357 /**
2358  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2359  * @master: SPI bus master that was locked for exclusive bus access
2360  * Context: can sleep
2361  *
2362  * This call may only be used from a context that may sleep.  The sleep
2363  * is non-interruptible, and has no timeout.
2364  *
2365  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2366  * call.
2367  *
2368  * It returns zero on success, else a negative error code.
2369  */
2370 int spi_bus_unlock(struct spi_master *master)
2371 {
2372         master->bus_lock_flag = 0;
2373
2374         mutex_unlock(&master->bus_lock_mutex);
2375
2376         return 0;
2377 }
2378 EXPORT_SYMBOL_GPL(spi_bus_unlock);
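
/*
 * Illustrative only: the expected sequence for a driver that must keep
 * other clients off the bus between two (hypothetical) messages:
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->master);
 */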
2379
2380 /* portable code must never pass more than 32 bytes */
2381 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2382
2383 static u8       *buf;
2384
2385 /**
2386  * spi_write_then_read - SPI synchronous write followed by read
2387  * @spi: device with which data will be exchanged
2388  * @txbuf: data to be written (need not be dma-safe)
2389  * @n_tx: size of txbuf, in bytes
2390  * @rxbuf: buffer into which data will be read (need not be dma-safe)
2391  * @n_rx: size of rxbuf, in bytes
2392  * Context: can sleep
2393  *
2394  * This performs a half duplex MicroWire style transaction with the
2395  * device, sending txbuf and then reading rxbuf.  The return value
2396  * is zero for success, else a negative errno status code.
2397  * This call may only be used from a context that may sleep.
2398  *
2399  * Parameters to this routine are always copied using a small buffer;
2400  * portable code should never use this for more than 32 bytes.
2401  * Performance-sensitive or bulk transfer code should instead use
2402  * spi_{async,sync}() calls with dma-safe buffers.
2403  */
2404 int spi_write_then_read(struct spi_device *spi,
2405                 const void *txbuf, unsigned n_tx,
2406                 void *rxbuf, unsigned n_rx)
2407 {
2408         static DEFINE_MUTEX(lock);
2409
2410         int                     status;
2411         struct spi_message      message;
2412         struct spi_transfer     x[2];
2413         u8                      *local_buf;
2414
2415         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
2416          * copying here (as a pure convenience thing), but we can
2417          * keep heap costs out of the hot path unless someone else is
2418          * using the pre-allocated buffer or the transfer is too large.
2419          */
2420         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2421                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2422                                     GFP_KERNEL | GFP_DMA);
2423                 if (!local_buf)
2424                         return -ENOMEM;
2425         } else {
2426                 local_buf = buf;
2427         }
2428
2429         spi_message_init(&message);
2430         memset(x, 0, sizeof(x));
2431         if (n_tx) {
2432                 x[0].len = n_tx;
2433                 spi_message_add_tail(&x[0], &message);
2434         }
2435         if (n_rx) {
2436                 x[1].len = n_rx;
2437                 spi_message_add_tail(&x[1], &message);
2438         }
2439
2440         memcpy(local_buf, txbuf, n_tx);
2441         x[0].tx_buf = local_buf;
2442         x[1].rx_buf = local_buf + n_tx;
2443
2444         /* do the i/o */
2445         status = spi_sync(spi, &message);
2446         if (status == 0)
2447                 memcpy(rxbuf, x[1].rx_buf, n_rx);
2448
2449         if (x[0].tx_buf == buf)
2450                 mutex_unlock(&lock);
2451         else
2452                 kfree(local_buf);
2453
2454         return status;
2455 }
2456 EXPORT_SYMBOL_GPL(spi_write_then_read);
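
/*
 * Illustrative only: reading two bytes back after writing a one-byte
 * command; the command value and register layout are hypothetical, and
 * both buffers may live on the stack since they need not be dma-safe:
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val[2];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, val, 2);
 */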
2457
2458 /*-------------------------------------------------------------------------*/
2459
2460 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
2461 static int __spi_of_device_match(struct device *dev, void *data)
2462 {
2463         return dev->of_node == data;
2464 }
2465
2466 /* must call put_device() when done with the returned spi_device */
2467 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2468 {
2469         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2470                                                 __spi_of_device_match);
2471         return dev ? to_spi_device(dev) : NULL;
2472 }
2473
2474 static int __spi_of_master_match(struct device *dev, const void *data)
2475 {
2476         return dev->of_node == data;
2477 }
2478
2479 /* the spi masters are not on spi_bus, so we find them another way */
2480 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2481 {
2482         struct device *dev;
2483
2484         dev = class_find_device(&spi_master_class, NULL, node,
2485                                 __spi_of_master_match);
2486         if (!dev)
2487                 return NULL;
2488
2489         /* reference obtained in class_find_device() */
2490         return container_of(dev, struct spi_master, dev);
2491 }
2492
2493 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2494                          void *arg)
2495 {
2496         struct of_reconfig_data *rd = arg;
2497         struct spi_master *master;
2498         struct spi_device *spi;
2499
2500         switch (of_reconfig_get_state_change(action, arg)) {
2501         case OF_RECONFIG_CHANGE_ADD:
2502                 master = of_find_spi_master_by_node(rd->dn->parent);
2503                 if (master == NULL)
2504                         return NOTIFY_OK;       /* not for us */
2505
2506                 spi = of_register_spi_device(master, rd->dn);
2507                 put_device(&master->dev);
2508
2509                 if (IS_ERR(spi)) {
2510                         pr_err("%s: failed to create for '%s'\n",
2511                                         __func__, rd->dn->full_name);
2512                         return notifier_from_errno(PTR_ERR(spi));
2513                 }
2514                 break;
2515
2516         case OF_RECONFIG_CHANGE_REMOVE:
2517                 /* find our device by node */
2518                 spi = of_find_spi_device_by_node(rd->dn);
2519                 if (spi == NULL)
2520                         return NOTIFY_OK;       /* not meant for us */
2521
2522                 /* unregister takes one ref away */
2523                 spi_unregister_device(spi);
2524
2525                 /* and put the reference of the find */
2526                 put_device(&spi->dev);
2527                 break;
2528         }
2529
2530         return NOTIFY_OK;
2531 }
2532
2533 static struct notifier_block spi_of_notifier = {
2534         .notifier_call = of_spi_notify,
2535 };
2536 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2537 extern struct notifier_block spi_of_notifier;
2538 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2539
2540 static int __init spi_init(void)
2541 {
2542         int     status;
2543
2544         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2545         if (!buf) {
2546                 status = -ENOMEM;
2547                 goto err0;
2548         }
2549
2550         status = bus_register(&spi_bus_type);
2551         if (status < 0)
2552                 goto err1;
2553
2554         status = class_register(&spi_master_class);
2555         if (status < 0)
2556                 goto err2;
2557
2558         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2559                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2560
2561         return 0;
2562
2563 err2:
2564         bus_unregister(&spi_bus_type);
2565 err1:
2566         kfree(buf);
2567         buf = NULL;
2568 err0:
2569         return status;
2570 }
2571
2572 /* board_info is normally registered in arch_initcall(),
2573  * but even essential drivers wait till later.
2574  *
2575  * REVISIT only boardinfo really needs static linking.  The rest (device and
2576  * driver registration) _could_ be dynamically linked (modular) ... costs
2577  * include needing to have boardinfo data structures be much more public.
2578  */
2579 postcore_initcall(spi_init);
2580