/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif
static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	acpi_dev_pm_attach(dev, true);
	ret = sdrv->probe(to_spi_device(dev));
	if (ret)
		acpi_dev_pm_detach(dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	acpi_dev_pm_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
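
/*
 * Example (illustrative sketch, not part of this file): a minimal protocol
 * driver registered through spi_register_driver() via the module_spi_driver()
 * helper.  All "foo_*" names are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_0;
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */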
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
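
/*
 * Example (sketch): how a bus glue driver might pair spi_alloc_device() and
 * spi_add_device() to instantiate a device it learned about out-of-band.
 * The chip-select, speed and modalias values below are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	(discard without registering)
 *		return -ENODEV;
 *	}
 */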
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
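
/*
 * Example (sketch): board init code declaring one hard-wired device.  The
 * modalias, bus number and wiring shown are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	Typically called from arch_initcall()-time board code:
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */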
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_set_buf(&sgt->sgl[i], sg_buf, min);

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}
static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
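
/*
 * Example (sketch): a controller driver built on transfer_one().  Returning
 * a positive value tells spi_transfer_one_message() the transfer completes
 * asynchronously, so the driver calls spi_finalize_current_transfer() from
 * its completion interrupt.  The "foo_*" helpers are hypothetical.
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw(master, xfer);
 *		return 1;	(in flight; core waits on xfer_completion)
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */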
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}
static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}
static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}
static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	int rc;
	u32 value;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		rc = of_property_read_u32(nc, "reg", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = value;

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;
		if (of_find_property(nc, "spi-lsb-first", NULL))
			spi->mode |= SPI_LSB_FIRST;

		/* Device DUAL/QUAD mode */
		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_TX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-tx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_RX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-rx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		/* Device speed */
		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = value;

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
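
/*
 * Example (sketch): the final stage of a hypothetical platform driver's
 * probe() hooking a controller up to this core.  Everything "foo_*" and all
 * parameter values are illustrative only.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->bus_num = pdev->id;
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->transfer_one = foo_transfer_one;
 *		master->dev.of_node = pdev->dev.of_node;
 *
 *		ret = spi_register_master(master);
 *		if (ret)
 *			spi_master_put(master);
 *		return ret;
 *	}
 */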
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
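
/*
 * Example (sketch): a protocol driver adjusting its device from probe()
 * before issuing I/O.  The mode, word size and speed values shown are
 * hypothetical.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */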
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
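
/*
 * Example (sketch): submitting a two-transfer message asynchronously.  The
 * cmd/resp buffers are hypothetical, caller-owned and DMA-safe, and must
 * stay allocated until the completion callback has run.
 *
 *	static void foo_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = 1, },
 *		{ .rx_buf = resp, .len = 4, },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	m.complete = foo_complete;
 *	m.context = &done;
 *	status = spi_async(spi, &m);
 */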
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
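
/*
 * Example (sketch): a blocking full-duplex exchange via spi_sync().  The
 * tx/rx buffers and len are hypothetical, caller-owned, and remain valid
 * until the call returns.
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);
 */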
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
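
/*
 * Example (sketch): issuing two messages back-to-back with no other traffic
 * interleaved, using the bus lock API.  msg1/msg2 are hypothetical messages
 * prepared as usual with spi_message_init() and spi_message_add_tail().
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 */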
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
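
/*
 * Example (sketch): reading one register of a hypothetical chip whose
 * protocol is "write one command byte, then read one data byte".  The
 * FOO_* constants are illustrative only.
 *
 *	u8 cmd = FOO_REG_STATUS | FOO_READ;
 *	u8 val;
 *	int status;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (status < 0)
 *		return status;
 */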
/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);