Merge remote-tracking branch 'spi/topic/dma' into spi-next
author Mark Brown <broonie@linaro.org>
Sun, 30 Mar 2014 00:50:56 +0000 (00:50 +0000)
committer Mark Brown <broonie@linaro.org>
Sun, 30 Mar 2014 00:50:56 +0000 (00:50 +0000)
drivers/spi/spi.c
include/linux/spi/spi.h

diff --combined drivers/spi/spi.c
@@@ -24,6 -24,8 +24,8 @@@
  #include <linux/device.h>
  #include <linux/init.h>
  #include <linux/cache.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
  #include <linux/mutex.h>
  #include <linux/of_device.h>
  #include <linux/of_irq.h>
@@@ -255,12 -257,13 +257,12 @@@ EXPORT_SYMBOL_GPL(spi_bus_type)
  static int spi_drv_probe(struct device *dev)
  {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 -      struct spi_device               *spi = to_spi_device(dev);
        int ret;
  
 -      acpi_dev_pm_attach(&spi->dev, true);
 -      ret = sdrv->probe(spi);
 +      acpi_dev_pm_attach(dev, true);
 +      ret = sdrv->probe(to_spi_device(dev));
        if (ret)
 -              acpi_dev_pm_detach(&spi->dev, true);
 +              acpi_dev_pm_detach(dev, true);
  
        return ret;
  }

  static int spi_drv_remove(struct device *dev)
  {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 -      struct spi_device               *spi = to_spi_device(dev);
        int ret;
  
 -      ret = sdrv->remove(spi);
 -      acpi_dev_pm_detach(&spi->dev, true);
 +      ret = sdrv->remove(to_spi_device(dev));
 +      acpi_dev_pm_detach(dev, true);
  
        return ret;
  }
@@@ -578,6 -582,169 +580,169 @@@ static void spi_set_cs(struct spi_devic
                spi->master->set_cs(spi, !enable);
  }
  
+ static int spi_map_buf(struct spi_master *master, struct device *dev,
+                      struct sg_table *sgt, void *buf, size_t len,
+                      enum dma_data_direction dir)
+ {
+       const bool vmalloced_buf = is_vmalloc_addr(buf);
+       const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+       const int sgs = DIV_ROUND_UP(len, desc_len);
+       struct page *vm_page;
+       void *sg_buf;
+       size_t min;
+       int i, ret;
+ 
+       ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+       if (ret != 0)
+               return ret;
+ 
+       for (i = 0; i < sgs; i++) {
+               min = min_t(size_t, len, desc_len);
+ 
+               if (vmalloced_buf) {
+                       vm_page = vmalloc_to_page(buf);
+                       if (!vm_page) {
+                               sg_free_table(sgt);
+                               return -ENOMEM;
+                       }
+                       sg_buf = page_address(vm_page) +
+                               ((size_t)buf & ~PAGE_MASK);
+               } else {
+                       sg_buf = buf;
+               }
+ 
+               sg_set_buf(&sgt->sgl[i], sg_buf, min);
+ 
+               buf += min;
+               len -= min;
+       }
+ 
+       ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+       if (ret < 0) {
+               sg_free_table(sgt);
+               return ret;
+       }
+ 
+       sgt->nents = ret;
+ 
+       return 0;
+ }
+ 
+ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+                         struct sg_table *sgt, enum dma_data_direction dir)
+ {
+       if (sgt->orig_nents) {
+               dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+               sg_free_table(sgt);
+       }
+ }
+ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+ {
+       struct device *tx_dev, *rx_dev;
+       struct spi_transfer *xfer;
+       void *tmp;
+       unsigned int max_tx, max_rx;
+       int ret;
+ 
+       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+               max_tx = 0;
+               max_rx = 0;
+ 
+               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                       if ((master->flags & SPI_MASTER_MUST_TX) &&
+                           !xfer->tx_buf)
+                               max_tx = max(xfer->len, max_tx);
+                       if ((master->flags & SPI_MASTER_MUST_RX) &&
+                           !xfer->rx_buf)
+                               max_rx = max(xfer->len, max_rx);
+               }
+ 
+               if (max_tx) {
+                       tmp = krealloc(master->dummy_tx, max_tx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_tx = tmp;
+                       memset(tmp, 0, max_tx);
+               }
+ 
+               if (max_rx) {
+                       tmp = krealloc(master->dummy_rx, max_rx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_rx = tmp;
+               }
+ 
+               if (max_tx || max_rx) {
+                       list_for_each_entry(xfer, &msg->transfers,
+                                           transfer_list) {
+                               if (!xfer->tx_buf)
+                                       xfer->tx_buf = master->dummy_tx;
+                               if (!xfer->rx_buf)
+                                       xfer->rx_buf = master->dummy_rx;
+                       }
+               }
+       }
+ 
+       if (!master->can_dma)
+               return 0;
+ 
+       tx_dev = &master->dma_tx->dev->device;
+       rx_dev = &master->dma_rx->dev->device;
+ 
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (!master->can_dma(master, msg->spi, xfer))
+                       continue;
+ 
+               if (xfer->tx_buf != NULL) {
+                       ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+                                         (void *)xfer->tx_buf, xfer->len,
+                                         DMA_TO_DEVICE);
+                       if (ret != 0)
+                               return ret;
+               }
+ 
+               if (xfer->rx_buf != NULL) {
+                       ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+                                         xfer->rx_buf, xfer->len,
+                                         DMA_FROM_DEVICE);
+                       if (ret != 0) {
+                               spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+                                             DMA_TO_DEVICE);
+                               return ret;
+                       }
+               }
+       }
+ 
+       master->cur_msg_mapped = true;
+ 
+       return 0;
+ }
+ 
+ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+ {
+       struct spi_transfer *xfer;
+       struct device *tx_dev, *rx_dev;
+ 
+       if (!master->cur_msg_mapped || !master->can_dma)
+               return 0;
+ 
+       tx_dev = &master->dma_tx->dev->device;
+       rx_dev = &master->dma_rx->dev->device;
+ 
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (!master->can_dma(master, msg->spi, xfer))
+                       continue;
+ 
+               spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+               spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+       }
+ 
+       return 0;
+ }
+ 
  /*
   * spi_transfer_one_message - Default implementation of transfer_one_message()
   *
@@@ -589,6 -756,7 +754,6 @@@ static int spi_transfer_one_message(str
                                    struct spi_message *msg)
  {
        struct spi_transfer *xfer;
 -      bool cur_cs = true;
        bool keep_cs = false;
        int ret = 0;
  
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
 -                              cur_cs = !cur_cs;
 -                              spi_set_cs(msg->spi, cur_cs);
 +                              spi_set_cs(msg->spi, false);
 +                              udelay(10);
 +                              spi_set_cs(msg->spi, true);
                        }
                }
  
@@@ -684,6 -851,10 +849,10 @@@ static void spi_pump_messages(struct kt
                }
                master->busy = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
+               kfree(master->dummy_rx);
+               master->dummy_rx = NULL;
+               kfree(master->dummy_tx);
+               master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                master->cur_msg_prepared = true;
        }
  
+       ret = spi_map_msg(master, master->cur_msg);
+       if (ret) {
+               master->cur_msg->status = ret;
+               spi_finalize_current_message(master);
+               return;
+       }
+ 
        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
 -                      "failed to transfer one message from queue: %d\n", ret);
 -              master->cur_msg->status = ret;
 -              spi_finalize_current_message(master);
 +                      "failed to transfer one message from queue\n");
                return;
        }
  }
@@@ -837,6 -1017,8 +1013,8 @@@ void spi_finalize_current_message(struc
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);
  
+       spi_unmap_msg(master, mesg);
+ 
        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
@@@ -890,7 -1072,7 +1068,7 @@@ static int spi_stop_queue(struct spi_ma
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
 -              msleep(10);
 +              usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }
  
@@@ -1370,6 -1552,8 +1548,8 @@@ int spi_register_master(struct spi_mast
        mutex_init(&master->bus_lock_mutex);
        master->bus_lock_flag = 0;
        init_completion(&master->xfer_completion);
+       if (!master->max_dma_len)
+               master->max_dma_len = INT_MAX;
  
        /* register the device, then userspace will see it.
         * registration fails if the bus ID is in use.
@@@ -1615,10 -1799,11 +1795,10 @@@ static int __spi_validate(struct spi_de
  {
        struct spi_master *master = spi->master;
        struct spi_transfer *xfer;
 +      int w_size;
  
        if (list_empty(&message->transfers))
                return -EINVAL;
 -      if (!message->complete)
 -              return -EINVAL;
  
        /* Half-duplex links include original MicroWire, and ones with
         * only one data pin like SPI_3WIRE (switches direction) or where
                message->frame_length += xfer->len;
                if (!xfer->bits_per_word)
                        xfer->bits_per_word = spi->bits_per_word;
 -              if (!xfer->speed_hz) {
 +
 +              if (!xfer->speed_hz)
                        xfer->speed_hz = spi->max_speed_hz;
 -                      if (master->max_speed_hz &&
 -                          xfer->speed_hz > master->max_speed_hz)
 -                              xfer->speed_hz = master->max_speed_hz;
 -              }
 +
 +              if (master->max_speed_hz &&
 +                  xfer->speed_hz > master->max_speed_hz)
 +                      xfer->speed_hz = master->max_speed_hz;
  
                if (master->bits_per_word_mask) {
                        /* Only 32 bits fit in the mask */
                                return -EINVAL;
                }
  
 +              /*
 +               * The SPI transfer length must be a multiple of the SPI
 +               * word size, where the word size in bytes is rounded up
 +               * to a power of two
 +               */
 +              if (xfer->bits_per_word <= 8)
 +                      w_size = 1;
 +              else if (xfer->bits_per_word <= 16)
 +                      w_size = 2;
 +              else
 +                      w_size = 4;
 +
 +              /* No partial transfers accepted */
 +              if (xfer->len % w_size)
 +                      return -EINVAL;
 +
                if (xfer->speed_hz && master->min_speed_hz &&
                    xfer->speed_hz < master->min_speed_hz)
                        return -EINVAL;
 -              if (xfer->speed_hz && master->max_speed_hz &&
 -                  xfer->speed_hz > master->max_speed_hz)
 -                      return -EINVAL;
  
                if (xfer->tx_buf && !xfer->tx_nbits)
                        xfer->tx_nbits = SPI_NBITS_SINGLE;
diff --combined include/linux/spi/spi.h
@@@ -24,6 -24,9 +24,9 @@@
  #include <linux/slab.h>
  #include <linux/kthread.h>
  #include <linux/completion.h>
+ #include <linux/scatterlist.h>
+ 
+ struct dma_chan;
  
  /*
   * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@@ -266,6 -269,7 +269,7 @@@ static inline void spi_unregister_drive
   * @auto_runtime_pm: the core should ensure a runtime PM reference is held
   *                   while the hardware is prepared, using the parent
   *                   device for the spidev
+  * @max_dma_len: Maximum length of a DMA transfer for the device.
   * @prepare_transfer_hardware: a message will soon arrive from the queue
   *    so the subsystem requests the driver to prepare the transfer hardware
   *    by issuing this call
   *    message while queuing transfers that arrive in the meantime. When the
   *    driver is finished with this message, it must call
   *    spi_finalize_current_message() so the subsystem can issue the next
 - *    transfer
 + *    message
   * @unprepare_transfer_hardware: there are currently no more messages on the
   *    queue so the subsystem notifies the driver that it may relax the
   *    hardware by issuing this call
   *                  - return 1 if the transfer is still in progress. When
   *                    the driver is finished with this transfer it must
   *                    call spi_finalize_current_transfer() so the subsystem
 - *                    can issue the next transfer
 + *                    can issue the next transfer. Note: transfer_one and
 + *                    transfer_one_message are mutually exclusive; when both
 + *                    are set, the generic subsystem does not call your
 + *                    transfer_one callback.
   * @unprepare_message: undo any work done by prepare_message().
   * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
   *    number. Any individual value may be -ENOENT for CS lines that
@@@ -348,6 -349,8 +352,8 @@@ struct spi_master 
  #define SPI_MASTER_HALF_DUPLEX        BIT(0)          /* can't do full duplex */
  #define SPI_MASTER_NO_RX      BIT(1)          /* can't do buffer read */
  #define SPI_MASTER_NO_TX      BIT(2)          /* can't do buffer write */
+ #define SPI_MASTER_MUST_RX      BIT(3)                /* requires rx */
+ #define SPI_MASTER_MUST_TX      BIT(4)                /* requires tx */
  
        /* lock and mutex for SPI bus locking */
        spinlock_t              bus_lock_spinlock;
        /* called on release() to free memory provided by spi_master */
        void                    (*cleanup)(struct spi_device *spi);
  
+       /*
+        * Used to enable core support for DMA handling; if can_dma()
+        * exists and returns true then the transfer will be mapped
+        * prior to transfer_one() being called.  The driver should
+        * not modify or store xfer, and dma_tx and dma_rx must be set
+        * while the device is prepared.
+        */
+       bool                    (*can_dma)(struct spi_master *master,
+                                          struct spi_device *spi,
+                                          struct spi_transfer *xfer);
+ 
        /*
         * These hooks are for drivers that want to use the generic
         * master transfer queueing mechanism. If these are used, the
        bool                            rt;
        bool                            auto_runtime_pm;
        bool                            cur_msg_prepared;
+       bool                            cur_msg_mapped;
        struct completion               xfer_completion;
+       size_t                          max_dma_len;
  
        int (*prepare_transfer_hardware)(struct spi_master *master);
        int (*transfer_one_message)(struct spi_master *master,
  
        /* gpio chip select */
        int                     *cs_gpios;
+ 
+       /* DMA channels for use with core dmaengine helpers */
+       struct dma_chan         *dma_tx;
+       struct dma_chan         *dma_rx;
+ 
+       /* dummy data for full duplex devices */
+       void                    *dummy_rx;
+       void                    *dummy_tx;
  };
  
  static inline void *spi_master_get_devdata(struct spi_master *master)
@@@ -512,6 -536,8 +539,8 @@@ extern struct spi_master *spi_busnum_to
   *    (optionally) changing the chipselect status, then starting
   *    the next transfer or completing this @spi_message.
   * @transfer_list: transfers are sequenced through @spi_message.transfers
+  * @tx_sg: Scatterlist for transmit, currently not for client use
+  * @rx_sg: Scatterlist for receive, currently not for client use
   *
   * SPI transfers always write the same number of bytes as they read.
   * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@@ -579,6 -605,8 +608,8 @@@ struct spi_transfer 
  
        dma_addr_t      tx_dma;
        dma_addr_t      rx_dma;
+       struct sg_table tx_sg;
+       struct sg_table rx_sg;
  
        unsigned        cs_change:1;
        unsigned        tx_nbits:3;
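
Taken together, the new spi_master fields let a controller driver opt into the core's DMA mapping instead of mapping buffers itself. A minimal sketch of that opt-in, assuming the device tree provides dmaengine channels named "tx" and "rx"; all foo_* names and the length threshold are hypothetical, not part of this merge:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

/* Ask the core to map only transfers large enough to be worth DMA */
static bool foo_can_dma(struct spi_master *master, struct spi_device *spi,
			struct spi_transfer *xfer)
{
	return xfer->len >= 32;	/* threshold is illustrative */
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	/* dma_tx/dma_rx must be set while the device is prepared */
	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	master->can_dma = foo_can_dma;
	master->max_dma_len = SZ_64K;	/* cap on one mapped segment */

	/* Optional: have the core supply a zeroed dummy_tx for rx-only
	 * transfers on hardware that always shifts in both directions */
	master->flags |= SPI_MASTER_MUST_TX;

	return devm_spi_register_master(&pdev->dev, master);
}

With can_dma() set, spi_map_msg() fills each eligible transfer's tx_sg/rx_sg before transfer_one() runs, and spi_unmap_msg() tears the mappings down when the message is finalized.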