mmc: sdhci: avoid walking SG list for writes
authorRussell King <rmk+kernel@arm.linux.org.uk>
Tue, 26 Jan 2016 13:40:06 +0000 (13:40 +0000)
committerUlf Hansson <ulf.hansson@linaro.org>
Mon, 29 Feb 2016 10:03:19 +0000 (11:03 +0100)
If we are writing data to the card, there is no point in walking the
scatterlist to find out if there are any unaligned entries; this is a
needless waste of CPU cycles.  Avoid this by checking for a non-read
transfer first.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Tested-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
drivers/mmc/host/sdhci.c

index bc645bb..b28aa0f 100644 (file)
@@ -559,37 +559,39 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
        void *align;
        char *buffer;
        unsigned long flags;
-       bool has_unaligned;
 
        if (data->flags & MMC_DATA_READ)
                direction = DMA_FROM_DEVICE;
        else
                direction = DMA_TO_DEVICE;
 
-       /* Do a quick scan of the SG list for any unaligned mappings */
-       has_unaligned = false;
-       for_each_sg(data->sg, sg, host->sg_count, i)
-               if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
-                       has_unaligned = true;
-                       break;
-               }
+       if (data->flags & MMC_DATA_READ) {
+               bool has_unaligned = false;
 
-       if (has_unaligned && data->flags & MMC_DATA_READ) {
-               dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
-                       data->sg_len, direction);
+               /* Do a quick scan of the SG list for any unaligned mappings */
+               for_each_sg(data->sg, sg, host->sg_count, i)
+                       if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+                               has_unaligned = true;
+                               break;
+                       }
 
-               align = host->align_buffer;
+               if (has_unaligned) {
+                       dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+                               data->sg_len, direction);
 
-               for_each_sg(data->sg, sg, host->sg_count, i) {
-                       if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
-                               size = SDHCI_ADMA2_ALIGN -
-                                      (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
+                       align = host->align_buffer;
 
-                               buffer = sdhci_kmap_atomic(sg, &flags);
-                               memcpy(buffer, align, size);
-                               sdhci_kunmap_atomic(buffer, &flags);
+                       for_each_sg(data->sg, sg, host->sg_count, i) {
+                               if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+                                       size = SDHCI_ADMA2_ALIGN -
+                                              (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
 
-                               align += SDHCI_ADMA2_ALIGN;
+                                       buffer = sdhci_kmap_atomic(sg, &flags);
+                                       memcpy(buffer, align, size);
+                                       sdhci_kunmap_atomic(buffer, &flags);
+
+                                       align += SDHCI_ADMA2_ALIGN;
+                               }
                        }
                }
        }