/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address, which is 0x8000_0000 based.
 */

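/*
 * A minimal driver-side usage sketch of the coherent API these ops back
 * (the device pointer "mydev" and the 4K size are illustrative only, not
 * part of this file):
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(mydev, SZ_4K, &handle, GFP_KERNEL);
 *	if (vaddr) {
 *		... program the device with @handle, CPU uses @vaddr ...
 *		dma_free_coherent(mydev, SZ_4K, vaddr, handle);
 *	}
 */
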
#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	/* For now bus address is exactly same as paddr */
	*dma_handle = paddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides need for cache maintenance, saving
	 *    cycles in flush code and bus bandwidth, as the lines of a
	 *    buffer no longer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *    (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return page_address(page);	/* buffer stays cached */

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		__free_pages(page, order);
		return NULL;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush L1 and/or L2, which need
	 * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
	 * which will be optimized in a separate commit.
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}

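/*
 * Undo arc_dma_alloc(): tear down the uncached MMU mapping (unless the
 * buffer was left cached, per the same IOC/NON_CONSISTENT test as in the
 * alloc path) and release the backing pages.
 */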
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
	    !(is_isa_arcv2() && ioc_exists))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * Streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, so it needs to be explicitly
 * made consistent before each use
 */
static void _dma_cache_sync(unsigned long paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}

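/*
 * A minimal driver-side sketch of the streaming API routed through the
 * ops below ("mydev", "buf" and "len" are illustrative only);
 * dma_map_single() lands in arc_dma_map_page() via the .map_page hook:
 *
 *	dma_addr_t dma = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	... kick off the device, wait for completion ...
 *	dma_unmap_single(mydev, dma, len, DMA_TO_DEVICE);
 */
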
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long paddr = page_to_phys(page) + offset;

	_dma_cache_sync(paddr, size, dir);
	return (dma_addr_t)paddr;
}

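/* The sg variant simply maps each segment with dma_map_page() in turn */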
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

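/*
 * Note the single-handle sync callbacks deliberately ignore @dir: a
 * CPU-bound sync always invalidates (the device may have written the
 * buffer) and a device-bound sync always writes back (the CPU may have
 * dirtied it).
 */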
static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

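/*
 * For scatterlists the paddr is recovered via sg_virt(): kernel linear
 * addresses are 0x8000_0000 based, same as paddr (see notes at the top
 * of this file), so the value can be fed to _dma_cache_sync() directly.
 */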
static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);