2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
7 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
8 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
9 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
10 * IP32 changes by Ilya.
11 * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
12 * the kernels original.
14 #include <linux/types.h>
16 #include <linux/module.h>
17 #include <linux/string.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/platform_device.h>
20 #include <linux/scatterlist.h>
22 #include <linux/cache.h>
25 #include <asm/octeon/octeon.h>
26 #include <asm/octeon/cvmx-npi-defs.h>
27 #include <asm/octeon/cvmx-pci-defs.h>
29 #include <dma-coherence.h>
32 #include <asm/octeon/pci-octeon.h>
/*
 * PCI address at which the 2^40-byte BAR2 window begins.  Physical
 * addresses are offset by this value when a device's DMA mask allows
 * full 64-bit addressing (see the BAR2 branches below).
 */
35 #define BAR2_PCI_ADDRESS 0x8000000000ul
/*
 * Per-entry bookkeeping for the 32 hardware BAR1 index registers.
 * Each entry maps one 4MB (2^22-byte) window of physical memory.
 * NOTE(review): this extract is truncated — the tail of the struct
 * (including its closing brace) is not visible here.
 */
37 struct bar1_index_state {
38 int16_t ref_count; /* Number of PCI mappings using this index */
39 uint16_t address_bits; /* Upper bits of physical address. This is
/* Protects bar1_state[] and the BAR1 index registers across CPUs. */
44 static DEFINE_RAW_SPINLOCK(bar1_lock);
/* One state record per BAR1 index register (32 windows of 4MB each). */
45 static struct bar1_index_state bar1_state[32];
/*
 * octeon_map_dma_mem() - translate a kernel virtual address into a DMA
 * (bus) address usable by a PCI/PCIe device or an internal peripheral.
 *
 * @dev:  device performing the DMA (may be NULL or a platform device,
 *        in which case the raw physical address is used directly)
 * @ptr:  kernel virtual address of the buffer
 * @size: length of the buffer in bytes
 *
 * Returns the bus address, or panics on addresses the hardware cannot
 * map (BAR0 special areas, the bootbus window, out-of-range memory).
 *
 * NOTE(review): this extract is missing many original lines (gaps in
 * the embedded numbering): the early-return path for non-PCI builds,
 * several variable declarations (dma_mask, index, start_index, flags),
 * closing braces, break statements, and the goto labels the comments
 * refer to.  Comments below describe only what is visible.
 */
48 dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
51 /* Without PCI/PCIe this function can be called for Octeon internal
52 devices such as USB. These devices all support 64bit addressing */
54 return virt_to_phys(ptr);
59 dma_addr_t result = -1;
60 uint64_t physical = virt_to_phys(ptr);
65 * Use the DMA masks to determine the allowed memory
66 * region. For us it doesn't limit the actual memory, just the
67 * address visible over PCI. Devices with limits need to use
68 * lower indexed Bar1 entries.
/* Prefer the coherent mask, then the streaming mask, else 32-bit. */
71 dma_mask = dev->coherent_dma_mask;
73 dma_mask = *dev->dma_mask;
75 dma_mask = 0xfffffffful;
79 * Platform devices, such as the internal USB, skip all
80 * translation and use Octeon physical addresses directly.
82 if (!dev || dev->bus == &platform_bus_type)
/* Translation strategy depends on how the PCI/PCIe BARs were set up. */
85 switch (octeon_dma_bar_type) {
86 case OCTEON_DMA_BAR_TYPE_PCIE:
/* PCIe uses fixed (static) windows: validate, then offset-translate. */
87 if (unlikely(physical < (16ul << 10)))
88 panic("dma_map_single: Not allowed to map first 16KB."
89 " It interferes with BAR0 special area\n");
90 else if ((physical + size >= (256ul << 20)) &&
91 (physical < (512ul << 20)))
92 panic("dma_map_single: Not allowed to map bootbus\n");
93 else if ((physical + size >= 0x400000000ull) &&
94 physical < 0x410000000ull)
95 panic("dma_map_single: "
96 "Attempt to map illegal memory address 0x%llx\n",
98 else if (physical >= 0x420000000ull)
99 panic("dma_map_single: "
100 "Attempt to map illegal memory address 0x%llx\n",
102 else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
103 physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
104 result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
/* Reject results the device's DMA mask cannot address. */
106 if (((result+size-1) & dma_mask) != result+size-1)
107 panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
108 physical, physical+size-1, dma_mask);
112 /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
113 if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
114 result = physical - 0x400000000ull;
117 if (((result+size-1) & dma_mask) != result+size-1)
118 panic("dma_map_single: Attempt to map address "
119 "0x%llx-0x%llx, which can't be accessed "
120 "according to the dma mask 0x%llx\n",
121 physical, physical+size-1, dma_mask);
124 case OCTEON_DMA_BAR_TYPE_BIG:
126 /* If the device supports 64bit addressing, then use BAR2 */
127 if (dma_mask > BAR2_PCI_ADDRESS) {
128 result = physical + BAR2_PCI_ADDRESS;
/* 32-bit devices: validate against the big-BAR memory layout. */
132 if (unlikely(physical < (4ul << 10))) {
133 panic("dma_map_single: Not allowed to map first 4KB. "
134 "It interferes with BAR0 special area\n");
135 } else if (physical < (256ul << 20)) {
136 if (unlikely(physical + size > (256ul << 20)))
137 panic("dma_map_single: Requested memory spans "
138 "Bar0 0:256MB and bootbus\n");
141 } else if (unlikely(physical < (512ul << 20))) {
142 panic("dma_map_single: Not allowed to map bootbus\n");
143 } else if (physical < (2ul << 30)) {
144 if (unlikely(physical + size > (2ul << 30)))
145 panic("dma_map_single: Requested memory spans "
146 "Bar0 512MB:2GB and BAR1\n");
149 } else if (physical < (2ul << 30) + (128 << 20)) {
/* 2GB..2GB+128MB falls in the dynamic BAR1 region — handled below. */
151 } else if (physical <
152 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
155 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
156 panic("dma_map_single: Requested memory "
157 "extends past Bar1 (4GB-%luMB)\n",
158 OCTEON_PCI_BAR1_HOLE_SIZE);
161 } else if ((physical >= 0x410000000ull) &&
162 (physical < 0x420000000ull)) {
163 if (unlikely(physical + size > 0x420000000ull))
164 panic("dma_map_single: Requested memory spans "
165 "non existant memory\n");
166 /* BAR0 fixed mapping 256MB:512MB ->
167 * 16GB+256MB:16GB+512MB */
168 result = physical - 0x400000000ull;
171 /* Continued below switch statement */
175 case OCTEON_DMA_BAR_TYPE_SMALL:
177 /* If the device supports 64bit addressing, then use BAR2 */
178 if (dma_mask > BAR2_PCI_ADDRESS) {
179 result = physical + BAR2_PCI_ADDRESS;
183 /* Continued below switch statement */
/* NOTE(review): presumably the default: case — the label is not visible. */
187 panic("dma_map_single: Invalid octeon_dma_bar_type\n");
/* Dynamic BAR1 path: each index register maps one 4MB-aligned window. */
190 /* Don't allow mapping to span multiple Bar entries. The hardware guys
191 won't guarantee that DMA across boards work */
192 if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
193 panic("dma_map_single: "
194 "Requested memory spans more than one Bar1 entry\n");
/*
 * Devices with a small DMA mask can only reach the low-numbered BAR1
 * windows, so start the search at the highest index the mask allows.
 */
196 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
198 else if (unlikely(dma_mask < (1ul << 27)))
199 start_index = (dma_mask >> 22);
203 /* Only one processor can access the Bar register at once */
204 raw_spin_lock_irqsave(&bar1_lock, flags);
206 /* Look through Bar1 for existing mapping that will work */
207 for (index = start_index; index >= 0; index--) {
208 if ((bar1_state[index].address_bits == physical >> 22) &&
209 (bar1_state[index].ref_count)) {
210 /* An existing mapping will work, use it */
211 bar1_state[index].ref_count++;
/* int16_t counter: going negative means it wrapped past 32767. */
212 if (unlikely(bar1_state[index].ref_count < 0))
213 panic("dma_map_single: "
214 "Bar1[%d] reference count overflowed\n",
216 result = (index << 22) | (physical & ((1 << 22) - 1));
217 /* Large BAR1 is offset at 2GB */
218 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
224 /* No existing mappings, look for a free entry */
225 for (index = start_index; index >= 0; index--) {
226 if (unlikely(bar1_state[index].ref_count == 0)) {
227 union cvmx_pci_bar1_indexx bar1_index;
228 /* We have a free entry, use it */
229 bar1_state[index].ref_count = 1;
230 bar1_state[index].address_bits = physical >> 22;
232 /* Address bits[35:22] sent to L2C */
233 bar1_index.s.addr_idx = physical >> 22;
234 /* Don't put PCI accesses in L2. */
236 /* Endian Swap Mode */
237 bar1_index.s.end_swp = 1;
238 /* Set '1' when the selected address range is valid. */
239 bar1_index.s.addr_v = 1;
/* Program the hardware BAR1 index register for this window. */
240 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
242 /* An existing mapping will work, use it */
243 result = (index << 22) | (physical & ((1 << 22) - 1));
244 /* Large BAR1 is offset at 2GB */
245 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
/* All windows busy: report and (per result init) return -1. */
251 pr_err("dma_map_single: "
252 "Can't find empty BAR1 index for physical mapping 0x%llx\n",
253 (unsigned long long) physical);
256 raw_spin_unlock_irqrestore(&bar1_lock, flags);
258 pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
/*
 * octeon_unmap_dma_mem() - release a DMA (bus) address previously
 * returned by octeon_map_dma_mem().
 *
 * @dev:      device that performed the DMA
 * @dma_addr: bus address to release
 *
 * Static mappings (PCIe, BAR0, BAR2) need no teardown; only dynamic
 * BAR1 windows are reference-counted and freed here.
 *
 * NOTE(review): this extract is missing original lines (gaps in the
 * embedded numbering): the early-return for non-PCI builds, several
 * 'return'/'break' statements, the 'done:' label targeted by the goto,
 * and the declarations of index/flags.  Comments describe only what
 * is visible.
 */
263 void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
267 * Without PCI/PCIe this function can be called for Octeon internal
268 * devices such as USB. These devices all support 64bit addressing.
276 * Platform devices, such as the internal USB, skip all
277 * translation and use Octeon physical addresses directly.
279 if (dev->bus == &platform_bus_type)
/* Decide, per BAR setup, whether this address used a dynamic window. */
282 switch (octeon_dma_bar_type) {
283 case OCTEON_DMA_BAR_TYPE_PCIE:
284 /* Nothing to do, all mappings are static */
287 case OCTEON_DMA_BAR_TYPE_BIG:
289 /* Nothing to do for addresses using BAR2 */
290 if (dma_addr >= BAR2_PCI_ADDRESS)
293 if (unlikely(dma_addr < (4ul << 10)))
294 panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
296 else if (dma_addr < (2ul << 30))
297 /* Nothing to do for addresses using BAR0 */
299 else if (dma_addr < (2ul << 30) + (128ul << 20))
300 /* Need to unmap, fall through */
/* Recover the BAR1 window index from the 2GB-offset bus address. */
301 index = (dma_addr - (2ul << 30)) >> 22;
303 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
304 goto done; /* Nothing to do for the rest of BAR1 */
306 panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
308 /* Continued below switch statement */
311 case OCTEON_DMA_BAR_TYPE_SMALL:
313 /* Nothing to do for addresses using BAR2 */
314 if (dma_addr >= BAR2_PCI_ADDRESS)
/* Small BAR1 windows start at bus address 0: index is just bits 26:22. */
317 index = dma_addr >> 22;
318 /* Continued below switch statement */
/* NOTE(review): presumably the default: case — the label is not visible. */
322 panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
/* Only 32 BAR1 index registers exist (bar1_state[32]). */
325 if (unlikely(index > 31))
326 panic("dma_unmap_single: "
327 "Attempt to unmap an invalid address (0x%llx)\n",
330 raw_spin_lock_irqsave(&bar1_lock, flags);
/* Drop our reference; tear down the hardware window on the last one. */
331 bar1_state[index].ref_count--;
332 if (bar1_state[index].ref_count == 0)
333 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
334 else if (unlikely(bar1_state[index].ref_count < 0))
335 panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
337 raw_spin_unlock_irqrestore(&bar1_lock, flags);
339 pr_debug("dma_unmap_single 0x%llx\n", dma_addr);