arch/arm/xen/mm32.c
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/init.h>		/* __init, arch_initcall() */

#include <xen/features.h>
#include <xen/xen.h>		/* xen_initial_domain() */
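
/*
 * Cache maintenance requested by the swiotlb-xen hooks below: DMA_MAP
 * before handing a buffer to the device, DMA_UNMAP once the device is
 * done with it.
 */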
enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

/* functions called by SWIOTLB */
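
/*
 * Walk the buffer one page at a time so that each page can be flushed
 * individually; the actual per-page cache flush is still a TODO in
 * this revision.
 */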
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* do not cross a page boundary in a single iteration */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* TODO: cache flush */

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
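
/*
 * Thin wrappers that split the DMA handle into a page-aligned address
 * and an offset before calling dma_cache_maint() in the requested
 * direction.
 */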
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
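
/*
 * Called on dma_unmap_page(): no cache maintenance is needed for
 * coherent devices, or when the caller asked to skip the CPU sync
 * with DMA_ATTR_SKIP_CPU_SYNC.
 */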
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
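
/*
 * dma_sync_single_for_{cpu,device} back-ends: skip the cache
 * maintenance entirely for coherent devices.
 */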
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
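
/*
 * arch_initcall hook. Nothing needs to be set up for domU guests, and
 * there is currently no dom0-specific setup either, so this is
 * effectively a no-op.
 */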
static int __init xen_mm32_init(void)
{
	if (!xen_initial_domain())
		return 0;

	return 0;
}
arch_initcall(xen_mm32_init);