drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

24 #include "etnaviv_gpu.h"
25 #include "etnaviv_mmu.h"
26 #include "etnaviv_iommu.h"
27 #include "state.xml.h"
28 #include "state_hi.xml.h"
29
#define MMUv2_PTE_PRESENT               BIT(0)
#define MMUv2_PTE_EXCEPTION             BIT(1)
#define MMUv2_PTE_WRITEABLE             BIT(2)

#define MMUv2_MTLB_MASK                 0xffc00000
#define MMUv2_MTLB_SHIFT                22
#define MMUv2_STLB_MASK                 0x003ff000
#define MMUv2_STLB_SHIFT                12

#define MMUv2_MAX_STLB_ENTRIES          1024

struct etnaviv_iommuv2_domain {
        struct iommu_domain domain;
        struct device *dev;
        void *bad_page_cpu;
        dma_addr_t bad_page_dma;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
        /* S(lave) TLB aka second level pagetable */
        u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
        return container_of(domain, struct etnaviv_iommuv2_domain, domain);
}

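/*
 * Map a single 4K page: split the IOVA into master (MTLB) and slave (STLB)
 * table indices and write the page table entry directly.
 */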
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
                               phys_addr_t paddr, size_t size, int prot)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;
        u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

        if (size != SZ_4K)
                return -EINVAL;

        if (prot & IOMMU_WRITE)
                entry |= MMUv2_PTE_WRITEABLE;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

        return 0;
}

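/*
 * Unmap a single 4K page by marking its STLB entry as an exception, so
 * further GPU accesses to this address fault instead of hitting stale data.
 */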
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
        unsigned long iova, size_t size)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        /* unmap returns the number of bytes unmapped, so signal failure with 0 */
        if (size != SZ_4K)
                return 0;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

        return SZ_4K;
}

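/* Walk the two-level page table to translate an IOVA back into a physical address. */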
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
        dma_addr_t iova)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}

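/*
 * Allocate the scratch (bad) page, the master TLB and all slave TLBs up
 * front, and point every STLB entry at the exception marker.  On any
 * allocation failure, everything allocated so far is released again.
 */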
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
        u32 *p;
        int ret, i, j;

        /* allocate scratch page */
        etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->bad_page_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->bad_page_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }
        p = etnaviv_domain->bad_page_cpu;
        for (i = 0; i < SZ_4K / 4; i++)
                *p++ = 0xdead55aa;

        etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->mtlb_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->mtlb_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        /* pre-populate STLB pages (may want to switch to on-demand later) */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                etnaviv_domain->stlb_cpu[i] =
                                dma_alloc_coherent(etnaviv_domain->dev,
                                                   SZ_4K,
                                                   &etnaviv_domain->stlb_dma[i],
                                                   GFP_KERNEL);
                if (!etnaviv_domain->stlb_cpu[i]) {
                        ret = -ENOMEM;
                        goto fail_mem;
                }
                p = etnaviv_domain->stlb_cpu[i];
                for (j = 0; j < SZ_4K / 4; j++)
                        *p++ = MMUv2_PTE_EXCEPTION;

                etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
                                              MMUv2_PTE_PRESENT;
        }

        return 0;

fail_mem:
        if (etnaviv_domain->bad_page_cpu)
                dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                  etnaviv_domain->bad_page_cpu,
                                  etnaviv_domain->bad_page_dma);

        if (etnaviv_domain->mtlb_cpu)
                dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                  etnaviv_domain->mtlb_cpu,
                                  etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        return ret;
}

static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                          etnaviv_domain->bad_page_cpu,
                          etnaviv_domain->bad_page_dma);

        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                          etnaviv_domain->mtlb_cpu,
                          etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        vfree(etnaviv_domain);
}

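/*
 * Size of the MMU state for a GPU coredump: the master TLB plus one page
 * for every slave TLB that is marked present.
 */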
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        size_t dump_size = SZ_4K;
        int i;

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;

        return dump_size;
}

static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        /*
         * Only present STLBs are counted by etnaviv_iommuv2_dump_size(), so
         * only advance buf for those to avoid overrunning the dump buffer.
         */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
                        buf += SZ_4K;
                }
}

static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
        .ops = {
                .domain_free = etnaviv_iommuv2_domain_free,
                .map = etnaviv_iommuv2_map,
                .unmap = etnaviv_iommuv2_unmap,
                .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
                .pgsize_bitmap = SZ_4K,
        },
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
};

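/*
 * Program the page table base and scratch (bad) page address into the GPU
 * via a command buffer and then enable the MMU.  If the MMU is already
 * enabled, the hardware state is still valid and nothing needs to be done.
 */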
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;

        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)etnaviv_domain->mtlb_dma,
                                (u32)etnaviv_domain->bad_page_dma);
        etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

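/*
 * Allocate an MMUv2 domain: set up the iommu_domain, its aperture and the
 * ops table, then build the page tables via etnaviv_iommuv2_init().
 */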
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain;
        int ret;

        etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
        if (!etnaviv_domain)
                return NULL;

        etnaviv_domain->dev = gpu->dev;

        etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
        etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
        etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
        etnaviv_domain->domain.geometry.aperture_start = 0;
        etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);

        ret = etnaviv_iommuv2_init(etnaviv_domain);
        if (ret)
                goto out_free;

        return &etnaviv_domain->domain;

out_free:
        vfree(etnaviv_domain);
        return NULL;
}