/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drv.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

#include <core/tegra.h>

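/*
 * TTM memory-type manager for VRAM.  Allocations are serviced by the
 * nvkm RAM allocator rather than TTM's default range manager.
 */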
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_fb *fb = nvxx_fb(&drm->device);
        man->priv = fb;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        man->priv = NULL;
        return 0;
}

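/* Unmap and release any GPU virtual-memory mappings held by a node. */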
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
        if (node->vma[0].node) {
                nvkm_vm_unmap(&node->vma[0]);
                nvkm_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nvkm_vm_unmap(&node->vma[1]);
                nvkm_vm_put(&node->vma[1]);
        }
}

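/* Drop the node's VM mappings and hand the VRAM back to the allocator. */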
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
        nvkm_mem_node_cleanup(mem->mm_node);
        ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

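/*
 * Allocate VRAM for a buffer object.  On -ENOSPC we return 0 with
 * mem->mm_node left NULL, which tells TTM to try eviction instead of
 * failing the allocation outright.
 */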
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_mem *node;
        u32 size_nc = 0;
        int ret;

        if (drm->device.info.ram_size == 0)
                return -ENOMEM;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
                             mem->page_alignment << PAGE_SHIFT, size_nc,
                             (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        .init = nouveau_vram_manager_init,
        .takedown = nouveau_vram_manager_fini,
        .get_node = nouveau_vram_manager_new,
        .put_node = nouveau_vram_manager_del,
};

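/*
 * TTM memory-type manager for the GART aperture.  No aperture space is
 * reserved up front; _new only records per-object mapping information.
 */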
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

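/* Release the node's VM mappings and free the bookkeeping structure. */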
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nvkm_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

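/*
 * Record the page shift and the family-specific memory type (taken from
 * the tile flags) for use when the object is bound; mem->start stays 0
 * as the GPU virtual address is assigned at bind time.
 */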
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_mem *node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        switch (drm->device.info.family) {
        case NV_DEVICE_INFO_V0_TNT:
        case NV_DEVICE_INFO_V0_CELSIUS:
        case NV_DEVICE_INFO_V0_KELVIN:
        case NV_DEVICE_INFO_V0_RANKINE:
        case NV_DEVICE_INFO_V0_CURIE:
                break;
        case NV_DEVICE_INFO_V0_TESLA:
                if (drm->device.info.chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
        case NV_DEVICE_INFO_V0_PASCAL:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
                NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
                        drm->device.info.family);
                break;
        }

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        .init = nouveau_gart_manager_init,
        .takedown = nouveau_gart_manager_fini,
        .get_node = nouveau_gart_manager_new,
        .put_node = nouveau_gart_manager_del,
        .debug = nouveau_gart_manager_debug,
};

/*XXX*/
#include <subdev/mmu/nv04.h>
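/*
 * Pre-NV50 GART manager: object placements are carved directly out of
 * the global nv04 VM referenced at init time.
 */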
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
        struct nv04_mmu *priv = (void *)mmu;
        struct nvkm_vm *vm = NULL;
        nvkm_vm_ref(priv->vm, &vm, NULL);
        man->priv = vm;
        return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nvkm_vm *vm = man->priv;
        nvkm_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

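/* Release the object's range in the nv04 VM and free the node. */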
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nvkm_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nvkm_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

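/*
 * Reserve a range in the nv04 VM for the object; the VM offset doubles
 * as the TTM placement offset.
 */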
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_mem_reg *mem)
{
        struct nvkm_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                          NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start   = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        .init = nv04_gart_manager_init,
        .takedown = nv04_gart_manager_fini,
        .get_node = nv04_gart_manager_new,
        .put_node = nv04_gart_manager_del,
        .debug = nv04_gart_manager_debug,
};

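/*
 * File mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong to
 * legacy DRM maps, everything above maps a TTM buffer object.
 */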
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_legacy_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

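/*
 * Take references on the global TTM memory-accounting and BO state,
 * creating both on first use.
 */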
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}

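/*
 * Bring up TTM for the device: pick a DMA mask (capped at 32 bits for
 * AGP, or by the IOMMU bit on Tegra), initialise the bo driver, then
 * create the VRAM and GART memory-type managers.
 */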
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->device);
        struct nvkm_pci *pci = device->pci;
        struct drm_device *dev = drm->dev;
        u8 bits;
        int ret;

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

        bits = nvxx_mmu(&drm->device)->dma_bits;
        if (nvxx_device(&drm->device)->func->pci) {
                if (drm->agp.bridge)
                        bits = 32;
        } else if (device->func->tegra) {
                struct nvkm_device_tegra *tegra = device->func->tegra(device);

                /*
                 * If the platform can use an IOMMU, then the addressable DMA
                 * space is constrained by the IOMMU bit.
                 */
                if (tegra->func->iommu_bit)
                        bits = min(bits, tegra->func->iommu_bit);
        }

        ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
        if (ret && bits != 32) {
                bits = 32;
                ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
        }
        if (ret)
                return ret;

        ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
        if (ret)
                dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                  drm->ttm.bo_global_ref.ref.object,
                                  &nouveau_bo_driver,
                                  dev->anon_inode->i_mapping,
                                  DRM_FILE_PAGE_OFFSET,
                                  bits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->device.info.ram_user;

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                              drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge) {
                drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                              drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

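/* Tear down the VRAM/GART managers, the bo device and the MTRR mapping. */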
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
}