drivers/gpu/drm/i915/i915_gem.c (cascardo/linux.git, commit "drm/gem: Warn on illegal use of the dumb buffer interface v2")
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/oom.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43                                                    bool force);
44 static __must_check int
45 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
46                                bool readonly);
47 static void
48 i915_gem_object_retire(struct drm_i915_gem_object *obj);
49
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51                                  struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53                                          struct drm_i915_fence_reg *fence,
54                                          bool enable);
55
56 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
57                                              struct shrink_control *sc);
58 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
59                                             struct shrink_control *sc);
60 static int i915_gem_shrinker_oom(struct notifier_block *nb,
61                                  unsigned long event,
62                                  void *ptr);
63 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
64
65 static bool cpu_cache_is_coherent(struct drm_device *dev,
66                                   enum i915_cache_level level)
67 {
68         return HAS_LLC(dev) || level != I915_CACHE_NONE;
69 }
70
71 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
72 {
73         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
74                 return true;
75
76         return obj->pin_display;
77 }
78
79 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
80 {
81         if (obj->tiling_mode)
82                 i915_gem_release_mmap(obj);
83
84         /* As we do not have an associated fence register, we will force
85          * a tiling change if we ever need to acquire one.
86          */
87         obj->fence_dirty = false;
88         obj->fence_reg = I915_FENCE_REG_NONE;
89 }
90
91 /* some bookkeeping */
92 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
93                                   size_t size)
94 {
95         spin_lock(&dev_priv->mm.object_stat_lock);
96         dev_priv->mm.object_count++;
97         dev_priv->mm.object_memory += size;
98         spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
100
101 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
102                                      size_t size)
103 {
104         spin_lock(&dev_priv->mm.object_stat_lock);
105         dev_priv->mm.object_count--;
106         dev_priv->mm.object_memory -= size;
107         spin_unlock(&dev_priv->mm.object_stat_lock);
108 }
109
110 static int
111 i915_gem_wait_for_error(struct i915_gpu_error *error)
112 {
113         int ret;
114
115 #define EXIT_COND (!i915_reset_in_progress(error) || \
116                    i915_terminally_wedged(error))
117         if (EXIT_COND)
118                 return 0;
119
120         /*
121          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
122          * userspace. If it takes that long something really bad is going on and
123          * we should simply try to bail out and fail as gracefully as possible.
124          */
125         ret = wait_event_interruptible_timeout(error->reset_queue,
126                                                EXIT_COND,
127                                                10*HZ);
128         if (ret == 0) {
129                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
130                 return -EIO;
131         } else if (ret < 0) {
132                 return ret;
133         }
134 #undef EXIT_COND
135
136         return 0;
137 }
138
139 int i915_mutex_lock_interruptible(struct drm_device *dev)
140 {
141         struct drm_i915_private *dev_priv = dev->dev_private;
142         int ret;
143
144         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
145         if (ret)
146                 return ret;
147
148         ret = mutex_lock_interruptible(&dev->struct_mutex);
149         if (ret)
150                 return ret;
151
152         WARN_ON(i915_verify_lists(dev));
153         return 0;
154 }
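/*
 * A minimal sketch of how the ioctls below use this helper (illustrative
 * only; see i915_gem_pread_ioctl() for the real pattern): take the
 * interruptible lock, look up the object, do the work, then drop the
 * reference and the lock.
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 *	...
 *	drm_gem_object_unreference(&obj->base);
 *	mutex_unlock(&dev->struct_mutex);
 */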
155
156 static inline bool
157 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
158 {
159         return i915_gem_obj_bound_any(obj) && !obj->active;
160 }
161
162 int
163 i915_gem_init_ioctl(struct drm_device *dev, void *data,
164                     struct drm_file *file)
165 {
166         struct drm_i915_private *dev_priv = dev->dev_private;
167         struct drm_i915_gem_init *args = data;
168
169         if (drm_core_check_feature(dev, DRIVER_MODESET))
170                 return -ENODEV;
171
172         if (args->gtt_start >= args->gtt_end ||
173             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
174                 return -EINVAL;
175
176         /* GEM with user mode setting was never supported on ilk and later. */
177         if (INTEL_INFO(dev)->gen >= 5)
178                 return -ENODEV;
179
180         mutex_lock(&dev->struct_mutex);
181         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
182                                   args->gtt_end);
183         dev_priv->gtt.mappable_end = args->gtt_end;
184         mutex_unlock(&dev->struct_mutex);
185
186         return 0;
187 }
188
189 int
190 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
191                             struct drm_file *file)
192 {
193         struct drm_i915_private *dev_priv = dev->dev_private;
194         struct drm_i915_gem_get_aperture *args = data;
195         struct drm_i915_gem_object *obj;
196         size_t pinned;
197
198         pinned = 0;
199         mutex_lock(&dev->struct_mutex);
200         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
201                 if (i915_gem_obj_is_pinned(obj))
202                         pinned += i915_gem_obj_ggtt_size(obj);
203         mutex_unlock(&dev->struct_mutex);
204
205         args->aper_size = dev_priv->gtt.base.total;
206         args->aper_available_size = args->aper_size - pinned;
207
208         return 0;
209 }
210
211 static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
212 {
213         drm_dma_handle_t *phys = obj->phys_handle;
214
215         if (!phys)
216                 return;
217
218         if (obj->madv == I915_MADV_WILLNEED) {
219                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
220                 char *vaddr = phys->vaddr;
221                 int i;
222
223                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
224                         struct page *page = shmem_read_mapping_page(mapping, i);
225                         if (!IS_ERR(page)) {
226                                 char *dst = kmap_atomic(page);
227                                 memcpy(dst, vaddr, PAGE_SIZE);
228                                 drm_clflush_virt_range(dst, PAGE_SIZE);
229                                 kunmap_atomic(dst);
230
231                                 set_page_dirty(page);
232                                 mark_page_accessed(page);
233                                 page_cache_release(page);
234                         }
235                         vaddr += PAGE_SIZE;
236                 }
237                 i915_gem_chipset_flush(obj->base.dev);
238         }
239
240 #ifdef CONFIG_X86
241         set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
242 #endif
243         drm_pci_free(obj->base.dev, phys);
244         obj->phys_handle = NULL;
245 }
246
247 int
248 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
249                             int align)
250 {
251         drm_dma_handle_t *phys;
252         struct address_space *mapping;
253         char *vaddr;
254         int i;
255
256         if (obj->phys_handle) {
257                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
258                         return -EBUSY;
259
260                 return 0;
261         }
262
263         if (obj->madv != I915_MADV_WILLNEED)
264                 return -EFAULT;
265
266         if (obj->base.filp == NULL)
267                 return -EINVAL;
268
269         /* create a new object */
270         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
271         if (!phys)
272                 return -ENOMEM;
273
274         vaddr = phys->vaddr;
275 #ifdef CONFIG_X86
276         set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
277 #endif
278         mapping = file_inode(obj->base.filp)->i_mapping;
279         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
280                 struct page *page;
281                 char *src;
282
283                 page = shmem_read_mapping_page(mapping, i);
284                 if (IS_ERR(page)) {
285 #ifdef CONFIG_X86
286                         set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
287 #endif
288                         drm_pci_free(obj->base.dev, phys);
289                         return PTR_ERR(page);
290                 }
291
292                 src = kmap_atomic(page);
293                 memcpy(vaddr, src, PAGE_SIZE);
294                 kunmap_atomic(src);
295
296                 mark_page_accessed(page);
297                 page_cache_release(page);
298
299                 vaddr += PAGE_SIZE;
300         }
301
302         obj->phys_handle = phys;
303         return 0;
304 }
305
306 static int
307 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
308                      struct drm_i915_gem_pwrite *args,
309                      struct drm_file *file_priv)
310 {
311         struct drm_device *dev = obj->base.dev;
312         void *vaddr = obj->phys_handle->vaddr + args->offset;
313         char __user *user_data = to_user_ptr(args->data_ptr);
314
315         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
316                 unsigned long unwritten;
317
318                 /* The physical object once assigned is fixed for the lifetime
319                  * of the obj, so we can safely drop the lock and continue
320                  * to access vaddr.
321                  */
322                 mutex_unlock(&dev->struct_mutex);
323                 unwritten = copy_from_user(vaddr, user_data, args->size);
324                 mutex_lock(&dev->struct_mutex);
325                 if (unwritten)
326                         return -EFAULT;
327         }
328
329         i915_gem_chipset_flush(dev);
330         return 0;
331 }
332
333 void *i915_gem_object_alloc(struct drm_device *dev)
334 {
335         struct drm_i915_private *dev_priv = dev->dev_private;
336         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
337 }
338
339 void i915_gem_object_free(struct drm_i915_gem_object *obj)
340 {
341         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
342         kmem_cache_free(dev_priv->slab, obj);
343 }
344
345 static int
346 i915_gem_create(struct drm_file *file,
347                 struct drm_device *dev,
348                 uint64_t size,
349                 bool dumb,
350                 uint32_t *handle_p)
351 {
352         struct drm_i915_gem_object *obj;
353         int ret;
354         u32 handle;
355
356         size = roundup(size, PAGE_SIZE);
357         if (size == 0)
358                 return -EINVAL;
359
360         /* Allocate the new object */
361         obj = i915_gem_alloc_object(dev, size);
362         if (obj == NULL)
363                 return -ENOMEM;
364
365         obj->base.dumb = dumb;
366         ret = drm_gem_handle_create(file, &obj->base, &handle);
367         /* drop reference from allocate - handle holds it now */
368         drm_gem_object_unreference_unlocked(&obj->base);
369         if (ret)
370                 return ret;
371
372         *handle_p = handle;
373         return 0;
374 }
375
376 int
377 i915_gem_dumb_create(struct drm_file *file,
378                      struct drm_device *dev,
379                      struct drm_mode_create_dumb *args)
380 {
381         /* have to work out size/pitch and return them */
382         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
383         args->size = args->pitch * args->height;
384         return i915_gem_create(file, dev,
385                                args->size, true, &args->handle);
386 }
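/*
 * Worked example with illustrative numbers: a 1920x1080, 32 bpp dumb
 * buffer gets pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = 7680 bytes
 * and size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() then
 * rounds up to a whole number of pages (already page-aligned in this case).
 */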
387
388 /**
389  * Creates a new mm object and returns a handle to it.
390  */
391 int
392 i915_gem_create_ioctl(struct drm_device *dev, void *data,
393                       struct drm_file *file)
394 {
395         struct drm_i915_gem_create *args = data;
396
397         return i915_gem_create(file, dev,
398                                args->size, false, &args->handle);
399 }
400
401 static inline int
402 __copy_to_user_swizzled(char __user *cpu_vaddr,
403                         const char *gpu_vaddr, int gpu_offset,
404                         int length)
405 {
406         int ret, cpu_offset = 0;
407
408         while (length > 0) {
409                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
410                 int this_length = min(cacheline_end - gpu_offset, length);
411                 int swizzled_gpu_offset = gpu_offset ^ 64;
412
413                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
414                                      gpu_vaddr + swizzled_gpu_offset,
415                                      this_length);
416                 if (ret)
417                         return ret + length;
418
419                 cpu_offset += this_length;
420                 gpu_offset += this_length;
421                 length -= this_length;
422         }
423
424         return 0;
425 }
426
427 static inline int
428 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
429                           const char __user *cpu_vaddr,
430                           int length)
431 {
432         int ret, cpu_offset = 0;
433
434         while (length > 0) {
435                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
436                 int this_length = min(cacheline_end - gpu_offset, length);
437                 int swizzled_gpu_offset = gpu_offset ^ 64;
438
439                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
440                                        cpu_vaddr + cpu_offset,
441                                        this_length);
442                 if (ret)
443                         return ret + length;
444
445                 cpu_offset += this_length;
446                 gpu_offset += this_length;
447                 length -= this_length;
448         }
449
450         return 0;
451 }
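/*
 * Worked example for the swizzled copy helpers above (illustrative):
 * bit-17 swizzling swaps the two 64-byte halves of every 128-byte span,
 * so a 256-byte copy starting at gpu_offset 0 proceeds in 64-byte chunks
 * taken from gpu offsets 64, 0, 192 and 128 (each offset XORed with 64),
 * while the cpu_vaddr side advances linearly.
 */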
452
453 /*
454  * Pins the specified object's pages and synchronizes the object with
455  * GPU accesses. Sets needs_clflush to non-zero if the caller should
456  * flush the object from the CPU cache.
457  */
458 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
459                                     int *needs_clflush)
460 {
461         int ret;
462
463         *needs_clflush = 0;
464
465         if (!obj->base.filp)
466                 return -EINVAL;
467
468         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
469                 /* If we're not in the cpu read domain, set ourselves into the gtt
470                  * read domain and manually flush cachelines (if required). This
471                  * optimizes for the case when the gpu will dirty the data
472                  * anyway again before the next pread happens. */
473                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
474                                                         obj->cache_level);
475                 ret = i915_gem_object_wait_rendering(obj, true);
476                 if (ret)
477                         return ret;
478
479                 i915_gem_object_retire(obj);
480         }
481
482         ret = i915_gem_object_get_pages(obj);
483         if (ret)
484                 return ret;
485
486         i915_gem_object_pin_pages(obj);
487
488         return ret;
489 }
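/*
 * Minimal sketch of the expected calling pattern (the real user is
 * i915_gem_shmem_pread() below); the pages stay pinned from the prepare
 * call until the caller unpins them:
 *
 *	int needs_clflush;
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *	... copy out of obj->pages, clflushing first if needs_clflush ...
 *	i915_gem_object_unpin_pages(obj);
 */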
490
491 /* Per-page copy function for the shmem pread fastpath.
492  * Flushes invalid cachelines before reading the target if
493  * needs_clflush is set. */
494 static int
495 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
496                  char __user *user_data,
497                  bool page_do_bit17_swizzling, bool needs_clflush)
498 {
499         char *vaddr;
500         int ret;
501
502         if (unlikely(page_do_bit17_swizzling))
503                 return -EINVAL;
504
505         vaddr = kmap_atomic(page);
506         if (needs_clflush)
507                 drm_clflush_virt_range(vaddr + shmem_page_offset,
508                                        page_length);
509         ret = __copy_to_user_inatomic(user_data,
510                                       vaddr + shmem_page_offset,
511                                       page_length);
512         kunmap_atomic(vaddr);
513
514         return ret ? -EFAULT : 0;
515 }
516
517 static void
518 shmem_clflush_swizzled_range(char *addr, unsigned long length,
519                              bool swizzled)
520 {
521         if (unlikely(swizzled)) {
522                 unsigned long start = (unsigned long) addr;
523                 unsigned long end = (unsigned long) addr + length;
524
525                 /* For swizzling simply ensure that we always flush both
526                  * channels. Lame, but simple and it works. Swizzled
527                  * pwrite/pread is far from a hotpath - current userspace
528                  * doesn't use it at all. */
529                 start = round_down(start, 128);
530                 end = round_up(end, 128);
531
532                 drm_clflush_virt_range((void *)start, end - start);
533         } else {
534                 drm_clflush_virt_range(addr, length);
535         }
536
537 }
538
539 /* Only difference to the fast-path function is that this can handle bit17
540  * and uses non-atomic copy and kmap functions. */
541 static int
542 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
543                  char __user *user_data,
544                  bool page_do_bit17_swizzling, bool needs_clflush)
545 {
546         char *vaddr;
547         int ret;
548
549         vaddr = kmap(page);
550         if (needs_clflush)
551                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
552                                              page_length,
553                                              page_do_bit17_swizzling);
554
555         if (page_do_bit17_swizzling)
556                 ret = __copy_to_user_swizzled(user_data,
557                                               vaddr, shmem_page_offset,
558                                               page_length);
559         else
560                 ret = __copy_to_user(user_data,
561                                      vaddr + shmem_page_offset,
562                                      page_length);
563         kunmap(page);
564
565         return ret ? -EFAULT : 0;
566 }
567
568 static int
569 i915_gem_shmem_pread(struct drm_device *dev,
570                      struct drm_i915_gem_object *obj,
571                      struct drm_i915_gem_pread *args,
572                      struct drm_file *file)
573 {
574         char __user *user_data;
575         ssize_t remain;
576         loff_t offset;
577         int shmem_page_offset, page_length, ret = 0;
578         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
579         int prefaulted = 0;
580         int needs_clflush = 0;
581         struct sg_page_iter sg_iter;
582
583         user_data = to_user_ptr(args->data_ptr);
584         remain = args->size;
585
586         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
587
588         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
589         if (ret)
590                 return ret;
591
592         offset = args->offset;
593
594         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
595                          offset >> PAGE_SHIFT) {
596                 struct page *page = sg_page_iter_page(&sg_iter);
597
598                 if (remain <= 0)
599                         break;
600
601                 /* Operation in this page
602                  *
603                  * shmem_page_offset = offset within page in shmem file
604                  * page_length = bytes to copy for this page
605                  */
606                 shmem_page_offset = offset_in_page(offset);
607                 page_length = remain;
608                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
609                         page_length = PAGE_SIZE - shmem_page_offset;
610
611                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
612                         (page_to_phys(page) & (1 << 17)) != 0;
613
614                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
615                                        user_data, page_do_bit17_swizzling,
616                                        needs_clflush);
617                 if (ret == 0)
618                         goto next_page;
619
620                 mutex_unlock(&dev->struct_mutex);
621
622                 if (likely(!i915.prefault_disable) && !prefaulted) {
623                         ret = fault_in_multipages_writeable(user_data, remain);
624                         /* Userspace is tricking us, but we've already clobbered
625                          * its pages with the prefault and promised to write the
626                          * data up to the first fault. Hence ignore any errors
627                          * and just continue. */
628                         (void)ret;
629                         prefaulted = 1;
630                 }
631
632                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
633                                        user_data, page_do_bit17_swizzling,
634                                        needs_clflush);
635
636                 mutex_lock(&dev->struct_mutex);
637
638                 if (ret)
639                         goto out;
640
641 next_page:
642                 remain -= page_length;
643                 user_data += page_length;
644                 offset += page_length;
645         }
646
647 out:
648         i915_gem_object_unpin_pages(obj);
649
650         return ret;
651 }
652
653 /**
654  * Reads data from the object referenced by handle.
655  *
656  * On error, the contents of *data are undefined.
657  */
658 int
659 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
660                      struct drm_file *file)
661 {
662         struct drm_i915_gem_pread *args = data;
663         struct drm_i915_gem_object *obj;
664         int ret = 0;
665
666         if (args->size == 0)
667                 return 0;
668
669         if (!access_ok(VERIFY_WRITE,
670                        to_user_ptr(args->data_ptr),
671                        args->size))
672                 return -EFAULT;
673
674         ret = i915_mutex_lock_interruptible(dev);
675         if (ret)
676                 return ret;
677
678         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
679         if (&obj->base == NULL) {
680                 ret = -ENOENT;
681                 goto unlock;
682         }
683
684         /* Bounds check source.  */
685         if (args->offset > obj->base.size ||
686             args->size > obj->base.size - args->offset) {
687                 ret = -EINVAL;
688                 goto out;
689         }
690
691         /* prime objects have no backing filp to GEM pread/pwrite
692          * pages from.
693          */
694         if (!obj->base.filp) {
695                 ret = -EINVAL;
696                 goto out;
697         }
698
699         trace_i915_gem_object_pread(obj, args->offset, args->size);
700
701         ret = i915_gem_shmem_pread(dev, obj, args, file);
702
703 out:
704         drm_gem_object_unreference(&obj->base);
705 unlock:
706         mutex_unlock(&dev->struct_mutex);
707         return ret;
708 }
709
710 /* This is the fast write path which cannot handle
711  * page faults in the source data
712  */
713
714 static inline int
715 fast_user_write(struct io_mapping *mapping,
716                 loff_t page_base, int page_offset,
717                 char __user *user_data,
718                 int length)
719 {
720         void __iomem *vaddr_atomic;
721         void *vaddr;
722         unsigned long unwritten;
723
724         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
725         /* We can use the cpu mem copy function because this is X86. */
726         vaddr = (void __force *)vaddr_atomic + page_offset;
727         unwritten = __copy_from_user_inatomic_nocache(vaddr,
728                                                       user_data, length);
729         io_mapping_unmap_atomic(vaddr_atomic);
730         return unwritten;
731 }
732
733 /**
734  * This is the fast pwrite path, where we copy the data directly from the
735  * user into the GTT, uncached.
736  */
737 static int
738 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
739                          struct drm_i915_gem_object *obj,
740                          struct drm_i915_gem_pwrite *args,
741                          struct drm_file *file)
742 {
743         struct drm_i915_private *dev_priv = dev->dev_private;
744         ssize_t remain;
745         loff_t offset, page_base;
746         char __user *user_data;
747         int page_offset, page_length, ret;
748
749         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
750         if (ret)
751                 goto out;
752
753         ret = i915_gem_object_set_to_gtt_domain(obj, true);
754         if (ret)
755                 goto out_unpin;
756
757         ret = i915_gem_object_put_fence(obj);
758         if (ret)
759                 goto out_unpin;
760
761         user_data = to_user_ptr(args->data_ptr);
762         remain = args->size;
763
764         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
765
766         while (remain > 0) {
767                 /* Operation in this page
768                  *
769                  * page_base = page offset within aperture
770                  * page_offset = offset within page
771                  * page_length = bytes to copy for this page
772                  */
773                 page_base = offset & PAGE_MASK;
774                 page_offset = offset_in_page(offset);
775                 page_length = remain;
776                 if ((page_offset + remain) > PAGE_SIZE)
777                         page_length = PAGE_SIZE - page_offset;
778
779                 /* If we get a fault while copying data, then (presumably) our
780                  * source page isn't available.  Return the error and we'll
781                  * retry in the slow path.
782                  */
783                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
784                                     page_offset, user_data, page_length)) {
785                         ret = -EFAULT;
786                         goto out_unpin;
787                 }
788
789                 remain -= page_length;
790                 user_data += page_length;
791                 offset += page_length;
792         }
793
794 out_unpin:
795         i915_gem_object_ggtt_unpin(obj);
796 out:
797         return ret;
798 }
799
800 /* Per-page copy function for the shmem pwrite fastpath.
801  * Flushes invalid cachelines before writing to the target if
802  * needs_clflush_before is set and flushes out any written cachelines after
803  * writing if needs_clflush is set. */
804 static int
805 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
806                   char __user *user_data,
807                   bool page_do_bit17_swizzling,
808                   bool needs_clflush_before,
809                   bool needs_clflush_after)
810 {
811         char *vaddr;
812         int ret;
813
814         if (unlikely(page_do_bit17_swizzling))
815                 return -EINVAL;
816
817         vaddr = kmap_atomic(page);
818         if (needs_clflush_before)
819                 drm_clflush_virt_range(vaddr + shmem_page_offset,
820                                        page_length);
821         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
822                                         user_data, page_length);
823         if (needs_clflush_after)
824                 drm_clflush_virt_range(vaddr + shmem_page_offset,
825                                        page_length);
826         kunmap_atomic(vaddr);
827
828         return ret ? -EFAULT : 0;
829 }
830
831 /* Only difference to the fast-path function is that this can handle bit17
832  * and uses non-atomic copy and kmap functions. */
833 static int
834 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
835                   char __user *user_data,
836                   bool page_do_bit17_swizzling,
837                   bool needs_clflush_before,
838                   bool needs_clflush_after)
839 {
840         char *vaddr;
841         int ret;
842
843         vaddr = kmap(page);
844         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
845                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
846                                              page_length,
847                                              page_do_bit17_swizzling);
848         if (page_do_bit17_swizzling)
849                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
850                                                 user_data,
851                                                 page_length);
852         else
853                 ret = __copy_from_user(vaddr + shmem_page_offset,
854                                        user_data,
855                                        page_length);
856         if (needs_clflush_after)
857                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
858                                              page_length,
859                                              page_do_bit17_swizzling);
860         kunmap(page);
861
862         return ret ? -EFAULT : 0;
863 }
864
865 static int
866 i915_gem_shmem_pwrite(struct drm_device *dev,
867                       struct drm_i915_gem_object *obj,
868                       struct drm_i915_gem_pwrite *args,
869                       struct drm_file *file)
870 {
871         ssize_t remain;
872         loff_t offset;
873         char __user *user_data;
874         int shmem_page_offset, page_length, ret = 0;
875         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
876         int hit_slowpath = 0;
877         int needs_clflush_after = 0;
878         int needs_clflush_before = 0;
879         struct sg_page_iter sg_iter;
880
881         user_data = to_user_ptr(args->data_ptr);
882         remain = args->size;
883
884         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
885
886         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
887                 /* If we're not in the cpu write domain, set ourselves into the gtt
888                  * write domain and manually flush cachelines (if required). This
889                  * optimizes for the case when the gpu will use the data
890                  * right away and we therefore have to clflush anyway. */
891                 needs_clflush_after = cpu_write_needs_clflush(obj);
892                 ret = i915_gem_object_wait_rendering(obj, false);
893                 if (ret)
894                         return ret;
895
896                 i915_gem_object_retire(obj);
897         }
898         /* The same trick is used to invalidate partially written cachelines read
899          * before writing. */
900         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
901                 needs_clflush_before =
902                         !cpu_cache_is_coherent(dev, obj->cache_level);
903
904         ret = i915_gem_object_get_pages(obj);
905         if (ret)
906                 return ret;
907
908         i915_gem_object_pin_pages(obj);
909
910         offset = args->offset;
911         obj->dirty = 1;
912
913         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
914                          offset >> PAGE_SHIFT) {
915                 struct page *page = sg_page_iter_page(&sg_iter);
916                 int partial_cacheline_write;
917
918                 if (remain <= 0)
919                         break;
920
921                 /* Operation in this page
922                  *
923                  * shmem_page_offset = offset within page in shmem file
924                  * page_length = bytes to copy for this page
925                  */
926                 shmem_page_offset = offset_in_page(offset);
927
928                 page_length = remain;
929                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
930                         page_length = PAGE_SIZE - shmem_page_offset;
931
932                 /* If we don't overwrite a cacheline completely we need to be
933                  * careful to have up-to-date data by first clflushing. Don't
934                  * overcomplicate things and flush the entire range. */
935                 partial_cacheline_write = needs_clflush_before &&
936                         ((shmem_page_offset | page_length)
937                                 & (boot_cpu_data.x86_clflush_size - 1));
938
939                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
940                         (page_to_phys(page) & (1 << 17)) != 0;
941
942                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
943                                         user_data, page_do_bit17_swizzling,
944                                         partial_cacheline_write,
945                                         needs_clflush_after);
946                 if (ret == 0)
947                         goto next_page;
948
949                 hit_slowpath = 1;
950                 mutex_unlock(&dev->struct_mutex);
951                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
952                                         user_data, page_do_bit17_swizzling,
953                                         partial_cacheline_write,
954                                         needs_clflush_after);
955
956                 mutex_lock(&dev->struct_mutex);
957
958                 if (ret)
959                         goto out;
960
961 next_page:
962                 remain -= page_length;
963                 user_data += page_length;
964                 offset += page_length;
965         }
966
967 out:
968         i915_gem_object_unpin_pages(obj);
969
970         if (hit_slowpath) {
971                 /*
972                  * Fixup: Flush cpu caches in case we didn't flush the dirty
973                  * cachelines in-line while writing and the object moved
974                  * out of the cpu write domain while we've dropped the lock.
975                  */
976                 if (!needs_clflush_after &&
977                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
978                         if (i915_gem_clflush_object(obj, obj->pin_display))
979                                 i915_gem_chipset_flush(dev);
980                 }
981         }
982
983         if (needs_clflush_after)
984                 i915_gem_chipset_flush(dev);
985
986         return ret;
987 }
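/*
 * Worked example for the partial_cacheline_write check above, assuming a
 * 64-byte clflush size (illustrative numbers): a 48-byte write at offset
 * 16 leaves (16 | 48) & 63 non-zero, so the destination lines are
 * clflushed before copying; a 64-byte write at offset 0 covers whole
 * cachelines and skips the pre-flush.
 */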
988
989 /**
990  * Writes data to the object referenced by handle.
991  *
992  * On error, the contents of the buffer that were to be modified are undefined.
993  */
994 int
995 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
996                       struct drm_file *file)
997 {
998         struct drm_i915_gem_pwrite *args = data;
999         struct drm_i915_gem_object *obj;
1000         int ret;
1001
1002         if (args->size == 0)
1003                 return 0;
1004
1005         if (!access_ok(VERIFY_READ,
1006                        to_user_ptr(args->data_ptr),
1007                        args->size))
1008                 return -EFAULT;
1009
1010         if (likely(!i915.prefault_disable)) {
1011                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1012                                                    args->size);
1013                 if (ret)
1014                         return -EFAULT;
1015         }
1016
1017         ret = i915_mutex_lock_interruptible(dev);
1018         if (ret)
1019                 return ret;
1020
1021         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1022         if (&obj->base == NULL) {
1023                 ret = -ENOENT;
1024                 goto unlock;
1025         }
1026
1027         /* Bounds check destination. */
1028         if (args->offset > obj->base.size ||
1029             args->size > obj->base.size - args->offset) {
1030                 ret = -EINVAL;
1031                 goto out;
1032         }
1033
1034         /* prime objects have no backing filp to GEM pread/pwrite
1035          * pages from.
1036          */
1037         if (!obj->base.filp) {
1038                 ret = -EINVAL;
1039                 goto out;
1040         }
1041
1042         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1043
1044         ret = -EFAULT;
1045         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1046          * it would end up going through the fenced access, and we'll get
1047          * different detiling behavior between reading and writing.
1048          * pread/pwrite currently are reading and writing from the CPU
1049          * perspective, requiring manual detiling by the client.
1050          */
1051         if (obj->phys_handle) {
1052                 ret = i915_gem_phys_pwrite(obj, args, file);
1053                 goto out;
1054         }
1055
1056         if (obj->tiling_mode == I915_TILING_NONE &&
1057             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1058             cpu_write_needs_clflush(obj)) {
1059                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1060                 /* Note that the gtt paths might fail with non-page-backed user
1061                  * pointers (e.g. gtt mappings when moving data between
1062                  * textures). Fallback to the shmem path in that case. */
1063         }
1064
1065         if (ret == -EFAULT || ret == -ENOSPC)
1066                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1067
1068 out:
1069         drm_gem_object_unreference(&obj->base);
1070 unlock:
1071         mutex_unlock(&dev->struct_mutex);
1072         return ret;
1073 }
1074
1075 int
1076 i915_gem_check_wedge(struct i915_gpu_error *error,
1077                      bool interruptible)
1078 {
1079         if (i915_reset_in_progress(error)) {
1080                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1081                  * -EIO unconditionally for these. */
1082                 if (!interruptible)
1083                         return -EIO;
1084
1085                 /* Recovery complete, but the reset failed ... */
1086                 if (i915_terminally_wedged(error))
1087                         return -EIO;
1088
1089                 /*
1090                  * Check if GPU Reset is in progress - we need intel_ring_begin
1091                  * to work properly to reinit the hw state while the gpu is
1092                  * still marked as reset-in-progress. Handle this with a flag.
1093                  */
1094                 if (!error->reload_in_reset)
1095                         return -EAGAIN;
1096         }
1097
1098         return 0;
1099 }
1100
1101 /*
1102  * Compare seqno against outstanding lazy request. Emit a request if they are
1103  * equal.
1104  */
1105 int
1106 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1107 {
1108         int ret;
1109
1110         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1111
1112         ret = 0;
1113         if (seqno == ring->outstanding_lazy_seqno)
1114                 ret = i915_add_request(ring, NULL);
1115
1116         return ret;
1117 }
1118
1119 static void fake_irq(unsigned long data)
1120 {
1121         wake_up_process((struct task_struct *)data);
1122 }
1123
1124 static bool missed_irq(struct drm_i915_private *dev_priv,
1125                        struct intel_engine_cs *ring)
1126 {
1127         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1128 }
1129
1130 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1131 {
1132         if (file_priv == NULL)
1133                 return true;
1134
1135         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1136 }
1137
1138 /**
1139  * __i915_wait_seqno - wait until execution of seqno has finished
1140  * @ring: the ring expected to report seqno
1141  * @seqno: target sequence number to wait for
1142  * @reset_counter: reset sequence associated with the given seqno
1143  * @interruptible: do an interruptible wait (normally yes)
1144  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1145  *
1146  * Note: It is of utmost importance that the passed in seqno and reset_counter
1147  * values have been read by the caller in an smp safe manner. Where read-side
1148  * locks are involved, it is sufficient to read the reset_counter before
1149  * unlocking the lock that protects the seqno. For lockless tricks, the
1150  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1151  * inserted.
1152  *
1153  * Returns 0 if the seqno was found within the allotted time. Else returns the
1154  * errno with remaining time filled in timeout argument.
1155  */
1156 int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1157                         unsigned reset_counter,
1158                         bool interruptible,
1159                         s64 *timeout,
1160                         struct drm_i915_file_private *file_priv)
1161 {
1162         struct drm_device *dev = ring->dev;
1163         struct drm_i915_private *dev_priv = dev->dev_private;
1164         const bool irq_test_in_progress =
1165                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1166         DEFINE_WAIT(wait);
1167         unsigned long timeout_expire;
1168         s64 before, now;
1169         int ret;
1170
1171         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1172
1173         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1174                 return 0;
1175
1176         timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1177
1178         if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1179                 gen6_rps_boost(dev_priv);
1180                 if (file_priv)
1181                         mod_delayed_work(dev_priv->wq,
1182                                          &file_priv->mm.idle_work,
1183                                          msecs_to_jiffies(100));
1184         }
1185
1186         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1187                 return -ENODEV;
1188
1189         /* Record current time in case interrupted by signal, or wedged */
1190         trace_i915_gem_request_wait_begin(ring, seqno);
1191         before = ktime_get_raw_ns();
1192         for (;;) {
1193                 struct timer_list timer;
1194
1195                 prepare_to_wait(&ring->irq_queue, &wait,
1196                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1197
1198                 /* We need to check whether any gpu reset happened in between
1199                  * the caller grabbing the seqno and now ... */
1200                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1201                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1202                          * is truly gone. */
1203                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1204                         if (ret == 0)
1205                                 ret = -EAGAIN;
1206                         break;
1207                 }
1208
1209                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1210                         ret = 0;
1211                         break;
1212                 }
1213
1214                 if (interruptible && signal_pending(current)) {
1215                         ret = -ERESTARTSYS;
1216                         break;
1217                 }
1218
1219                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1220                         ret = -ETIME;
1221                         break;
1222                 }
1223
1224                 timer.function = NULL;
1225                 if (timeout || missed_irq(dev_priv, ring)) {
1226                         unsigned long expire;
1227
1228                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1229                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1230                         mod_timer(&timer, expire);
1231                 }
1232
1233                 io_schedule();
1234
1235                 if (timer.function) {
1236                         del_singleshot_timer_sync(&timer);
1237                         destroy_timer_on_stack(&timer);
1238                 }
1239         }
1240         now = ktime_get_raw_ns();
1241         trace_i915_gem_request_wait_end(ring, seqno);
1242
1243         if (!irq_test_in_progress)
1244                 ring->irq_put(ring);
1245
1246         finish_wait(&ring->irq_queue, &wait);
1247
1248         if (timeout) {
1249                 s64 tres = *timeout - (now - before);
1250
1251                 *timeout = tres < 0 ? 0 : tres;
1252         }
1253
1254         return ret;
1255 }
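/*
 * Sketch of the read-ordering the kerneldoc above requires (compare
 * i915_gem_object_wait_rendering__nonblocking() below): sample the reset
 * counter while struct_mutex still serialises it against the seqno, and
 * only then drop the lock for the wait.
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);
 *	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 *	mutex_lock(&dev->struct_mutex);
 */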
1256
1257 /**
1258  * Waits for a sequence number to be signaled, and cleans up the
1259  * request and object lists appropriately for that event.
1260  */
1261 int
1262 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
1263 {
1264         struct drm_device *dev = ring->dev;
1265         struct drm_i915_private *dev_priv = dev->dev_private;
1266         bool interruptible = dev_priv->mm.interruptible;
1267         unsigned reset_counter;
1268         int ret;
1269
1270         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1271         BUG_ON(seqno == 0);
1272
1273         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1274         if (ret)
1275                 return ret;
1276
1277         ret = i915_gem_check_olr(ring, seqno);
1278         if (ret)
1279                 return ret;
1280
1281         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1282         return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
1283                                  NULL, NULL);
1284 }
1285
1286 static int
1287 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
1288 {
1289         if (!obj->active)
1290                 return 0;
1291
1292         /* Manually manage the write flush as we may not yet have
1293          * retired the buffer.
1294          *
1295          * Note that the last_write_seqno is always the earlier of
1296          * the two (read/write) seqnos, so if we have successfully waited,
1297          * we know we have passed the last write.
1298          */
1299         obj->last_write_seqno = 0;
1300
1301         return 0;
1302 }
1303
1304 /**
1305  * Ensures that all rendering to the object has completed and the object is
1306  * safe to unbind from the GTT or access from the CPU.
1307  */
1308 static __must_check int
1309 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1310                                bool readonly)
1311 {
1312         struct intel_engine_cs *ring = obj->ring;
1313         u32 seqno;
1314         int ret;
1315
1316         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1317         if (seqno == 0)
1318                 return 0;
1319
1320         ret = i915_wait_seqno(ring, seqno);
1321         if (ret)
1322                 return ret;
1323
1324         return i915_gem_object_wait_rendering__tail(obj);
1325 }
1326
1327 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1328  * as the object state may change during this call.
1329  */
1330 static __must_check int
1331 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1332                                             struct drm_i915_file_private *file_priv,
1333                                             bool readonly)
1334 {
1335         struct drm_device *dev = obj->base.dev;
1336         struct drm_i915_private *dev_priv = dev->dev_private;
1337         struct intel_engine_cs *ring = obj->ring;
1338         unsigned reset_counter;
1339         u32 seqno;
1340         int ret;
1341
1342         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1343         BUG_ON(!dev_priv->mm.interruptible);
1344
1345         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1346         if (seqno == 0)
1347                 return 0;
1348
1349         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1350         if (ret)
1351                 return ret;
1352
1353         ret = i915_gem_check_olr(ring, seqno);
1354         if (ret)
1355                 return ret;
1356
1357         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1358         mutex_unlock(&dev->struct_mutex);
1359         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
1360                                 file_priv);
1361         mutex_lock(&dev->struct_mutex);
1362         if (ret)
1363                 return ret;
1364
1365         return i915_gem_object_wait_rendering__tail(obj);
1366 }
1367
1368 /**
1369  * Called when user space prepares to use an object with the CPU, either
1370  * through the mmap ioctl's mapping or a GTT mapping.
1371  */
1372 int
1373 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1374                           struct drm_file *file)
1375 {
1376         struct drm_i915_gem_set_domain *args = data;
1377         struct drm_i915_gem_object *obj;
1378         uint32_t read_domains = args->read_domains;
1379         uint32_t write_domain = args->write_domain;
1380         int ret;
1381
1382         /* Only handle setting domains to types used by the CPU. */
1383         if (write_domain & I915_GEM_GPU_DOMAINS)
1384                 return -EINVAL;
1385
1386         if (read_domains & I915_GEM_GPU_DOMAINS)
1387                 return -EINVAL;
1388
1389         /* Having something in the write domain implies it's in the read
1390          * domain, and only that read domain.  Enforce that in the request.
1391          */
1392         if (write_domain != 0 && read_domains != write_domain)
1393                 return -EINVAL;
1394
1395         ret = i915_mutex_lock_interruptible(dev);
1396         if (ret)
1397                 return ret;
1398
1399         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1400         if (&obj->base == NULL) {
1401                 ret = -ENOENT;
1402                 goto unlock;
1403         }
1404
1405         /* Try to flush the object off the GPU without holding the lock.
1406          * We will repeat the flush holding the lock in the normal manner
1407          * to catch cases where we are gazumped.
1408          */
1409         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1410                                                           file->driver_priv,
1411                                                           !write_domain);
1412         if (ret)
1413                 goto unref;
1414
1415         if (read_domains & I915_GEM_DOMAIN_GTT) {
1416                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1417
1418                 /* Silently promote "you're not bound, there was nothing to do"
1419                  * to success, since the client was just asking us to
1420                  * make sure everything was done.
1421                  */
1422                 if (ret == -EINVAL)
1423                         ret = 0;
1424         } else {
1425                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1426         }
1427
1428 unref:
1429         drm_gem_object_unreference(&obj->base);
1430 unlock:
1431         mutex_unlock(&dev->struct_mutex);
1432         return ret;
1433 }
1434
1435 /**
1436  * Called when user space has done writes to this buffer
1437  */
1438 int
1439 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1440                          struct drm_file *file)
1441 {
1442         struct drm_i915_gem_sw_finish *args = data;
1443         struct drm_i915_gem_object *obj;
1444         int ret = 0;
1445
1446         ret = i915_mutex_lock_interruptible(dev);
1447         if (ret)
1448                 return ret;
1449
1450         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1451         if (&obj->base == NULL) {
1452                 ret = -ENOENT;
1453                 goto unlock;
1454         }
1455
1456         /* Pinned buffers may be scanout, so flush the cache */
1457         if (obj->pin_display)
1458                 i915_gem_object_flush_cpu_write_domain(obj, true);
1459
1460         drm_gem_object_unreference(&obj->base);
1461 unlock:
1462         mutex_unlock(&dev->struct_mutex);
1463         return ret;
1464 }
1465
1466 /**
1467  * Maps the contents of an object, returning the address it is mapped
1468  * into.
1469  *
1470  * While the mapping holds a reference on the contents of the object, it doesn't
1471  * imply a ref on the object itself.
1472  *
1473  * IMPORTANT:
1474  *
1475  * DRM driver writers who look at this function as an example for how to do GEM
1476  * mmap support, please don't implement mmap support like here. The modern way
1477  * to implement DRM mmap support is with an mmap offset ioctl (like
1478  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1479  * That way debug tooling like valgrind will understand what's going on, hiding
1480  * That way debug tooling like valgrind will understand what's going on; hiding
1481  * the mmap call in a driver-private ioctl breaks that. The i915 driver only
1482  */
1483 int
1484 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1485                     struct drm_file *file)
1486 {
1487         struct drm_i915_gem_mmap *args = data;
1488         struct drm_gem_object *obj;
1489         unsigned long addr;
1490
1491         obj = drm_gem_object_lookup(dev, file, args->handle);
1492         if (obj == NULL)
1493                 return -ENOENT;
1494
1495         /* prime objects have no backing filp to GEM mmap
1496          * pages from.
1497          */
1498         if (!obj->filp) {
1499                 drm_gem_object_unreference_unlocked(obj);
1500                 return -EINVAL;
1501         }
1502
1503         addr = vm_mmap(obj->filp, 0, args->size,
1504                        PROT_READ | PROT_WRITE, MAP_SHARED,
1505                        args->offset);
1506         drm_gem_object_unreference_unlocked(obj);
1507         if (IS_ERR((void *)addr))
1508                 return addr;
1509
1510         args->addr_ptr = (uint64_t) addr;
1511
1512         return 0;
1513 }
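
/*
 * Illustrative userspace sketch (not part of this file) of the legacy CPU
 * mmap path above: the ioctl performs the mapping itself and returns the
 * address in addr_ptr. "fd", "handle" and "size" are assumed inputs; error
 * handling is omitted.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = size,
 *	};
 *	void *ptr = NULL;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */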
1514
1515 /**
1516  * i915_gem_fault - fault a page into the GTT
1517  * @vma: VMA in question
1518  * @vmf: fault info
1519  *
1520  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1521  * from userspace.  The fault handler takes care of binding the object to
1522  * the GTT (if needed), allocating and programming a fence register (again,
1523  * only if needed based on whether the old reg is still valid or the object
1524  * is tiled) and inserting a new PTE into the faulting process.
1525  *
1526  * Note that the faulting process may involve evicting existing objects
1527  * from the GTT and/or fence registers to make room.  So performance may
1528  * suffer if the GTT working set is large or there are few fence registers
1529  * left.
1530  */
1531 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1532 {
1533         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1534         struct drm_device *dev = obj->base.dev;
1535         struct drm_i915_private *dev_priv = dev->dev_private;
1536         pgoff_t page_offset;
1537         unsigned long pfn;
1538         int ret = 0;
1539         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1540
1541         intel_runtime_pm_get(dev_priv);
1542
1543         /* We don't use vmf->pgoff since that has the fake offset */
1544         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1545                 PAGE_SHIFT;
1546
1547         ret = i915_mutex_lock_interruptible(dev);
1548         if (ret)
1549                 goto out;
1550
1551         trace_i915_gem_object_fault(obj, page_offset, true, write);
1552
1553         /* Try to flush the object off the GPU first without holding the lock.
1554          * Upon reacquiring the lock, we will perform our sanity checks and then
1555          * repeat the flush holding the lock in the normal manner to catch cases
1556          * where we are gazumped.
1557          */
1558         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1559         if (ret)
1560                 goto unlock;
1561
1562         /* Access to snoopable pages through the GTT is incoherent. */
1563         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1564                 ret = -EFAULT;
1565                 goto unlock;
1566         }
1567
1568         /* Now bind it into the GTT if needed */
1569         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1570         if (ret)
1571                 goto unlock;
1572
1573         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1574         if (ret)
1575                 goto unpin;
1576
1577         ret = i915_gem_object_get_fence(obj);
1578         if (ret)
1579                 goto unpin;
1580
1581         /* Finally, remap it using the new GTT offset */
1582         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1583         pfn >>= PAGE_SHIFT;
1584
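        /*
         * On the first fault we prefault the whole mappable range of the
         * object (bounded by the VMA size); later faults only insert the
         * single page that was touched.
         */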
1585         if (!obj->fault_mappable) {
1586                 unsigned long size = min_t(unsigned long,
1587                                            vma->vm_end - vma->vm_start,
1588                                            obj->base.size);
1589                 int i;
1590
1591                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1592                         ret = vm_insert_pfn(vma,
1593                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1594                                             pfn + i);
1595                         if (ret)
1596                                 break;
1597                 }
1598
1599                 obj->fault_mappable = true;
1600         } else
1601                 ret = vm_insert_pfn(vma,
1602                                     (unsigned long)vmf->virtual_address,
1603                                     pfn + page_offset);
1604 unpin:
1605         i915_gem_object_ggtt_unpin(obj);
1606 unlock:
1607         mutex_unlock(&dev->struct_mutex);
1608 out:
1609         switch (ret) {
1610         case -EIO:
1611                 /*
1612                  * We eat errors when the gpu is terminally wedged to avoid
1613                  * userspace unduly crashing (gl has no provisions for mmaps to
1614                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1615                  * and so needs to be reported.
1616                  */
1617                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1618                         ret = VM_FAULT_SIGBUS;
1619                         break;
1620                 }
1621         case -EAGAIN:
1622                 /*
1623                  * EAGAIN means the gpu is hung and we'll wait for the error
1624                  * handler to reset everything when re-faulting in
1625                  * i915_mutex_lock_interruptible.
1626                  */
1627         case 0:
1628         case -ERESTARTSYS:
1629         case -EINTR:
1630         case -EBUSY:
1631                 /*
1632                  * EBUSY is ok: this just means that another thread
1633                  * already did the job.
1634                  */
1635                 ret = VM_FAULT_NOPAGE;
1636                 break;
1637         case -ENOMEM:
1638                 ret = VM_FAULT_OOM;
1639                 break;
1640         case -ENOSPC:
1641         case -EFAULT:
1642                 ret = VM_FAULT_SIGBUS;
1643                 break;
1644         default:
1645                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1646                 ret = VM_FAULT_SIGBUS;
1647                 break;
1648         }
1649
1650         intel_runtime_pm_put(dev_priv);
1651         return ret;
1652 }
1653
1654 /**
1655  * i915_gem_release_mmap - remove physical page mappings
1656  * @obj: obj in question
1657  *
1658  * Preserve the reservation of the mmapping with the DRM core code, but
1659  * relinquish ownership of the pages back to the system.
1660  *
1661  * It is vital that we remove the page mapping if we have mapped a tiled
1662  * object through the GTT and then lose the fence register due to
1663  * resource pressure. Similarly if the object has been moved out of the
1664  * aperture, then pages mapped into userspace must be revoked. Removing the
1665  * mapping will then trigger a page fault on the next user access, allowing
1666  * fixup by i915_gem_fault().
1667  */
1668 void
1669 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1670 {
1671         if (!obj->fault_mappable)
1672                 return;
1673
1674         drm_vma_node_unmap(&obj->base.vma_node,
1675                            obj->base.dev->anon_inode->i_mapping);
1676         obj->fault_mappable = false;
1677 }
1678
1679 void
1680 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1681 {
1682         struct drm_i915_gem_object *obj;
1683
1684         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1685                 i915_gem_release_mmap(obj);
1686 }
1687
1688 uint32_t
1689 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1690 {
1691         uint32_t gtt_size;
1692
1693         if (INTEL_INFO(dev)->gen >= 4 ||
1694             tiling_mode == I915_TILING_NONE)
1695                 return size;
1696
1697         /* Previous chips need a power-of-two fence region when tiling */
1698         if (INTEL_INFO(dev)->gen == 3)
1699                 gtt_size = 1024*1024;
1700         else
1701                 gtt_size = 512*1024;
1702
1703         while (gtt_size < size)
1704                 gtt_size <<= 1;
1705
1706         return gtt_size;
1707 }
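
/*
 * Illustrative example (not part of this file): on gen3 a 700KiB X-tiled
 * object starts from the 1MiB minimum and already fits, so its fence region
 * is 1MiB; a 1.5MiB tiled object doubles once to 2MiB. On gen4+ (or with no
 * tiling at all) the object size is returned unchanged.
 */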
1708
1709 /**
1710  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1711  * @obj: object to check
1712  *
1713  * Return the required GTT alignment for an object, taking into account
1714  * potential fence register mapping.
1715  */
1716 uint32_t
1717 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1718                            int tiling_mode, bool fenced)
1719 {
1720         /*
1721          * Minimum alignment is 4k (GTT page size), but might be greater
1722          * if a fence register is needed for the object.
1723          */
1724         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1725             tiling_mode == I915_TILING_NONE)
1726                 return 4096;
1727
1728         /*
1729          * Previous chips need to be aligned to the size of the smallest
1730          * fence register that can contain the object.
1731          */
1732         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1733 }
1734
1735 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1736 {
1737         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1738         int ret;
1739
1740         if (drm_vma_node_has_offset(&obj->base.vma_node))
1741                 return 0;
1742
1743         dev_priv->mm.shrinker_no_lock_stealing = true;
1744
1745         ret = drm_gem_create_mmap_offset(&obj->base);
1746         if (ret != -ENOSPC)
1747                 goto out;
1748
1749         /* Badly fragmented mmap space? The only way we can recover
1750          * space is by destroying unwanted objects. We can't randomly release
1751          * mmap_offsets as userspace expects them to be persistent for the
1752          * lifetime of the objects. The closest we can is to release the
1753          * lifetime of the objects. The closest we can do is to release the
1754          * offsets on purgeable objects by truncating them and marking them purged,
1755          */
1756         i915_gem_shrink(dev_priv,
1757                         obj->base.size >> PAGE_SHIFT,
1758                         I915_SHRINK_BOUND |
1759                         I915_SHRINK_UNBOUND |
1760                         I915_SHRINK_PURGEABLE);
1761         ret = drm_gem_create_mmap_offset(&obj->base);
1762         if (ret != -ENOSPC)
1763                 goto out;
1764
1765         i915_gem_shrink_all(dev_priv);
1766         ret = drm_gem_create_mmap_offset(&obj->base);
1767 out:
1768         dev_priv->mm.shrinker_no_lock_stealing = false;
1769
1770         return ret;
1771 }
1772
1773 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1774 {
1775         drm_gem_free_mmap_offset(&obj->base);
1776 }
1777
1778 static int
1779 i915_gem_mmap_gtt(struct drm_file *file,
1780                   struct drm_device *dev,
1781                   uint32_t handle, bool dumb,
1782                   uint64_t *offset)
1783 {
1784         struct drm_i915_private *dev_priv = dev->dev_private;
1785         struct drm_i915_gem_object *obj;
1786         int ret;
1787
1788         ret = i915_mutex_lock_interruptible(dev);
1789         if (ret)
1790                 return ret;
1791
1792         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1793         if (&obj->base == NULL) {
1794                 ret = -ENOENT;
1795                 goto unlock;
1796         }
1797
1798         /*
1799          * We don't allow dumb mmaps on objects created using another
1800          * interface.
1801          */
1802         WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1803                   "Illegal dumb map of accelerated buffer.\n");
1804
1805         if (obj->base.size > dev_priv->gtt.mappable_end) {
1806                 ret = -E2BIG;
1807                 goto out;
1808         }
1809
1810         if (obj->madv != I915_MADV_WILLNEED) {
1811                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1812                 ret = -EFAULT;
1813                 goto out;
1814         }
1815
1816         ret = i915_gem_object_create_mmap_offset(obj);
1817         if (ret)
1818                 goto out;
1819
1820         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1821
1822 out:
1823         drm_gem_object_unreference(&obj->base);
1824 unlock:
1825         mutex_unlock(&dev->struct_mutex);
1826         return ret;
1827 }
1828
1829 int
1830 i915_gem_dumb_map_offset(struct drm_file *file,
1831                          struct drm_device *dev,
1832                          uint32_t handle,
1833                          uint64_t *offset)
1834 {
1835         return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1836 }
1837
1838 /**
1839  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1840  * @dev: DRM device
1841  * @data: GTT mapping ioctl data
1842  * @file: GEM object info
1843  *
1844  * Simply returns the fake offset to userspace so it can mmap it.
1845  * The mmap call will end up in drm_gem_mmap(), which will set things
1846  * up so we can get faults in the handler above.
1847  *
1848  * The fault handler will take care of binding the object into the GTT
1849  * (since it may have been evicted to make room for something), allocating
1850  * a fence register, and mapping the appropriate aperture address into
1851  * userspace.
1852  */
1853 int
1854 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1855                         struct drm_file *file)
1856 {
1857         struct drm_i915_gem_mmap_gtt *args = data;
1858
1859         return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
1860 }
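
/*
 * Illustrative userspace sketch (not part of this file) of the fake-offset
 * flow described above: fetch the offset with the MMAP_GTT ioctl, then mmap
 * the DRM fd at that offset so that faults land in i915_gem_fault(). "fd",
 * "handle" and "size" are assumed inputs; error handling is omitted.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */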
1861
1862 static inline int
1863 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1864 {
1865         return obj->madv == I915_MADV_DONTNEED;
1866 }
1867
1868 /* Immediately discard the backing storage */
1869 static void
1870 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1871 {
1872         i915_gem_object_free_mmap_offset(obj);
1873
1874         if (obj->base.filp == NULL)
1875                 return;
1876
1877         /* Our goal here is to return as much of the memory as
1878          * possible back to the system, as we are called from the OOM path.
1879          * To do this we must instruct the shmfs to drop all of its
1880          * backing pages, *now*.
1881          */
1882         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1883         obj->madv = __I915_MADV_PURGED;
1884 }
1885
1886 /* Try to discard unwanted pages */
1887 static void
1888 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1889 {
1890         struct address_space *mapping;
1891
1892         switch (obj->madv) {
1893         case I915_MADV_DONTNEED:
1894                 i915_gem_object_truncate(obj);
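                /* fall through */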
1895         case __I915_MADV_PURGED:
1896                 return;
1897         }
1898
1899         if (obj->base.filp == NULL)
1900                 return;
1901
1902         mapping = file_inode(obj->base.filp)->i_mapping;
1903         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1904 }
1905
1906 static void
1907 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1908 {
1909         struct sg_page_iter sg_iter;
1910         int ret;
1911
1912         BUG_ON(obj->madv == __I915_MADV_PURGED);
1913
1914         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1915         if (ret) {
1916                 /* In the event of a disaster, abandon all caches and
1917                  * hope for the best.
1918                  */
1919                 WARN_ON(ret != -EIO);
1920                 i915_gem_clflush_object(obj, true);
1921                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1922         }
1923
1924         if (i915_gem_object_needs_bit17_swizzle(obj))
1925                 i915_gem_object_save_bit_17_swizzle(obj);
1926
1927         if (obj->madv == I915_MADV_DONTNEED)
1928                 obj->dirty = 0;
1929
1930         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1931                 struct page *page = sg_page_iter_page(&sg_iter);
1932
1933                 if (obj->dirty)
1934                         set_page_dirty(page);
1935
1936                 if (obj->madv == I915_MADV_WILLNEED)
1937                         mark_page_accessed(page);
1938
1939                 page_cache_release(page);
1940         }
1941         obj->dirty = 0;
1942
1943         sg_free_table(obj->pages);
1944         kfree(obj->pages);
1945 }
1946
1947 int
1948 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1949 {
1950         const struct drm_i915_gem_object_ops *ops = obj->ops;
1951
1952         if (obj->pages == NULL)
1953                 return 0;
1954
1955         if (obj->pages_pin_count)
1956                 return -EBUSY;
1957
1958         BUG_ON(i915_gem_obj_bound_any(obj));
1959
1960         /* ->put_pages might need to allocate memory for the bit17 swizzle
1961          * array, hence protect them from being reaped by removing them from gtt
1962          * lists early. */
1963         list_del(&obj->global_list);
1964
1965         ops->put_pages(obj);
1966         obj->pages = NULL;
1967
1968         i915_gem_object_invalidate(obj);
1969
1970         return 0;
1971 }
1972
1973 unsigned long
1974 i915_gem_shrink(struct drm_i915_private *dev_priv,
1975                 long target, unsigned flags)
1976 {
1977         const struct {
1978                 struct list_head *list;
1979                 unsigned int bit;
1980         } phases[] = {
1981                 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
1982                 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
1983                 { NULL, 0 },
1984         }, *phase;
1985         unsigned long count = 0;
1986
1987         /*
1988          * As we may completely rewrite the (un)bound list whilst unbinding
1989          * (due to retiring requests) we have to strictly process only
1990          * one element of the list at the time, and recheck the list
1991          * on every iteration.
1992          *
1993          * In particular, we must hold a reference whilst removing the
1994          * object as we may end up waiting for and/or retiring the objects.
1995          * This might release the final reference (held by the active list)
1996          * and result in the object being freed from under us. This is
1997          * similar to the precautions the eviction code must take whilst
1998          * removing objects.
1999          *
2000          * Also note that although these lists do not hold a reference to
2001          * the object we can safely grab one here: The final object
2002          * unreferencing and the bound_list are both protected by the
2003          * dev->struct_mutex and so we won't ever be able to observe an
2004          * object on the bound_list with a reference count equals 0.
2005          * object on the bound_list with a reference count equal to 0.
2006         for (phase = phases; phase->list; phase++) {
2007                 struct list_head still_in_list;
2008
2009                 if ((flags & phase->bit) == 0)
2010                         continue;
2011
2012                 INIT_LIST_HEAD(&still_in_list);
2013                 while (count < target && !list_empty(phase->list)) {
2014                         struct drm_i915_gem_object *obj;
2015                         struct i915_vma *vma, *v;
2016
2017                         obj = list_first_entry(phase->list,
2018                                                typeof(*obj), global_list);
2019                         list_move_tail(&obj->global_list, &still_in_list);
2020
2021                         if (flags & I915_SHRINK_PURGEABLE &&
2022                             !i915_gem_object_is_purgeable(obj))
2023                                 continue;
2024
2025                         drm_gem_object_reference(&obj->base);
2026
2027                         /* For the unbound phase, this should be a no-op! */
2028                         list_for_each_entry_safe(vma, v,
2029                                                  &obj->vma_list, vma_link)
2030                                 if (i915_vma_unbind(vma))
2031                                         break;
2032
2033                         if (i915_gem_object_put_pages(obj) == 0)
2034                                 count += obj->base.size >> PAGE_SHIFT;
2035
2036                         drm_gem_object_unreference(&obj->base);
2037                 }
2038                 list_splice(&still_in_list, phase->list);
2039         }
2040
2041         return count;
2042 }
2043
2044 static unsigned long
2045 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2046 {
2047         i915_gem_evict_everything(dev_priv->dev);
2048         return i915_gem_shrink(dev_priv, LONG_MAX,
2049                                I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
2050 }
2051
2052 static int
2053 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2054 {
2055         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2056         int page_count, i;
2057         struct address_space *mapping;
2058         struct sg_table *st;
2059         struct scatterlist *sg;
2060         struct sg_page_iter sg_iter;
2061         struct page *page;
2062         unsigned long last_pfn = 0;     /* suppress gcc warning */
2063         gfp_t gfp;
2064
2065         /* Assert that the object is not currently in any GPU domain. As it
2066          * wasn't in the GTT, there shouldn't be any way it could have been in
2067          * a GPU cache
2068          */
2069         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2070         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2071
2072         st = kmalloc(sizeof(*st), GFP_KERNEL);
2073         if (st == NULL)
2074                 return -ENOMEM;
2075
2076         page_count = obj->base.size / PAGE_SIZE;
2077         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2078                 kfree(st);
2079                 return -ENOMEM;
2080         }
2081
2082         /* Get the list of pages out of our struct file.  They'll be pinned
2083          * at this point until we release them.
2084          *
2085          * Fail silently without starting the shrinker
2086          */
2087         mapping = file_inode(obj->base.filp)->i_mapping;
2088         gfp = mapping_gfp_mask(mapping);
2089         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2090         gfp &= ~(__GFP_IO | __GFP_WAIT);
2091         sg = st->sgl;
2092         st->nents = 0;
2093         for (i = 0; i < page_count; i++) {
2094                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2095                 if (IS_ERR(page)) {
2096                         i915_gem_shrink(dev_priv,
2097                                         page_count,
2098                                         I915_SHRINK_BOUND |
2099                                         I915_SHRINK_UNBOUND |
2100                                         I915_SHRINK_PURGEABLE);
2101                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2102                 }
2103                 if (IS_ERR(page)) {
2104                         /* We've tried hard to allocate the memory by reaping
2105                          * our own buffers; now let the real VM do its job and
2106                          * go down in flames if truly OOM.
2107                          */
2108                         i915_gem_shrink_all(dev_priv);
2109                         page = shmem_read_mapping_page(mapping, i);
2110                         if (IS_ERR(page))
2111                                 goto err_pages;
2112                 }
2113 #ifdef CONFIG_SWIOTLB
2114                 if (swiotlb_nr_tbl()) {
2115                         st->nents++;
2116                         sg_set_page(sg, page, PAGE_SIZE, 0);
2117                         sg = sg_next(sg);
2118                         continue;
2119                 }
2120 #endif
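                /*
                 * Coalesce physically contiguous pages into a single sg
                 * entry; start a new entry whenever the next page is not
                 * adjacent to the previous one.
                 */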
2121                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2122                         if (i)
2123                                 sg = sg_next(sg);
2124                         st->nents++;
2125                         sg_set_page(sg, page, PAGE_SIZE, 0);
2126                 } else {
2127                         sg->length += PAGE_SIZE;
2128                 }
2129                 last_pfn = page_to_pfn(page);
2130
2131                 /* Check that the i965g/gm workaround works. */
2132                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2133         }
2134 #ifdef CONFIG_SWIOTLB
2135         if (!swiotlb_nr_tbl())
2136 #endif
2137                 sg_mark_end(sg);
2138         obj->pages = st;
2139
2140         if (i915_gem_object_needs_bit17_swizzle(obj))
2141                 i915_gem_object_do_bit_17_swizzle(obj);
2142
2143         return 0;
2144
2145 err_pages:
2146         sg_mark_end(sg);
2147         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2148                 page_cache_release(sg_page_iter_page(&sg_iter));
2149         sg_free_table(st);
2150         kfree(st);
2151
2152         /* shmemfs first checks if there is enough memory to allocate the page
2153          * and reports ENOSPC should there be insufficient, along with the usual
2154          * ENOMEM for a genuine allocation failure.
2155          *
2156          * We use ENOSPC in our driver to mean that we have run out of aperture
2157          * space and so want to translate the error from shmemfs back to our
2158          * usual understanding of ENOMEM.
2159          */
2160         if (PTR_ERR(page) == -ENOSPC)
2161                 return -ENOMEM;
2162         else
2163                 return PTR_ERR(page);
2164 }
2165
2166 /* Ensure that the associated pages are gathered from the backing storage
2167  * and pinned into our object. i915_gem_object_get_pages() may be called
2168  * multiple times before they are released by a single call to
2169  * i915_gem_object_put_pages() - once the pages are no longer referenced
2170  * either as a result of memory pressure (reaping pages under the shrinker)
2171  * or as the object is itself released.
2172  */
2173 int
2174 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2175 {
2176         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2177         const struct drm_i915_gem_object_ops *ops = obj->ops;
2178         int ret;
2179
2180         if (obj->pages)
2181                 return 0;
2182
2183         if (obj->madv != I915_MADV_WILLNEED) {
2184                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2185                 return -EFAULT;
2186         }
2187
2188         BUG_ON(obj->pages_pin_count);
2189
2190         ret = ops->get_pages(obj);
2191         if (ret)
2192                 return ret;
2193
2194         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2195         return 0;
2196 }
2197
2198 static void
2199 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2200                                struct intel_engine_cs *ring)
2201 {
2202         u32 seqno = intel_ring_get_seqno(ring);
2203
2204         BUG_ON(ring == NULL);
2205         if (obj->ring != ring && obj->last_write_seqno) {
2206                 /* Keep the seqno relative to the current ring */
2207                 obj->last_write_seqno = seqno;
2208         }
2209         obj->ring = ring;
2210
2211         /* Add a reference if we're newly entering the active list. */
2212         if (!obj->active) {
2213                 drm_gem_object_reference(&obj->base);
2214                 obj->active = 1;
2215         }
2216
2217         list_move_tail(&obj->ring_list, &ring->active_list);
2218
2219         obj->last_read_seqno = seqno;
2220 }
2221
2222 void i915_vma_move_to_active(struct i915_vma *vma,
2223                              struct intel_engine_cs *ring)
2224 {
2225         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2226         return i915_gem_object_move_to_active(vma->obj, ring);
2227 }
2228
2229 static void
2230 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2231 {
2232         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2233         struct i915_address_space *vm;
2234         struct i915_vma *vma;
2235
2236         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2237         BUG_ON(!obj->active);
2238
2239         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2240                 vma = i915_gem_obj_to_vma(obj, vm);
2241                 if (vma && !list_empty(&vma->mm_list))
2242                         list_move_tail(&vma->mm_list, &vm->inactive_list);
2243         }
2244
2245         intel_fb_obj_flush(obj, true);
2246
2247         list_del_init(&obj->ring_list);
2248         obj->ring = NULL;
2249
2250         obj->last_read_seqno = 0;
2251         obj->last_write_seqno = 0;
2252         obj->base.write_domain = 0;
2253
2254         obj->last_fenced_seqno = 0;
2255
2256         obj->active = 0;
2257         drm_gem_object_unreference(&obj->base);
2258
2259         WARN_ON(i915_verify_lists(dev));
2260 }
2261
2262 static void
2263 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2264 {
2265         struct intel_engine_cs *ring = obj->ring;
2266
2267         if (ring == NULL)
2268                 return;
2269
2270         if (i915_seqno_passed(ring->get_seqno(ring, true),
2271                               obj->last_read_seqno))
2272                 i915_gem_object_move_to_inactive(obj);
2273 }
2274
2275 static int
2276 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2277 {
2278         struct drm_i915_private *dev_priv = dev->dev_private;
2279         struct intel_engine_cs *ring;
2280         int ret, i, j;
2281
2282         /* Carefully retire all requests without writing to the rings */
2283         for_each_ring(ring, dev_priv, i) {
2284                 ret = intel_ring_idle(ring);
2285                 if (ret)
2286                         return ret;
2287         }
2288         i915_gem_retire_requests(dev);
2289
2290         /* Finally reset hw state */
2291         for_each_ring(ring, dev_priv, i) {
2292                 intel_ring_init_seqno(ring, seqno);
2293
2294                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2295                         ring->semaphore.sync_seqno[j] = 0;
2296         }
2297
2298         return 0;
2299 }
2300
2301 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2302 {
2303         struct drm_i915_private *dev_priv = dev->dev_private;
2304         int ret;
2305
2306         if (seqno == 0)
2307                 return -EINVAL;
2308
2309         /* The HWS page seqno needs to be set to less than what we
2310          * will inject into the ring.
2311          */
2312         ret = i915_gem_init_seqno(dev, seqno - 1);
2313         if (ret)
2314                 return ret;
2315
2316         /* Carefully set the last_seqno value so that wrap
2317          * detection still works
2318          */
2319         dev_priv->next_seqno = seqno;
2320         dev_priv->last_seqno = seqno - 1;
2321         if (dev_priv->last_seqno == 0)
2322                 dev_priv->last_seqno--;
2323
2324         return 0;
2325 }
2326
2327 int
2328 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2329 {
2330         struct drm_i915_private *dev_priv = dev->dev_private;
2331
2332         /* reserve 0 for non-seqno */
2333         if (dev_priv->next_seqno == 0) {
2334                 int ret = i915_gem_init_seqno(dev, 0);
2335                 if (ret)
2336                         return ret;
2337
2338                 dev_priv->next_seqno = 1;
2339         }
2340
2341         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2342         return 0;
2343 }
2344
2345 int __i915_add_request(struct intel_engine_cs *ring,
2346                        struct drm_file *file,
2347                        struct drm_i915_gem_object *obj,
2348                        u32 *out_seqno)
2349 {
2350         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2351         struct drm_i915_gem_request *request;
2352         struct intel_ringbuffer *ringbuf;
2353         u32 request_ring_position, request_start;
2354         int ret;
2355
2356         request = ring->preallocated_lazy_request;
2357         if (WARN_ON(request == NULL))
2358                 return -ENOMEM;
2359
2360         if (i915.enable_execlists) {
2361                 struct intel_context *ctx = request->ctx;
2362                 ringbuf = ctx->engine[ring->id].ringbuf;
2363         } else
2364                 ringbuf = ring->buffer;
2365
2366         request_start = intel_ring_get_tail(ringbuf);
2367         /*
2368          * Emit any outstanding flushes - execbuf can fail to emit the flush
2369          * after having emitted the batchbuffer command. Hence we need to fix
2370          * things up similar to emitting the lazy request. The difference here
2371          * is that the flush _must_ happen before the next request, no matter
2372          * what.
2373          */
2374         if (i915.enable_execlists) {
2375                 ret = logical_ring_flush_all_caches(ringbuf);
2376                 if (ret)
2377                         return ret;
2378         } else {
2379                 ret = intel_ring_flush_all_caches(ring);
2380                 if (ret)
2381                         return ret;
2382         }
2383
2384         /* Record the position of the start of the request so that
2385          * should we detect the updated seqno part-way through the
2386          * GPU processing the request, we never over-estimate the
2387          * position of the head.
2388          */
2389         request_ring_position = intel_ring_get_tail(ringbuf);
2390
2391         if (i915.enable_execlists) {
2392                 ret = ring->emit_request(ringbuf);
2393                 if (ret)
2394                         return ret;
2395         } else {
2396                 ret = ring->add_request(ring);
2397                 if (ret)
2398                         return ret;
2399         }
2400
2401         request->seqno = intel_ring_get_seqno(ring);
2402         request->ring = ring;
2403         request->head = request_start;
2404         request->tail = request_ring_position;
2405
2406         /* Whilst this request exists, batch_obj will be on the
2407          * active_list, and so will hold the active reference. Only when this
2408          * request is retired will the batch_obj be moved onto the
2409          * inactive_list and lose its active reference. Hence we do not need
2410          * to explicitly hold another reference here.
2411          */
2412         request->batch_obj = obj;
2413
2414         if (!i915.enable_execlists) {
2415                 /* Hold a reference to the current context so that we can inspect
2416                  * it later in case a hangcheck error event fires.
2417                  */
2418                 request->ctx = ring->last_context;
2419                 if (request->ctx)
2420                         i915_gem_context_reference(request->ctx);
2421         }
2422
2423         request->emitted_jiffies = jiffies;
2424         list_add_tail(&request->list, &ring->request_list);
2425         request->file_priv = NULL;
2426
2427         if (file) {
2428                 struct drm_i915_file_private *file_priv = file->driver_priv;
2429
2430                 spin_lock(&file_priv->mm.lock);
2431                 request->file_priv = file_priv;
2432                 list_add_tail(&request->client_list,
2433                               &file_priv->mm.request_list);
2434                 spin_unlock(&file_priv->mm.lock);
2435         }
2436
2437         trace_i915_gem_request_add(ring, request->seqno);
2438         ring->outstanding_lazy_seqno = 0;
2439         ring->preallocated_lazy_request = NULL;
2440
2441         if (!dev_priv->ums.mm_suspended) {
2442                 i915_queue_hangcheck(ring->dev);
2443
2444                 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2445                 queue_delayed_work(dev_priv->wq,
2446                                    &dev_priv->mm.retire_work,
2447                                    round_jiffies_up_relative(HZ));
2448                 intel_mark_busy(dev_priv->dev);
2449         }
2450
2451         if (out_seqno)
2452                 *out_seqno = request->seqno;
2453         return 0;
2454 }
2455
2456 static inline void
2457 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2458 {
2459         struct drm_i915_file_private *file_priv = request->file_priv;
2460
2461         if (!file_priv)
2462                 return;
2463
2464         spin_lock(&file_priv->mm.lock);
2465         list_del(&request->client_list);
2466         request->file_priv = NULL;
2467         spin_unlock(&file_priv->mm.lock);
2468 }
2469
2470 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2471                                    const struct intel_context *ctx)
2472 {
2473         unsigned long elapsed;
2474
2475         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2476
2477         if (ctx->hang_stats.banned)
2478                 return true;
2479
2480         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2481                 if (!i915_gem_context_is_default(ctx)) {
2482                         DRM_DEBUG("context hanging too fast, banning!\n");
2483                         return true;
2484                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2485                         if (i915_stop_ring_allow_warn(dev_priv))
2486                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2487                         return true;
2488                 }
2489         }
2490
2491         return false;
2492 }
2493
2494 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2495                                   struct intel_context *ctx,
2496                                   const bool guilty)
2497 {
2498         struct i915_ctx_hang_stats *hs;
2499
2500         if (WARN_ON(!ctx))
2501                 return;
2502
2503         hs = &ctx->hang_stats;
2504
2505         if (guilty) {
2506                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2507                 hs->batch_active++;
2508                 hs->guilty_ts = get_seconds();
2509         } else {
2510                 hs->batch_pending++;
2511         }
2512 }
2513
2514 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2515 {
2516         list_del(&request->list);
2517         i915_gem_request_remove_from_client(request);
2518
2519         if (request->ctx)
2520                 i915_gem_context_unreference(request->ctx);
2521
2522         kfree(request);
2523 }
2524
2525 struct drm_i915_gem_request *
2526 i915_gem_find_active_request(struct intel_engine_cs *ring)
2527 {
2528         struct drm_i915_gem_request *request;
2529         u32 completed_seqno;
2530
2531         completed_seqno = ring->get_seqno(ring, false);
2532
2533         list_for_each_entry(request, &ring->request_list, list) {
2534                 if (i915_seqno_passed(completed_seqno, request->seqno))
2535                         continue;
2536
2537                 return request;
2538         }
2539
2540         return NULL;
2541 }
2542
2543 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2544                                        struct intel_engine_cs *ring)
2545 {
2546         struct drm_i915_gem_request *request;
2547         bool ring_hung;
2548
2549         request = i915_gem_find_active_request(ring);
2550
2551         if (request == NULL)
2552                 return;
2553
2554         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2555
2556         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2557
2558         list_for_each_entry_continue(request, &ring->request_list, list)
2559                 i915_set_reset_status(dev_priv, request->ctx, false);
2560 }
2561
2562 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2563                                         struct intel_engine_cs *ring)
2564 {
2565         while (!list_empty(&ring->active_list)) {
2566                 struct drm_i915_gem_object *obj;
2567
2568                 obj = list_first_entry(&ring->active_list,
2569                                        struct drm_i915_gem_object,
2570                                        ring_list);
2571
2572                 i915_gem_object_move_to_inactive(obj);
2573         }
2574
2575         /*
2576          * We must free the requests after all the corresponding objects have
2577          * been moved off the active lists, which is the same order the normal
2578          * retire_requests path uses. This is important if objects hold
2579          * implicit references on things such as ppgtt address spaces through
2580          * the request.
2581          */
2582         while (!list_empty(&ring->request_list)) {
2583                 struct drm_i915_gem_request *request;
2584
2585                 request = list_first_entry(&ring->request_list,
2586                                            struct drm_i915_gem_request,
2587                                            list);
2588
2589                 i915_gem_free_request(request);
2590         }
2591
2592         while (!list_empty(&ring->execlist_queue)) {
2593                 struct intel_ctx_submit_request *submit_req;
2594
2595                 submit_req = list_first_entry(&ring->execlist_queue,
2596                                 struct intel_ctx_submit_request,
2597                                 execlist_link);
2598                 list_del(&submit_req->execlist_link);
2599                 intel_runtime_pm_put(dev_priv);
2600                 i915_gem_context_unreference(submit_req->ctx);
2601                 kfree(submit_req);
2602         }
2603
2604         /* These may not have been flushed before the reset; do so now */
2605         kfree(ring->preallocated_lazy_request);
2606         ring->preallocated_lazy_request = NULL;
2607         ring->outstanding_lazy_seqno = 0;
2608 }
2609
2610 void i915_gem_restore_fences(struct drm_device *dev)
2611 {
2612         struct drm_i915_private *dev_priv = dev->dev_private;
2613         int i;
2614
2615         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2616                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2617
2618                 /*
2619                  * Commit delayed tiling changes if we have an object still
2620                  * attached to the fence, otherwise just clear the fence.
2621                  */
2622                 if (reg->obj) {
2623                         i915_gem_object_update_fence(reg->obj, reg,
2624                                                      reg->obj->tiling_mode);
2625                 } else {
2626                         i915_gem_write_fence(dev, i, NULL);
2627                 }
2628         }
2629 }
2630
2631 void i915_gem_reset(struct drm_device *dev)
2632 {
2633         struct drm_i915_private *dev_priv = dev->dev_private;
2634         struct intel_engine_cs *ring;
2635         int i;
2636
2637         /*
2638          * Before we free the objects from the requests, we need to inspect
2639          * them to find the guilty party. As the requests only borrow
2640          * their reference to the objects, the inspection must be done first.
2641          */
2642         for_each_ring(ring, dev_priv, i)
2643                 i915_gem_reset_ring_status(dev_priv, ring);
2644
2645         for_each_ring(ring, dev_priv, i)
2646                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2647
2648         i915_gem_context_reset(dev);
2649
2650         i915_gem_restore_fences(dev);
2651 }
2652
2653 /**
2654  * This function clears the request list as sequence numbers are passed.
2655  */
2656 void
2657 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2658 {
2659         uint32_t seqno;
2660
2661         if (list_empty(&ring->request_list))
2662                 return;
2663
2664         WARN_ON(i915_verify_lists(ring->dev));
2665
2666         seqno = ring->get_seqno(ring, true);
2667
2668         /* Move any buffers on the active list that are no longer referenced
2669          * by the ringbuffer to the flushing/inactive lists as appropriate,
2670          * before we free the context associated with the requests.
2671          */
2672         while (!list_empty(&ring->active_list)) {
2673                 struct drm_i915_gem_object *obj;
2674
2675                 obj = list_first_entry(&ring->active_list,
2676                                       struct drm_i915_gem_object,
2677                                       ring_list);
2678
2679                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2680                         break;
2681
2682                 i915_gem_object_move_to_inactive(obj);
2683         }
2684
2685
2686         while (!list_empty(&ring->request_list)) {
2687                 struct drm_i915_gem_request *request;
2688                 struct intel_ringbuffer *ringbuf;
2689
2690                 request = list_first_entry(&ring->request_list,
2691                                            struct drm_i915_gem_request,
2692                                            list);
2693
2694                 if (!i915_seqno_passed(seqno, request->seqno))
2695                         break;
2696
2697                 trace_i915_gem_request_retire(ring, request->seqno);
2698
2699                 /* This is one of the few common intersection points
2700                  * between legacy ringbuffer submission and execlists:
2701                  * we need to tell them apart in order to find the correct
2702                  * ringbuffer to which the request belongs.
2703                  */
2704                 if (i915.enable_execlists) {
2705                         struct intel_context *ctx = request->ctx;
2706                         ringbuf = ctx->engine[ring->id].ringbuf;
2707                 } else
2708                         ringbuf = ring->buffer;
2709
2710                 /* We know the GPU must have read the request to have
2711                  * sent us the seqno + interrupt, so use the position
2712                  * of tail of the request to update the last known position
2713                  * of the GPU head.
2714                  */
2715                 ringbuf->last_retired_head = request->tail;
2716
2717                 i915_gem_free_request(request);
2718         }
2719
2720         if (unlikely(ring->trace_irq_seqno &&
2721                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2722                 ring->irq_put(ring);
2723                 ring->trace_irq_seqno = 0;
2724         }
2725
2726         WARN_ON(i915_verify_lists(ring->dev));
2727 }
2728
2729 bool
2730 i915_gem_retire_requests(struct drm_device *dev)
2731 {
2732         struct drm_i915_private *dev_priv = dev->dev_private;
2733         struct intel_engine_cs *ring;
2734         bool idle = true;
2735         int i;
2736
2737         for_each_ring(ring, dev_priv, i) {
2738                 i915_gem_retire_requests_ring(ring);
2739                 idle &= list_empty(&ring->request_list);
2740         }
2741
2742         if (idle)
2743                 mod_delayed_work(dev_priv->wq,
2744                                    &dev_priv->mm.idle_work,
2745                                    msecs_to_jiffies(100));
2746
2747         return idle;
2748 }
2749
2750 static void
2751 i915_gem_retire_work_handler(struct work_struct *work)
2752 {
2753         struct drm_i915_private *dev_priv =
2754                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2755         struct drm_device *dev = dev_priv->dev;
2756         bool idle;
2757
2758         /* Come back later if the device is busy... */
2759         idle = false;
2760         if (mutex_trylock(&dev->struct_mutex)) {
2761                 idle = i915_gem_retire_requests(dev);
2762                 mutex_unlock(&dev->struct_mutex);
2763         }
2764         if (!idle)
2765                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2766                                    round_jiffies_up_relative(HZ));
2767 }
2768
2769 static void
2770 i915_gem_idle_work_handler(struct work_struct *work)
2771 {
2772         struct drm_i915_private *dev_priv =
2773                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2774
2775         intel_mark_idle(dev_priv->dev);
2776 }
2777
2778 /**
2779  * Ensures that an object will eventually get non-busy by flushing any required
2780  * write domains, emitting any outstanding lazy request and retiring any
2781  * completed requests.
2782  */
2783 static int
2784 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2785 {
2786         int ret;
2787
2788         if (obj->active) {
2789                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2790                 if (ret)
2791                         return ret;
2792
2793                 i915_gem_retire_requests_ring(obj->ring);
2794         }
2795
2796         return 0;
2797 }
2798
2799 /**
2800  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2801  * @DRM_IOCTL_ARGS: standard ioctl arguments
2802  *
2803  * Returns 0 if successful, else an error is returned with the remaining time in
2804  * the timeout parameter.
2805  *  -ETIME: object is still busy after timeout
2806  *  -ERESTARTSYS: signal interrupted the wait
2807  *  -ENOENT: object doesn't exist
2808  * Also possible, but rare:
2809  *  -EAGAIN: GPU wedged
2810  *  -ENOMEM: damn
2811  *  -ENODEV: Internal IRQ fail
2812  *  -E?: The add request failed
2813  *
2814  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2815  * non-zero timeout parameter the wait ioctl will wait for the given number of
2816  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2817  * without holding struct_mutex the object may become re-busied before this
2818  * function completes. A similar but shorter race condition exists in the busy
2819  * ioctl.
2820  */
2821 int
2822 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2823 {
2824         struct drm_i915_private *dev_priv = dev->dev_private;
2825         struct drm_i915_gem_wait *args = data;
2826         struct drm_i915_gem_object *obj;
2827         struct intel_engine_cs *ring = NULL;
2828         unsigned reset_counter;
2829         u32 seqno = 0;
2830         int ret = 0;
2831
2832         if (args->flags != 0)
2833                 return -EINVAL;
2834
2835         ret = i915_mutex_lock_interruptible(dev);
2836         if (ret)
2837                 return ret;
2838
2839         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2840         if (&obj->base == NULL) {
2841                 mutex_unlock(&dev->struct_mutex);
2842                 return -ENOENT;
2843         }
2844
2845         /* Need to make sure the object gets inactive eventually. */
2846         ret = i915_gem_object_flush_active(obj);
2847         if (ret)
2848                 goto out;
2849
2850         if (obj->active) {
2851                 seqno = obj->last_read_seqno;
2852                 ring = obj->ring;
2853         }
2854
2855         if (seqno == 0)
2856                  goto out;
2857
2858         /* Do this after OLR check to make sure we make forward progress polling
2859          * on this IOCTL with a timeout <=0 (like busy ioctl)
2860          */
2861         if (args->timeout_ns <= 0) {
2862                 ret = -ETIME;
2863                 goto out;
2864         }
2865
2866         drm_gem_object_unreference(&obj->base);
2867         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2868         mutex_unlock(&dev->struct_mutex);
2869
2870         return __i915_wait_seqno(ring, seqno, reset_counter, true,
2871                                  &args->timeout_ns, file->driver_priv);
2872
2873 out:
2874         drm_gem_object_unreference(&obj->base);
2875         mutex_unlock(&dev->struct_mutex);
2876         return ret;
2877 }
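
/*
 * Illustrative userspace sketch (not part of this file): polling a buffer
 * with the wait ioctl above. A timeout of 0 behaves like the busy ioctl;
 * -ETIME means the object is still busy and timeout_ns holds the remaining
 * time. "fd" and "handle" are assumed inputs.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 1000 * 1000 * 1000,	// one second
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// ret == 0: idle; ret == -1 && errno == ETIME: still busy
 */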
2878
2879 /**
2880  * i915_gem_object_sync - sync an object to a ring.
2881  *
2882  * @obj: object which may be in use on another ring.
2883  * @to: ring we wish to use the object on. May be NULL.
2884  *
2885  * This code is meant to abstract object synchronization with the GPU.
2886  * Calling with NULL implies synchronizing the object with the CPU
2887  * rather than a particular GPU ring.
2888  *
2889  * Returns 0 if successful, else propagates up the lower layer error.
2890  */
2891 int
2892 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2893                      struct intel_engine_cs *to)
2894 {
2895         struct intel_engine_cs *from = obj->ring;
2896         u32 seqno;
2897         int ret, idx;
2898
2899         if (from == NULL || to == from)
2900                 return 0;
2901
2902         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2903                 return i915_gem_object_wait_rendering(obj, false);
2904
2905         idx = intel_ring_sync_index(from, to);
2906
2907         seqno = obj->last_read_seqno;
2908         /* Optimization: Avoid semaphore sync when we are sure we already
2909          * waited for an object with higher seqno */
2910         if (seqno <= from->semaphore.sync_seqno[idx])
2911                 return 0;
2912
2913         ret = i915_gem_check_olr(obj->ring, seqno);
2914         if (ret)
2915                 return ret;
2916
2917         trace_i915_gem_ring_sync_to(from, to, seqno);
2918         ret = to->semaphore.sync_to(to, from, seqno);
2919         if (!ret)
2920                 /* We use last_read_seqno because sync_to()
2921                  * might have just caused seqno wrap under
2922                  * the radar.
2923                  */
2924                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2925
2926         return ret;
2927 }
2928
2929 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2930 {
2931         u32 old_write_domain, old_read_domains;
2932
2933         /* Force a pagefault for domain tracking on next user access */
2934         i915_gem_release_mmap(obj);
2935
2936         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2937                 return;
2938
2939         /* Wait for any direct GTT access to complete */
2940         mb();
2941
2942         old_read_domains = obj->base.read_domains;
2943         old_write_domain = obj->base.write_domain;
2944
2945         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2946         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2947
2948         trace_i915_gem_object_change_domain(obj,
2949                                             old_read_domains,
2950                                             old_write_domain);
2951 }
2952
2953 int i915_vma_unbind(struct i915_vma *vma)
2954 {
2955         struct drm_i915_gem_object *obj = vma->obj;
2956         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2957         int ret;
2958
2959         if (list_empty(&vma->vma_link))
2960                 return 0;
2961
2962         if (!drm_mm_node_allocated(&vma->node)) {
2963                 i915_gem_vma_destroy(vma);
2964                 return 0;
2965         }
2966
2967         if (vma->pin_count)
2968                 return -EBUSY;
2969
2970         BUG_ON(obj->pages == NULL);
2971
2972         ret = i915_gem_object_finish_gpu(obj);
2973         if (ret)
2974                 return ret;
2975         /* Continue on if we fail due to EIO; the GPU is hung, so we
2976          * should be safe and we need to clean up or else we might
2977          * cause memory corruption through use-after-free.
2978          */
2979
2980         /* Throw away the active reference before moving to the unbound list */
2981         i915_gem_object_retire(obj);
2982
2983         if (i915_is_ggtt(vma->vm)) {
2984                 i915_gem_object_finish_gtt(obj);
2985
2986                 /* release the fence reg _after_ flushing */
2987                 ret = i915_gem_object_put_fence(obj);
2988                 if (ret)
2989                         return ret;
2990         }
2991
2992         trace_i915_vma_unbind(vma);
2993
2994         vma->unbind_vma(vma);
2995
2996         list_del_init(&vma->mm_list);
2997         if (i915_is_ggtt(vma->vm))
2998                 obj->map_and_fenceable = false;
2999
3000         drm_mm_remove_node(&vma->node);
3001         i915_gem_vma_destroy(vma);
3002
3003         /* Since the unbound list is global, only move to that list if
3004          * no more VMAs exist. */
3005         if (list_empty(&obj->vma_list)) {
3006                 i915_gem_gtt_finish_object(obj);
3007                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3008         }
3009
3010         /* And finally now the object is completely decoupled from this vma,
3011          * we can drop its hold on the backing storage and allow it to be
3012          * reaped by the shrinker.
3013          */
3014         i915_gem_object_unpin_pages(obj);
3015
3016         return 0;
3017 }
3018
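/*
 * Wait for the GPU to become completely idle: switch each ring back to its
 * default context (legacy submission only) and then wait for every ring to
 * drain.
 */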
3019 int i915_gpu_idle(struct drm_device *dev)
3020 {
3021         struct drm_i915_private *dev_priv = dev->dev_private;
3022         struct intel_engine_cs *ring;
3023         int ret, i;
3024
3025         /* Flush everything onto the inactive list. */
3026         for_each_ring(ring, dev_priv, i) {
3027                 if (!i915.enable_execlists) {
3028                         ret = i915_switch_context(ring, ring->default_context);
3029                         if (ret)
3030                                 return ret;
3031                 }
3032
3033                 ret = intel_ring_idle(ring);
3034                 if (ret)
3035                         return ret;
3036         }
3037
3038         return 0;
3039 }
3040
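/*
 * Program a gen4+ fence register. The 64-bit value encodes the GGTT end
 * address in the upper dword, the start address in the lower dword, plus the
 * pitch (in 128-byte units), Y-tiling and valid bits; the register offset
 * and pitch field position differ between gen4/5 and gen6+.
 */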
3041 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3042                                  struct drm_i915_gem_object *obj)
3043 {
3044         struct drm_i915_private *dev_priv = dev->dev_private;
3045         int fence_reg;
3046         int fence_pitch_shift;
3047
3048         if (INTEL_INFO(dev)->gen >= 6) {
3049                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3050                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3051         } else {
3052                 fence_reg = FENCE_REG_965_0;
3053                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3054         }
3055
3056         fence_reg += reg * 8;
3057
3058         /* To work around incoherency with non-atomic 64-bit register updates,
3059          * we split the 64-bit update into two 32-bit writes. In order
3060          * for a partial fence not to be evaluated between writes, we
3061          * precede the update with write to turn off the fence register,
3062          * and only enable the fence as the last step.
3063          *
3064          * For extra levels of paranoia, we make sure each step lands
3065          * before applying the next step.
3066          */
3067         I915_WRITE(fence_reg, 0);
3068         POSTING_READ(fence_reg);
3069
3070         if (obj) {
3071                 u32 size = i915_gem_obj_ggtt_size(obj);
3072                 uint64_t val;
3073
3074                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3075                                  0xfffff000) << 32;
3076                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3077                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3078                 if (obj->tiling_mode == I915_TILING_Y)
3079                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3080                 val |= I965_FENCE_REG_VALID;
3081
3082                 I915_WRITE(fence_reg + 4, val >> 32);
3083                 POSTING_READ(fence_reg + 4);
3084
3085                 I915_WRITE(fence_reg + 0, val);
3086                 POSTING_READ(fence_reg);
3087         } else {
3088                 I915_WRITE(fence_reg + 4, 0);
3089                 POSTING_READ(fence_reg + 4);
3090         }
3091 }
3092
3093 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3094                                  struct drm_i915_gem_object *obj)
3095 {
3096         struct drm_i915_private *dev_priv = dev->dev_private;
3097         u32 val;
3098
3099         if (obj) {
3100                 u32 size = i915_gem_obj_ggtt_size(obj);
3101                 int pitch_val;
3102                 int tile_width;
3103
3104                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3105                      (size & -size) != size ||
3106                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3107                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3108                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3109
3110                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3111                         tile_width = 128;
3112                 else
3113                         tile_width = 512;
3114
3115         /* Note: the pitch must be a power-of-two number of tile widths */
3116                 pitch_val = obj->stride / tile_width;
3117                 pitch_val = ffs(pitch_val) - 1;
3118
3119                 val = i915_gem_obj_ggtt_offset(obj);
3120                 if (obj->tiling_mode == I915_TILING_Y)
3121                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3122                 val |= I915_FENCE_SIZE_BITS(size);
3123                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3124                 val |= I830_FENCE_REG_VALID;
3125         } else
3126                 val = 0;
3127
3128         if (reg < 8)
3129                 reg = FENCE_REG_830_0 + reg * 4;
3130         else
3131                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3132
3133         I915_WRITE(reg, val);
3134         POSTING_READ(reg);
3135 }
3136
3137 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3138                                 struct drm_i915_gem_object *obj)
3139 {
3140         struct drm_i915_private *dev_priv = dev->dev_private;
3141         uint32_t val;
3142
3143         if (obj) {
3144                 u32 size = i915_gem_obj_ggtt_size(obj);
3145                 uint32_t pitch_val;
3146
3147                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3148                      (size & -size) != size ||
3149                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3150                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3151                      i915_gem_obj_ggtt_offset(obj), size);
3152
3153                 pitch_val = obj->stride / 128;
3154                 pitch_val = ffs(pitch_val) - 1;
3155
3156                 val = i915_gem_obj_ggtt_offset(obj);
3157                 if (obj->tiling_mode == I915_TILING_Y)
3158                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3159                 val |= I830_FENCE_SIZE_BITS(size);
3160                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3161                 val |= I830_FENCE_REG_VALID;
3162         } else
3163                 val = 0;
3164
3165         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3166         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3167 }
3168
3169 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3170 {
3171         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3172 }
3173
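/*
 * Write a fence register, dispatching to the generation-specific helper.
 * Memory barriers before and after the update keep direct GTT access to the
 * affected range from being reordered across the fence change.
 */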
3174 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3175                                  struct drm_i915_gem_object *obj)
3176 {
3177         struct drm_i915_private *dev_priv = dev->dev_private;
3178
3179         /* Ensure that all CPU reads are completed before installing a fence
3180          * and all writes before removing the fence.
3181          */
3182         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3183                 mb();
3184
3185         WARN(obj && (!obj->stride || !obj->tiling_mode),
3186              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3187              obj->stride, obj->tiling_mode);
3188
3189         switch (INTEL_INFO(dev)->gen) {
3190         case 9:
3191         case 8:
3192         case 7:
3193         case 6:
3194         case 5:
3195         case 4: i965_write_fence_reg(dev, reg, obj); break;
3196         case 3: i915_write_fence_reg(dev, reg, obj); break;
3197         case 2: i830_write_fence_reg(dev, reg, obj); break;
3198         default: BUG();
3199         }
3200
3201         /* And similarly be paranoid that no direct access to this region
3202          * is reordered to before the fence is installed.
3203          */
3204         if (i915_gem_object_needs_mb(obj))
3205                 mb();
3206 }
3207
3208 static inline int fence_number(struct drm_i915_private *dev_priv,
3209                                struct drm_i915_fence_reg *fence)
3210 {
3211         return fence - dev_priv->fence_regs;
3212 }
3213
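/*
 * Point a fence register at @obj (or clear it when @enable is false) and
 * keep the object/fence bookkeeping and the fence LRU in sync.
 */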
3214 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3215                                          struct drm_i915_fence_reg *fence,
3216                                          bool enable)
3217 {
3218         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3219         int reg = fence_number(dev_priv, fence);
3220
3221         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3222
3223         if (enable) {
3224                 obj->fence_reg = reg;
3225                 fence->obj = obj;
3226                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3227         } else {
3228                 obj->fence_reg = I915_FENCE_REG_NONE;
3229                 fence->obj = NULL;
3230                 list_del_init(&fence->lru_list);
3231         }
3232         obj->fence_dirty = false;
3233 }
3234
3235 static int
3236 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3237 {
3238         if (obj->last_fenced_seqno) {
3239                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3240                 if (ret)
3241                         return ret;
3242
3243                 obj->last_fenced_seqno = 0;
3244         }
3245
3246         return 0;
3247 }
3248
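/*
 * i915_gem_object_put_fence - release the fence register used by an object
 *
 * Waits for any pending fenced GPU access and then clears the fence
 * register, returning -EBUSY if the fence is still pinned.
 */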
3249 int
3250 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3251 {
3252         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3253         struct drm_i915_fence_reg *fence;
3254         int ret;
3255
3256         ret = i915_gem_object_wait_fence(obj);
3257         if (ret)
3258                 return ret;
3259
3260         if (obj->fence_reg == I915_FENCE_REG_NONE)
3261                 return 0;
3262
3263         fence = &dev_priv->fence_regs[obj->fence_reg];
3264
3265         if (WARN_ON(fence->pin_count))
3266                 return -EBUSY;
3267
3268         i915_gem_object_fence_lost(obj);
3269         i915_gem_object_update_fence(obj, fence, false);
3270
3271         return 0;
3272 }
3273
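/*
 * Find a fence register for reuse: prefer a completely free register,
 * otherwise steal the least recently used unpinned one. If every fence is
 * pinned, report -EAGAIN when pending flips may release one soon, or
 * -EDEADLK otherwise.
 */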
3274 static struct drm_i915_fence_reg *
3275 i915_find_fence_reg(struct drm_device *dev)
3276 {
3277         struct drm_i915_private *dev_priv = dev->dev_private;
3278         struct drm_i915_fence_reg *reg, *avail;
3279         int i;
3280
3281         /* First try to find a free reg */
3282         avail = NULL;
3283         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3284                 reg = &dev_priv->fence_regs[i];
3285                 if (!reg->obj)
3286                         return reg;
3287
3288                 if (!reg->pin_count)
3289                         avail = reg;
3290         }
3291
3292         if (avail == NULL)
3293                 goto deadlock;
3294
3295         /* None available, try to steal one or wait for a user to finish */
3296         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3297                 if (reg->pin_count)
3298                         continue;
3299
3300                 return reg;
3301         }
3302
3303 deadlock:
3304         /* Wait for completion of pending flips which consume fences */
3305         if (intel_has_pending_fb_unpin(dev))
3306                 return ERR_PTR(-EAGAIN);
3307
3308         return ERR_PTR(-EDEADLK);
3309 }
3310
3311 /**
3312  * i915_gem_object_get_fence - set up fencing for an object
3313  * @obj: object to map through a fence reg
3314  *
3315  * When mapping objects through the GTT, userspace wants to be able to write
3316  * to them without having to worry about swizzling if the object is tiled.
3317  * This function walks the fence regs looking for a free one for @obj,
3318  * stealing one if it can't find any.
3319  *
3320  * It then sets up the reg based on the object's properties: address, pitch
3321  * and tiling format.
3322  *
3323  * For an untiled surface, this removes any existing fence.
3324  */
3325 int
3326 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3327 {
3328         struct drm_device *dev = obj->base.dev;
3329         struct drm_i915_private *dev_priv = dev->dev_private;
3330         bool enable = obj->tiling_mode != I915_TILING_NONE;
3331         struct drm_i915_fence_reg *reg;
3332         int ret;
3333
3334         /* Have we updated the tiling parameters on the object, and so
3335          * need to serialise the write to the associated fence register?
3336          */
3337         if (obj->fence_dirty) {
3338                 ret = i915_gem_object_wait_fence(obj);
3339                 if (ret)
3340                         return ret;
3341         }
3342
3343         /* Just update our place in the LRU if our fence is getting reused. */
3344         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3345                 reg = &dev_priv->fence_regs[obj->fence_reg];
3346                 if (!obj->fence_dirty) {
3347                         list_move_tail(&reg->lru_list,
3348                                        &dev_priv->mm.fence_list);
3349                         return 0;
3350                 }
3351         } else if (enable) {
3352                 if (WARN_ON(!obj->map_and_fenceable))
3353                         return -EINVAL;
3354
3355                 reg = i915_find_fence_reg(dev);
3356                 if (IS_ERR(reg))
3357                         return PTR_ERR(reg);
3358
3359                 if (reg->obj) {
3360                         struct drm_i915_gem_object *old = reg->obj;
3361
3362                         ret = i915_gem_object_wait_fence(old);
3363                         if (ret)
3364                                 return ret;
3365
3366                         i915_gem_object_fence_lost(old);
3367                 }
3368         } else
3369                 return 0;
3370
3371         i915_gem_object_update_fence(obj, reg, enable);
3372
3373         return 0;
3374 }
3375
3376 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3377                                      unsigned long cache_level)
3378 {
3379         struct drm_mm_node *gtt_space = &vma->node;
3380         struct drm_mm_node *other;
3381
3382         /*
3383          * On some machines we have to be careful when putting differing types
3384          * of snoopable memory together to avoid the prefetcher crossing memory
3385          * domains and dying. During vm initialisation, we decide whether or not
3386          * these constraints apply and set the drm_mm.color_adjust
3387          * appropriately.
3388          */
3389         if (vma->vm->mm.color_adjust == NULL)
3390                 return true;
3391
3392         if (!drm_mm_node_allocated(gtt_space))
3393                 return true;
3394
3395         if (list_empty(&gtt_space->node_list))
3396                 return true;
3397
3398         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3399         if (other->allocated && !other->hole_follows && other->color != cache_level)
3400                 return false;
3401
3402         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3403         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3404                 return false;
3405
3406         return true;
3407 }
3408
3409 /**
3410  * Finds free space in the GTT aperture and binds the object there.
3411  */
3412 static struct i915_vma *
3413 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3414                            struct i915_address_space *vm,
3415                            unsigned alignment,
3416                            uint64_t flags)
3417 {
3418         struct drm_device *dev = obj->base.dev;
3419         struct drm_i915_private *dev_priv = dev->dev_private;
3420         u32 size, fence_size, fence_alignment, unfenced_alignment;
3421         unsigned long start =
3422                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3423         unsigned long end =
3424                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3425         struct i915_vma *vma;
3426         int ret;
3427
3428         fence_size = i915_gem_get_gtt_size(dev,
3429                                            obj->base.size,
3430                                            obj->tiling_mode);
3431         fence_alignment = i915_gem_get_gtt_alignment(dev,
3432                                                      obj->base.size,
3433                                                      obj->tiling_mode, true);
3434         unfenced_alignment =
3435                 i915_gem_get_gtt_alignment(dev,
3436                                            obj->base.size,
3437                                            obj->tiling_mode, false);
3438
3439         if (alignment == 0)
3440                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3441                                                 unfenced_alignment;
3442         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3443                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3444                 return ERR_PTR(-EINVAL);
3445         }
3446
3447         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3448
3449         /* If the object is bigger than the entire aperture, reject it early
3450          * before evicting everything in a vain attempt to find space.
3451          */
3452         if (obj->base.size > end) {
3453                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zu > %s aperture=%lu\n",
3454                           obj->base.size,
3455                           flags & PIN_MAPPABLE ? "mappable" : "total",
3456                           end);
3457                 return ERR_PTR(-E2BIG);
3458         }
3459
3460         ret = i915_gem_object_get_pages(obj);
3461         if (ret)
3462                 return ERR_PTR(ret);
3463
3464         i915_gem_object_pin_pages(obj);
3465
3466         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3467         if (IS_ERR(vma))
3468                 goto err_unpin;
3469
3470 search_free:
3471         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3472                                                   size, alignment,
3473                                                   obj->cache_level,
3474                                                   start, end,
3475                                                   DRM_MM_SEARCH_DEFAULT,
3476                                                   DRM_MM_CREATE_DEFAULT);
3477         if (ret) {
3478                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3479                                                obj->cache_level,
3480                                                start, end,
3481                                                flags);
3482                 if (ret == 0)
3483                         goto search_free;
3484
3485                 goto err_free_vma;
3486         }
3487         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3488                 ret = -EINVAL;
3489                 goto err_remove_node;
3490         }
3491
3492         ret = i915_gem_gtt_prepare_object(obj);
3493         if (ret)
3494                 goto err_remove_node;
3495
3496         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3497         list_add_tail(&vma->mm_list, &vm->inactive_list);
3498
3499         trace_i915_vma_bind(vma, flags);
3500         vma->bind_vma(vma, obj->cache_level,
3501                       flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3502
3503         return vma;
3504
3505 err_remove_node:
3506         drm_mm_remove_node(&vma->node);
3507 err_free_vma:
3508         i915_gem_vma_destroy(vma);
3509         vma = ERR_PTR(ret);
3510 err_unpin:
3511         i915_gem_object_unpin_pages(obj);
3512         return vma;
3513 }
3514
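/*
 * Flush the CPU cachelines backing @obj if that is actually required.
 * Returns true if cachelines were flushed, in which case the caller is
 * expected to follow up with a chipset flush.
 */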
3515 bool
3516 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3517                         bool force)
3518 {
3519         /* If we don't have a page list set up, then we're not pinned
3520          * to GPU, and we can ignore the cache flush because it'll happen
3521          * again at bind time.
3522          */
3523         if (obj->pages == NULL)
3524                 return false;
3525
3526         /*
3527          * Stolen memory is always coherent with the GPU as it is explicitly
3528          * marked as wc by the system, or the system is cache-coherent.
3529          */
3530         if (obj->stolen)
3531                 return false;
3532
3533         /* If the GPU is snooping the contents of the CPU cache,
3534          * we do not need to manually clear the CPU cache lines.  However,
3535          * the caches are only snooped when the render cache is
3536          * flushed/invalidated.  As we always have to emit invalidations
3537          * and flushes when moving into and out of the RENDER domain, correct
3538          * snooping behaviour occurs naturally as the result of our domain
3539          * tracking.
3540          */
3541         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3542                 return false;
3543
3544         trace_i915_gem_object_clflush(obj);
3545         drm_clflush_sg(obj->pages);
3546
3547         return true;
3548 }
3549
3550 /** Flushes the GTT write domain for the object if it's dirty. */
3551 static void
3552 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3553 {
3554         uint32_t old_write_domain;
3555
3556         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3557                 return;
3558
3559         /* No actual flushing is required for the GTT write domain.  Writes
3560          * to it immediately go to main memory as far as we know, so there's
3561          * no chipset flush.  It also doesn't land in render cache.
3562          *
3563          * However, we do have to enforce the order so that all writes through
3564          * the GTT land before any writes to the device, such as updates to
3565          * the GATT itself.
3566          */
3567         wmb();
3568
3569         old_write_domain = obj->base.write_domain;
3570         obj->base.write_domain = 0;
3571
3572         intel_fb_obj_flush(obj, false);
3573
3574         trace_i915_gem_object_change_domain(obj,
3575                                             obj->base.read_domains,
3576                                             old_write_domain);
3577 }
3578
3579 /** Flushes the CPU write domain for the object if it's dirty. */
3580 static void
3581 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3582                                        bool force)
3583 {
3584         uint32_t old_write_domain;
3585
3586         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3587                 return;
3588
3589         if (i915_gem_clflush_object(obj, force))
3590                 i915_gem_chipset_flush(obj->base.dev);
3591
3592         old_write_domain = obj->base.write_domain;
3593         obj->base.write_domain = 0;
3594
3595         intel_fb_obj_flush(obj, false);
3596
3597         trace_i915_gem_object_change_domain(obj,
3598                                             obj->base.read_domains,
3599                                             old_write_domain);
3600 }
3601
3602 /**
3603  * Moves a single object to the GTT read, and possibly write domain.
3604  *
3605  * This function returns when the move is complete, including waiting on
3606  * flushes to occur.
3607  */
3608 int
3609 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3610 {
3611         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3612         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3613         uint32_t old_write_domain, old_read_domains;
3614         int ret;
3615
3616         /* Not valid to be called on unbound objects. */
3617         if (vma == NULL)
3618                 return -EINVAL;
3619
3620         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3621                 return 0;
3622
3623         ret = i915_gem_object_wait_rendering(obj, !write);
3624         if (ret)
3625                 return ret;
3626
3627         i915_gem_object_retire(obj);
3628         i915_gem_object_flush_cpu_write_domain(obj, false);
3629
3630         /* Serialise direct access to this object with the barriers for
3631          * coherent writes from the GPU, by effectively invalidating the
3632          * GTT domain upon first access.
3633          */
3634         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3635                 mb();
3636
3637         old_write_domain = obj->base.write_domain;
3638         old_read_domains = obj->base.read_domains;
3639
3640         /* It should now be out of any other write domains, and we can update
3641          * the domain values for our changes.
3642          */
3643         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3644         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3645         if (write) {
3646                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3647                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3648                 obj->dirty = 1;
3649         }
3650
3651         if (write)
3652                 intel_fb_obj_invalidate(obj, NULL);
3653
3654         trace_i915_gem_object_change_domain(obj,
3655                                             old_read_domains,
3656                                             old_write_domain);
3657
3658         /* And bump the LRU for this access */
3659         if (i915_gem_object_is_inactive(obj))
3660                 list_move_tail(&vma->mm_list,
3661                                &dev_priv->gtt.base.inactive_list);
3662
3663         return 0;
3664 }
3665
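/*
 * Change the cache level (and hence GTT colouring) of an object. VMAs whose
 * placement is no longer valid for the new level are unbound, the remaining
 * bindings are redone with updated PTEs, and on pre-gen6 hardware any fence
 * register is relinquished since fences cannot be used with snooped memory
 * there.
 */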
3666 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3667                                     enum i915_cache_level cache_level)
3668 {
3669         struct drm_device *dev = obj->base.dev;
3670         struct i915_vma *vma, *next;
3671         int ret;
3672
3673         if (obj->cache_level == cache_level)
3674                 return 0;
3675
3676         if (i915_gem_obj_is_pinned(obj)) {
3677                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3678                 return -EBUSY;
3679         }
3680
3681         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3682                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3683                         ret = i915_vma_unbind(vma);
3684                         if (ret)
3685                                 return ret;
3686                 }
3687         }
3688
3689         if (i915_gem_obj_bound_any(obj)) {
3690                 ret = i915_gem_object_finish_gpu(obj);
3691                 if (ret)
3692                         return ret;
3693
3694                 i915_gem_object_finish_gtt(obj);
3695
3696                 /* Before SandyBridge, you could not use tiling or fence
3697                  * registers with snooped memory, so relinquish any fences
3698                  * currently pointing to our region in the aperture.
3699                  */
3700                 if (INTEL_INFO(dev)->gen < 6) {
3701                         ret = i915_gem_object_put_fence(obj);
3702                         if (ret)
3703                                 return ret;
3704                 }
3705
3706                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3707                         if (drm_mm_node_allocated(&vma->node))
3708                                 vma->bind_vma(vma, cache_level,
3709                                                 vma->bound & GLOBAL_BIND);
3710         }
3711
3712         list_for_each_entry(vma, &obj->vma_list, vma_link)
3713                 vma->node.color = cache_level;
3714         obj->cache_level = cache_level;
3715
3716         if (cpu_write_needs_clflush(obj)) {
3717                 u32 old_read_domains, old_write_domain;
3718
3719                 /* If we're coming from LLC cached, then we haven't
3720                  * actually been tracking whether the data is in the
3721                  * CPU cache or not, since we only allow one bit set
3722                  * in obj->write_domain and have been skipping the clflushes.
3723                  * Just set it to the CPU cache for now.
3724                  */
3725                 i915_gem_object_retire(obj);
3726                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3727
3728                 old_read_domains = obj->base.read_domains;
3729                 old_write_domain = obj->base.write_domain;
3730
3731                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3732                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3733
3734                 trace_i915_gem_object_change_domain(obj,
3735                                                     old_read_domains,
3736                                                     old_write_domain);
3737         }
3738
3739         return 0;
3740 }
3741
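/*
 * Report the current caching mode of a GEM object to userspace, translating
 * the internal cache level into the I915_CACHING_* ABI values.
 */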
3742 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3743                                struct drm_file *file)
3744 {
3745         struct drm_i915_gem_caching *args = data;
3746         struct drm_i915_gem_object *obj;
3747         int ret;
3748
3749         ret = i915_mutex_lock_interruptible(dev);
3750         if (ret)
3751                 return ret;
3752
3753         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3754         if (&obj->base == NULL) {
3755                 ret = -ENOENT;
3756                 goto unlock;
3757         }
3758
3759         switch (obj->cache_level) {
3760         case I915_CACHE_LLC:
3761         case I915_CACHE_L3_LLC:
3762                 args->caching = I915_CACHING_CACHED;
3763                 break;
3764
3765         case I915_CACHE_WT:
3766                 args->caching = I915_CACHING_DISPLAY;
3767                 break;
3768
3769         default:
3770                 args->caching = I915_CACHING_NONE;
3771                 break;
3772         }
3773
3774         drm_gem_object_unreference(&obj->base);
3775 unlock:
3776         mutex_unlock(&dev->struct_mutex);
3777         return ret;
3778 }
3779
3780 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3781                                struct drm_file *file)
3782 {
3783         struct drm_i915_gem_caching *args = data;
3784         struct drm_i915_gem_object *obj;
3785         enum i915_cache_level level;
3786         int ret;
3787
3788         switch (args->caching) {
3789         case I915_CACHING_NONE:
3790                 level = I915_CACHE_NONE;
3791                 break;
3792         case I915_CACHING_CACHED:
3793                 level = I915_CACHE_LLC;
3794                 break;
3795         case I915_CACHING_DISPLAY:
3796                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3797                 break;
3798         default:
3799                 return -EINVAL;
3800         }
3801
3802         ret = i915_mutex_lock_interruptible(dev);
3803         if (ret)
3804                 return ret;
3805
3806         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3807         if (&obj->base == NULL) {
3808                 ret = -ENOENT;
3809                 goto unlock;
3810         }
3811
3812         ret = i915_gem_object_set_cache_level(obj, level);
3813
3814         drm_gem_object_unreference(&obj->base);
3815 unlock:
3816         mutex_unlock(&dev->struct_mutex);
3817         return ret;
3818 }
3819
3820 static bool is_pin_display(struct drm_i915_gem_object *obj)
3821 {
3822         struct i915_vma *vma;
3823
3824         vma = i915_gem_obj_to_ggtt(obj);
3825         if (!vma)
3826                 return false;
3827
3828         /* There are 3 sources that pin objects:
3829          *   1. The display engine (scanouts, sprites, cursors);
3830          *   2. Reservations for execbuffer;
3831          *   3. The user.
3832          *
3833          * We can ignore reservations as we hold the struct_mutex and
3834          * are only called outside of the reservation path.  The user
3835          * can only increment pin_count once, and so if after
3836          * subtracting the potential reference by the user, any pin_count
3837          * remains, it must be due to another use by the display engine.
3838          */
3839         return vma->pin_count - !!obj->user_pin_count;
3840 }
3841
3842 /*
3843  * Prepare a buffer for a display plane (scanout, cursors, etc.).
3844  * Can be called from an uninterruptible phase (modesetting) and allows
3845  * any flushes to be pipelined (for pageflips).
3846  */
3847 int
3848 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3849                                      u32 alignment,
3850                                      struct intel_engine_cs *pipelined)
3851 {
3852         u32 old_read_domains, old_write_domain;
3853         bool was_pin_display;
3854         int ret;
3855
3856         if (pipelined != obj->ring) {
3857                 ret = i915_gem_object_sync(obj, pipelined);
3858                 if (ret)
3859                         return ret;
3860         }
3861
3862         /* Mark the pin_display early so that we account for the
3863          * display coherency whilst setting up the cache domains.
3864          */
3865         was_pin_display = obj->pin_display;
3866         obj->pin_display = true;
3867
3868         /* The display engine is not coherent with the LLC cache on gen6.  As
3869          * a result, we make sure that the pinning that is about to occur is
3870          * done with uncached PTEs. This is lowest common denominator for all
3871          * chipsets.
3872          *
3873          * However for gen6+, we could do better by using the GFDT bit instead
3874          * of uncaching, which would allow us to flush all the LLC-cached data
3875          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3876          */
3877         ret = i915_gem_object_set_cache_level(obj,
3878                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3879         if (ret)
3880                 goto err_unpin_display;
3881
3882         /* As the user may map the buffer once pinned in the display plane
3883          * (e.g. libkms for the bootup splash), we have to ensure that we
3884          * always use map_and_fenceable for all scanout buffers.
3885          */
3886         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3887         if (ret)
3888                 goto err_unpin_display;
3889
3890         i915_gem_object_flush_cpu_write_domain(obj, true);
3891
3892         old_write_domain = obj->base.write_domain;
3893         old_read_domains = obj->base.read_domains;
3894
3895         /* It should now be out of any other write domains, and we can update
3896          * the domain values for our changes.
3897          */
3898         obj->base.write_domain = 0;
3899         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3900
3901         trace_i915_gem_object_change_domain(obj,
3902                                             old_read_domains,
3903                                             old_write_domain);
3904
3905         return 0;
3906
3907 err_unpin_display:
3908         WARN_ON(was_pin_display != is_pin_display(obj));
3909         obj->pin_display = was_pin_display;
3910         return ret;
3911 }
3912
3913 void
3914 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3915 {
3916         i915_gem_object_ggtt_unpin(obj);
3917         obj->pin_display = is_pin_display(obj);
3918 }
3919
3920 int
3921 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3922 {
3923         int ret;
3924
3925         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3926                 return 0;
3927
3928         ret = i915_gem_object_wait_rendering(obj, false);
3929         if (ret)
3930                 return ret;
3931
3932         /* Ensure that we invalidate the GPU's caches and TLBs. */
3933         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3934         return 0;
3935 }
3936
3937 /**
3938  * Moves a single object to the CPU read, and possibly write domain.
3939  *
3940  * This function returns when the move is complete, including waiting on
3941  * flushes to occur.
3942  */
3943 int
3944 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3945 {
3946         uint32_t old_write_domain, old_read_domains;
3947         int ret;
3948
3949         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3950                 return 0;
3951
3952         ret = i915_gem_object_wait_rendering(obj, !write);
3953         if (ret)
3954                 return ret;
3955
3956         i915_gem_object_retire(obj);
3957         i915_gem_object_flush_gtt_write_domain(obj);
3958
3959         old_write_domain = obj->base.write_domain;
3960         old_read_domains = obj->base.read_domains;
3961
3962         /* Flush the CPU cache if it's still invalid. */
3963         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3964                 i915_gem_clflush_object(obj, false);
3965
3966                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3967         }
3968
3969         /* It should now be out of any other write domains, and we can update
3970          * the domain values for our changes.
3971          */
3972         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3973
3974         /* If we're writing through the CPU, then the GPU read domains will
3975          * need to be invalidated at next use.
3976          */
3977         if (write) {
3978                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3979                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3980         }
3981
3982         if (write)
3983                 intel_fb_obj_invalidate(obj, NULL);
3984
3985         trace_i915_gem_object_change_domain(obj,
3986                                             old_read_domains,
3987                                             old_write_domain);
3988
3989         return 0;
3990 }
3991
3992 /* Throttle our rendering by waiting until the ring has completed our requests
3993  * emitted over 20 msec ago.
3994  *
3995  * Note that if we were to use the current jiffies each time around the loop,
3996  * we wouldn't escape the function with any frames outstanding if the time to
3997  * render a frame was over 20ms.
3998  *
3999  * This should get us reasonable parallelism between CPU and GPU but also
4000  * relatively low latency when blocking on a particular request to finish.
4001  */
4002 static int
4003 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4004 {
4005         struct drm_i915_private *dev_priv = dev->dev_private;
4006         struct drm_i915_file_private *file_priv = file->driver_priv;
4007         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4008         struct drm_i915_gem_request *request;
4009         struct intel_engine_cs *ring = NULL;
4010         unsigned reset_counter;
4011         u32 seqno = 0;
4012         int ret;
4013
4014         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4015         if (ret)
4016                 return ret;
4017
4018         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4019         if (ret)
4020                 return ret;
4021
4022         spin_lock(&file_priv->mm.lock);
4023         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4024                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4025                         break;
4026
4027                 ring = request->ring;
4028                 seqno = request->seqno;
4029         }
4030         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4031         spin_unlock(&file_priv->mm.lock);
4032
4033         if (seqno == 0)
4034                 return 0;
4035
4036         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
4037         if (ret == 0)
4038                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4039
4040         return ret;
4041 }
4042
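/*
 * Check whether an existing binding satisfies the requested alignment and
 * placement flags; a misplaced VMA has to be unbound and rebound.
 */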
4043 static bool
4044 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4045 {
4046         struct drm_i915_gem_object *obj = vma->obj;
4047
4048         if (alignment &&
4049             vma->node.start & (alignment - 1))
4050                 return true;
4051
4052         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4053                 return true;
4054
4055         if (flags & PIN_OFFSET_BIAS &&
4056             vma->node.start < (flags & PIN_OFFSET_MASK))
4057                 return true;
4058
4059         return false;
4060 }
4061
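/*
 * Pin an object into the given address space, binding it first if necessary.
 * A binding that does not satisfy the requested alignment or placement flags
 * is unbound and redone, and map_and_fenceable is recomputed whenever the
 * global GTT binding changes.
 */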
4062 int
4063 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4064                     struct i915_address_space *vm,
4065                     uint32_t alignment,
4066                     uint64_t flags)
4067 {
4068         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4069         struct i915_vma *vma;
4070         unsigned bound;
4071         int ret;
4072
4073         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4074                 return -ENODEV;
4075
4076         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4077                 return -EINVAL;
4078
4079         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4080                 return -EINVAL;
4081
4082         vma = i915_gem_obj_to_vma(obj, vm);
4083         if (vma) {
4084                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4085                         return -EBUSY;
4086
4087                 if (i915_vma_misplaced(vma, alignment, flags)) {
4088                         WARN(vma->pin_count,
4089                              "bo is already pinned with incorrect alignment:"
4090                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4091                              " obj->map_and_fenceable=%d\n",
4092                              i915_gem_obj_offset(obj, vm), alignment,
4093                              !!(flags & PIN_MAPPABLE),
4094                              obj->map_and_fenceable);
4095                         ret = i915_vma_unbind(vma);
4096                         if (ret)
4097                                 return ret;
4098
4099                         vma = NULL;
4100                 }
4101         }
4102
4103         bound = vma ? vma->bound : 0;
4104         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4105                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4106                 if (IS_ERR(vma))
4107                         return PTR_ERR(vma);
4108         }
4109
4110         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
4111                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
4112
4113         if ((bound ^ vma->bound) & GLOBAL_BIND) {
4114                 bool mappable, fenceable;
4115                 u32 fence_size, fence_alignment;
4116
4117                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4118                                                    obj->base.size,
4119                                                    obj->tiling_mode);
4120                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4121                                                              obj->base.size,
4122                                                              obj->tiling_mode,
4123                                                              true);
4124
4125                 fenceable = (vma->node.size == fence_size &&
4126                              (vma->node.start & (fence_alignment - 1)) == 0);
4127
4128                 mappable = (vma->node.start + obj->base.size <=
4129                             dev_priv->gtt.mappable_end);
4130
4131                 obj->map_and_fenceable = mappable && fenceable;
4132         }
4133
4134         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4135
4136         vma->pin_count++;
4137         if (flags & PIN_MAPPABLE)
4138                 obj->pin_mappable = true;
4139
4140         return 0;
4141 }
4142
4143 void
4144 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4145 {
4146         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
4147
4148         BUG_ON(!vma);
4149         BUG_ON(vma->pin_count == 0);
4150         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4151
4152         if (--vma->pin_count == 0)
4153                 obj->pin_mappable = false;
4154 }
4155
4156 bool
4157 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4158 {
4159         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4160                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4161                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4162
4163                 WARN_ON(!ggtt_vma ||
4164                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4165                         ggtt_vma->pin_count);
4166                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4167                 return true;
4168         } else
4169                 return false;
4170 }
4171
4172 void
4173 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4174 {
4175         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4176                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4177                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4178                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4179         }
4180 }
4181
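/*
 * Legacy userspace pin interface, rejected with -ENODEV on gen6+. The object
 * is pinned into the mappable GGTT on the first pin and a per-file pin count
 * tracks any additional pins.
 */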
4182 int
4183 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4184                    struct drm_file *file)
4185 {
4186         struct drm_i915_gem_pin *args = data;
4187         struct drm_i915_gem_object *obj;
4188         int ret;
4189
4190         if (INTEL_INFO(dev)->gen >= 6)
4191                 return -ENODEV;
4192
4193         ret = i915_mutex_lock_interruptible(dev);
4194         if (ret)
4195                 return ret;
4196
4197         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4198         if (&obj->base == NULL) {
4199                 ret = -ENOENT;
4200                 goto unlock;
4201         }
4202
4203         if (obj->madv != I915_MADV_WILLNEED) {
4204                 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
4205                 ret = -EFAULT;
4206                 goto out;
4207         }
4208
4209         if (obj->pin_filp != NULL && obj->pin_filp != file) {
4210                 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
4211                           args->handle);
4212                 ret = -EINVAL;
4213                 goto out;
4214         }
4215
4216         if (obj->user_pin_count == ULONG_MAX) {
4217                 ret = -EBUSY;
4218                 goto out;
4219         }
4220
4221         if (obj->user_pin_count == 0) {
4222                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
4223                 if (ret)
4224                         goto out;
4225         }
4226
4227         obj->user_pin_count++;
4228         obj->pin_filp = file;
4229
4230         args->offset = i915_gem_obj_ggtt_offset(obj);
4231 out:
4232         drm_gem_object_unreference(&obj->base);
4233 unlock:
4234         mutex_unlock(&dev->struct_mutex);
4235         return ret;
4236 }
4237
4238 int
4239 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4240                      struct drm_file *file)
4241 {
4242         struct drm_i915_gem_pin *args = data;
4243         struct drm_i915_gem_object *obj;
4244         int ret;
4245
4246         ret = i915_mutex_lock_interruptible(dev);
4247         if (ret)
4248                 return ret;
4249
4250         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4251         if (&obj->base == NULL) {
4252                 ret = -ENOENT;
4253                 goto unlock;
4254         }
4255
4256         if (obj->pin_filp != file) {
4257                 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4258                           args->handle);
4259                 ret = -EINVAL;
4260                 goto out;
4261         }
4262         obj->user_pin_count--;
4263         if (obj->user_pin_count == 0) {
4264                 obj->pin_filp = NULL;
4265                 i915_gem_object_ggtt_unpin(obj);
4266         }
4267
4268 out:
4269         drm_gem_object_unreference(&obj->base);
4270 unlock:
4271         mutex_unlock(&dev->struct_mutex);
4272         return ret;
4273 }
4274
4275 int
4276 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4277                     struct drm_file *file)
4278 {
4279         struct drm_i915_gem_busy *args = data;
4280         struct drm_i915_gem_object *obj;
4281         int ret;
4282
4283         ret = i915_mutex_lock_interruptible(dev);
4284         if (ret)
4285                 return ret;
4286
4287         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4288         if (&obj->base == NULL) {
4289                 ret = -ENOENT;
4290                 goto unlock;
4291         }
4292
4293         /* Count all active objects as busy, even if they are currently not used
4294          * by the gpu. Users of this interface expect objects to eventually
4295          * become non-busy without any further actions, therefore emit any
4296          * necessary flushes here.
4297          */
4298         ret = i915_gem_object_flush_active(obj);
4299
4300         args->busy = obj->active;
4301         if (obj->ring) {
4302                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4303                 args->busy |= intel_ring_flag(obj->ring) << 16;
4304         }
4305
4306         drm_gem_object_unreference(&obj->base);
4307 unlock:
4308         mutex_unlock(&dev->struct_mutex);
4309         return ret;
4310 }
4311
4312 int
4313 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4314                         struct drm_file *file_priv)
4315 {
4316         return i915_gem_ring_throttle(dev, file_priv);
4317 }
4318
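/*
 * I915_GEM_MADVISE: let userspace mark an object's backing storage as
 * WILLNEED or DONTNEED. Purgeable objects whose pages are no longer attached
 * are truncated immediately, and args->retained reports whether the backing
 * storage is still present.
 */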
4319 int
4320 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4321                        struct drm_file *file_priv)
4322 {
4323         struct drm_i915_gem_madvise *args = data;
4324         struct drm_i915_gem_object *obj;
4325         int ret;
4326
4327         switch (args->madv) {
4328         case I915_MADV_DONTNEED:
4329         case I915_MADV_WILLNEED:
4330             break;
4331         default:
4332             return -EINVAL;
4333         }
4334
4335         ret = i915_mutex_lock_interruptible(dev);
4336         if (ret)
4337                 return ret;
4338
4339         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4340         if (&obj->base == NULL) {
4341                 ret = -ENOENT;
4342                 goto unlock;
4343         }
4344
4345         if (i915_gem_obj_is_pinned(obj)) {
4346                 ret = -EINVAL;
4347                 goto out;
4348         }
4349
4350         if (obj->madv != __I915_MADV_PURGED)
4351                 obj->madv = args->madv;
4352
4353         /* if the object is no longer attached, discard its backing storage */
4354         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4355                 i915_gem_object_truncate(obj);
4356
4357         args->retained = obj->madv != __I915_MADV_PURGED;
4358
4359 out:
4360         drm_gem_object_unreference(&obj->base);
4361 unlock:
4362         mutex_unlock(&dev->struct_mutex);
4363         return ret;
4364 }
4365
4366 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4367                           const struct drm_i915_gem_object_ops *ops)
4368 {
4369         INIT_LIST_HEAD(&obj->global_list);
4370         INIT_LIST_HEAD(&obj->ring_list);
4371         INIT_LIST_HEAD(&obj->obj_exec_link);
4372         INIT_LIST_HEAD(&obj->vma_list);
4373
4374         obj->ops = ops;
4375
4376         obj->fence_reg = I915_FENCE_REG_NONE;
4377         obj->madv = I915_MADV_WILLNEED;
4378
4379         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4380 }
4381
4382 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4383         .get_pages = i915_gem_object_get_pages_gtt,
4384         .put_pages = i915_gem_object_put_pages_gtt,
4385 };
4386
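/*
 * Allocate a new GEM object backed by shmemfs. The GFP mask is restricted to
 * the low 4GiB on 965G/GM variants that cannot relocate objects above 4GiB,
 * and the default cache level is chosen by whether the platform shares its
 * LLC with the GPU.
 */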
4387 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4388                                                   size_t size)
4389 {
4390         struct drm_i915_gem_object *obj;
4391         struct address_space *mapping;
4392         gfp_t mask;
4393
4394         obj = i915_gem_object_alloc(dev);
4395         if (obj == NULL)
4396                 return NULL;
4397
4398         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4399                 i915_gem_object_free(obj);
4400                 return NULL;
4401         }
4402
4403         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4404         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4405                 /* 965gm cannot relocate objects above 4GiB. */
4406                 mask &= ~__GFP_HIGHMEM;
4407                 mask |= __GFP_DMA32;
4408         }
4409
4410         mapping = file_inode(obj->base.filp)->i_mapping;
4411         mapping_set_gfp_mask(mapping, mask);
4412
4413         i915_gem_object_init(obj, &i915_gem_object_ops);
4414
4415         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4416         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4417
4418         if (HAS_LLC(dev)) {
4419                 /* On some devices, we can have the GPU use the LLC (the CPU
4420                  * cache) for about a 10% performance improvement
4421                  * compared to uncached.  Graphics requests other than
4422                  * display scanout are coherent with the CPU in
4423                  * accessing this cache.  This means in this mode we
4424                  * don't need to clflush on the CPU side, and on the
4425                  * GPU side we only need to flush internal caches to
4426                  * get data visible to the CPU.
4427                  *
4428                  * However, we maintain the display planes as UC, and so
4429                  * need to rebind when first used as such.
4430                  */
4431                 obj->cache_level = I915_CACHE_LLC;
4432         } else
4433                 obj->cache_level = I915_CACHE_NONE;
4434
4435         trace_i915_gem_object_create(obj);
4436
4437         return obj;
4438 }
4439
4440 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4441 {
4442         /* If we are the last user of the backing storage (be it shmemfs
4443          * pages or stolen etc), we know that the pages are going to be
4444          * immediately released. In this case, we can then skip copying
4445          * back the contents from the GPU.
4446          */
4447
4448         if (obj->madv != I915_MADV_WILLNEED)
4449                 return false;
4450
4451         if (obj->base.filp == NULL)
4452                 return true;
4453
4454         /* At first glance, this looks racy, but then again so would be
4455          * userspace racing mmap against close. However, the first external
4456          * reference to the filp can only be obtained through the
4457          * i915_gem_mmap_ioctl() which safeguards us against the user
4458          * acquiring such a reference whilst we are in the middle of
4459          * freeing the object.
4460          */
4461         return atomic_long_read(&obj->base.filp->f_count) == 1;
4462 }
4463
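/*
 * Final unreference callback for a GEM object: forcibly unbind every
 * remaining VMA (clearing any leftover pin counts), release the backing
 * pages and mmap offset, and free the object, holding a runtime PM reference
 * across the hardware accesses involved.
 */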
4464 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4465 {
4466         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4467         struct drm_device *dev = obj->base.dev;
4468         struct drm_i915_private *dev_priv = dev->dev_private;
4469         struct i915_vma *vma, *next;
4470
4471         intel_runtime_pm_get(dev_priv);
4472
4473         trace_i915_gem_object_destroy(obj);
4474
4475         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4476                 int ret;
4477
4478                 vma->pin_count = 0;
4479                 ret = i915_vma_unbind(vma);
4480                 if (WARN_ON(ret == -ERESTARTSYS)) {
4481                         bool was_interruptible;
4482
4483                         was_interruptible = dev_priv->mm.interruptible;
4484                         dev_priv->mm.interruptible = false;
4485
4486                         WARN_ON(i915_vma_unbind(vma));
4487
4488                         dev_priv->mm.interruptible = was_interruptible;
4489                 }
4490         }
4491
4492         i915_gem_object_detach_phys(obj);
4493
4494         /* Stolen objects don't hold a ref, but do hold a pages pin count.
4495          * Fix that up before proceeding. */
4496         if (obj->stolen)
4497                 i915_gem_object_unpin_pages(obj);
4498
4499         WARN_ON(obj->frontbuffer_bits);
4500
4501         if (WARN_ON(obj->pages_pin_count))
4502                 obj->pages_pin_count = 0;
4503         if (discard_backing_storage(obj))
4504                 obj->madv = I915_MADV_DONTNEED;
4505         i915_gem_object_put_pages(obj);
4506         i915_gem_object_free_mmap_offset(obj);
4507
4508         BUG_ON(obj->pages);
4509
4510         if (obj->base.import_attach)
4511                 drm_prime_gem_destroy(&obj->base, NULL);
4512
4513         if (obj->ops->release)
4514                 obj->ops->release(obj);
4515
4516         drm_gem_object_release(&obj->base);
4517         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4518
4519         kfree(obj->bit_17);
4520         i915_gem_object_free(obj);
4521
4522         intel_runtime_pm_put(dev_priv);
4523 }
4524
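/*
 * Look up the vma for @obj in the given address space; returns NULL if the
 * object has no vma there.
 */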
4525 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4526                                      struct i915_address_space *vm)
4527 {
4528         struct i915_vma *vma;
4529         list_for_each_entry(vma, &obj->vma_list, vma_link)
4530                 if (vma->vm == vm)
4531                         return vma;
4532
4533         return NULL;
4534 }
4535
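/*
 * Free an unbound vma. A vma still sitting on an execbuffer reservation list
 * is kept as a placeholder; otherwise the ppgtt reference (for non-GGTT vmas)
 * is dropped and the vma is unlinked and freed.
 */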
4536 void i915_gem_vma_destroy(struct i915_vma *vma)
4537 {
4538         struct i915_address_space *vm = NULL;
4539         WARN_ON(vma->node.allocated);
4540
4541         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4542         if (!list_empty(&vma->exec_list))
4543                 return;
4544
4545         vm = vma->vm;
4546
4547         if (!i915_is_ggtt(vm))
4548                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4549
4550         list_del(&vma->vma_link);
4551
4552         kfree(vma);
4553 }
4554
4555 static void
4556 i915_gem_stop_ringbuffers(struct drm_device *dev)
4557 {
4558         struct drm_i915_private *dev_priv = dev->dev_private;
4559         struct intel_engine_cs *ring;
4560         int i;
4561
4562         for_each_ring(ring, dev_priv, i)
4563                 dev_priv->gt.stop_ring(ring);
4564 }
4565
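/*
 * Quiesce GEM for suspend: idle the GPU, retire outstanding requests, stop
 * the rings and shut down the hangcheck timer and the retire/idle workers.
 */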
4566 int
4567 i915_gem_suspend(struct drm_device *dev)
4568 {
4569         struct drm_i915_private *dev_priv = dev->dev_private;
4570         int ret = 0;
4571
4572         mutex_lock(&dev->struct_mutex);
4573         if (dev_priv->ums.mm_suspended)
4574                 goto err;
4575
4576         ret = i915_gpu_idle(dev);
4577         if (ret)
4578                 goto err;
4579
4580         i915_gem_retire_requests(dev);
4581
4582         /* Under UMS, be paranoid and evict. */
4583         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4584                 i915_gem_evict_everything(dev);
4585
4586         i915_kernel_lost_context(dev);
4587         i915_gem_stop_ringbuffers(dev);
4588
4589         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4590          * We need to replace this with a semaphore, or something.
4591          * And not confound ums.mm_suspended!
4592          */
4593         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4594                                                              DRIVER_MODESET);
4595         mutex_unlock(&dev->struct_mutex);
4596
4597         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4598         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4599         flush_delayed_work(&dev_priv->mm.idle_work);
4600
4601         return 0;
4602
4603 err:
4604         mutex_unlock(&dev->struct_mutex);
4605         return ret;
4606 }
4607
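/*
 * Re-emit the saved L3 remapping registers for @slice using
 * MI_LOAD_REGISTER_IMM on @ring. A no-op on platforms without L3 dynamic
 * parity or when no remap information has been recorded.
 */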
4608 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4609 {
4610         struct drm_device *dev = ring->dev;
4611         struct drm_i915_private *dev_priv = dev->dev_private;
4612         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4613         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4614         int i, ret;
4615
4616         if (!HAS_L3_DPF(dev) || !remap_info)
4617                 return 0;
4618
4619         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4620         if (ret)
4621                 return ret;
4622
4623         /*
4624          * Note: We do not worry about the concurrent register cacheline hang
4625          * here because these registers should only ever be accessed at
4626          * initialization time.
4627          */
4628         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4629                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4630                 intel_ring_emit(ring, reg_base + i);
4631                 intel_ring_emit(ring, remap_info[i/4]);
4632         }
4633
4634         intel_ring_advance(ring);
4635
4636         return ret;
4637 }
4638
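/*
 * Enable swizzling of tiled surfaces in the display and memory arbiters on
 * gen5+, but only when bit-6 swizzling is actually in use.
 */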
4639 void i915_gem_init_swizzling(struct drm_device *dev)
4640 {
4641         struct drm_i915_private *dev_priv = dev->dev_private;
4642
4643         if (INTEL_INFO(dev)->gen < 5 ||
4644             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4645                 return;
4646
4647         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4648                                  DISP_TILE_SURFACE_SWIZZLING);
4649
4650         if (IS_GEN5(dev))
4651                 return;
4652
4653         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4654         if (IS_GEN6(dev))
4655                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4656         else if (IS_GEN7(dev))
4657                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4658         else if (IS_GEN8(dev))
4659                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4660         else
4661                 BUG();
4662 }
4663
4664 static bool
4665 intel_enable_blt(struct drm_device *dev)
4666 {
4667         if (!HAS_BLT(dev))
4668                 return false;
4669
4670         /* The blitter was dysfunctional on early prototypes */
4671         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4672                 DRM_INFO("BLT not supported on this pre-production hardware;"
4673                          " graphics performance will be degraded.\n");
4674                 return false;
4675         }
4676
4677         return true;
4678 }
4679
4680 static void init_unused_ring(struct drm_device *dev, u32 base)
4681 {
4682         struct drm_i915_private *dev_priv = dev->dev_private;
4683
4684         I915_WRITE(RING_CTL(base), 0);
4685         I915_WRITE(RING_HEAD(base), 0);
4686         I915_WRITE(RING_TAIL(base), 0);
4687         I915_WRITE(RING_START(base), 0);
4688 }
4689
4690 static void init_unused_rings(struct drm_device *dev)
4691 {
4692         if (IS_I830(dev)) {
4693                 init_unused_ring(dev, PRB1_BASE);
4694                 init_unused_ring(dev, SRB0_BASE);
4695                 init_unused_ring(dev, SRB1_BASE);
4696                 init_unused_ring(dev, SRB2_BASE);
4697                 init_unused_ring(dev, SRB3_BASE);
4698         } else if (IS_GEN2(dev)) {
4699                 init_unused_ring(dev, SRB0_BASE);
4700                 init_unused_ring(dev, SRB1_BASE);
4701         } else if (IS_GEN3(dev)) {
4702                 init_unused_ring(dev, PRB1_BASE);
4703                 init_unused_ring(dev, PRB2_BASE);
4704         }
4705 }
4706
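/*
 * Legacy (ringbuffer) engine setup: quiesce any unused rings that may have
 * been left active after resume, then initialize the render ring plus
 * whichever of the BSD, blitter, vebox and second BSD rings the platform
 * provides, unwinding everything on failure.
 */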
4707 int i915_gem_init_rings(struct drm_device *dev)
4708 {
4709         struct drm_i915_private *dev_priv = dev->dev_private;
4710         int ret;
4711
4712         /*
4713          * At least 830 can leave some of the unused rings
4714          * "active" (i.e. head != tail) after resume, which
4715          * will prevent C3 entry. Make sure all unused rings
4716          * are totally idle.
4717          */
4718         init_unused_rings(dev);
4719
4720         ret = intel_init_render_ring_buffer(dev);
4721         if (ret)
4722                 return ret;
4723
4724         if (HAS_BSD(dev)) {
4725                 ret = intel_init_bsd_ring_buffer(dev);
4726                 if (ret)
4727                         goto cleanup_render_ring;
4728         }
4729
4730         if (intel_enable_blt(dev)) {
4731                 ret = intel_init_blt_ring_buffer(dev);
4732                 if (ret)
4733                         goto cleanup_bsd_ring;
4734         }
4735
4736         if (HAS_VEBOX(dev)) {
4737                 ret = intel_init_vebox_ring_buffer(dev);
4738                 if (ret)
4739                         goto cleanup_blt_ring;
4740         }
4741
4742         if (HAS_BSD2(dev)) {
4743                 ret = intel_init_bsd2_ring_buffer(dev);
4744                 if (ret)
4745                         goto cleanup_vebox_ring;
4746         }
4747
4748         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4749         if (ret)
4750                 goto cleanup_bsd2_ring;
4751
4752         return 0;
4753
4754 cleanup_bsd2_ring:
4755         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4756 cleanup_vebox_ring:
4757         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4758 cleanup_blt_ring:
4759         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4760 cleanup_bsd_ring:
4761         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4762 cleanup_render_ring:
4763         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4764
4765         return ret;
4766 }
4767
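/*
 * Hardware (re)initialization, run at load and after reset or resume:
 * program swizzling, bring up the engines, restore the L3 remapping and
 * re-enable contexts and PPGTT.
 */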
4768 int
4769 i915_gem_init_hw(struct drm_device *dev)
4770 {
4771         struct drm_i915_private *dev_priv = dev->dev_private;
4772         int ret, i;
4773
4774         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4775                 return -EIO;
4776
4777         if (dev_priv->ellc_size)
4778                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4779
4780         if (IS_HASWELL(dev))
4781                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4782                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4783
4784         if (HAS_PCH_NOP(dev)) {
4785                 if (IS_IVYBRIDGE(dev)) {
4786                         u32 temp = I915_READ(GEN7_MSG_CTL);
4787                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4788                         I915_WRITE(GEN7_MSG_CTL, temp);
4789                 } else if (INTEL_INFO(dev)->gen >= 7) {
4790                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4791                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4792                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4793                 }
4794         }
4795
4796         i915_gem_init_swizzling(dev);
4797
4798         ret = dev_priv->gt.init_rings(dev);
4799         if (ret)
4800                 return ret;
4801
4802         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4803                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4804
4805         /*
4806          * XXX: Contexts should only be initialized once. Switching to the
4807          * default context, however, is something we'd like to do after
4808          * reset or thaw (the latter may not actually be necessary for HW,
4809          * but goes with our code better). Context switching requires rings
4810          * (for the do_switch), but must precede enabling PPGTT. So don't move this.
4811          */
4812         ret = i915_gem_context_enable(dev_priv);
4813         if (ret && ret != -EIO) {
4814                 DRM_ERROR("Context enable failed %d\n", ret);
4815                 i915_gem_cleanup_ringbuffer(dev);
4816
4817                 return ret;
4818         }
4819
4820         ret = i915_ppgtt_init_hw(dev);
4821         if (ret && ret != -EIO) {
4822                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4823                 i915_gem_cleanup_ringbuffer(dev);
4824         }
4825
4826         return ret;
4827 }
4828
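/*
 * One-time GEM initialization at driver load: choose between legacy
 * ringbuffer and execlists submission, set up the global GTT and contexts,
 * and initialize the hardware. A GPU that fails hardware init with -EIO is
 * marked wedged instead of failing the load.
 */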
4829 int i915_gem_init(struct drm_device *dev)
4830 {
4831         struct drm_i915_private *dev_priv = dev->dev_private;
4832         int ret;
4833
4834         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4835                         i915.enable_execlists);
4836
4837         mutex_lock(&dev->struct_mutex);
4838
4839         if (IS_VALLEYVIEW(dev)) {
4840                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4841                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4842                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4843                               VLV_GTLC_ALLOWWAKEACK), 10))
4844                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4845         }
4846
4847         if (!i915.enable_execlists) {
4848                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4849                 dev_priv->gt.init_rings = i915_gem_init_rings;
4850                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4851                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4852         } else {
4853                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4854                 dev_priv->gt.init_rings = intel_logical_rings_init;
4855                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4856                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4857         }
4858
4859         ret = i915_gem_init_userptr(dev);
4860         if (ret) {
4861                 mutex_unlock(&dev->struct_mutex);
4862                 return ret;
4863         }
4864
4865         i915_gem_init_global_gtt(dev);
4866
4867         ret = i915_gem_context_init(dev);
4868         if (ret) {
4869                 mutex_unlock(&dev->struct_mutex);
4870                 return ret;
4871         }
4872
4873         ret = i915_gem_init_hw(dev);
4874         if (ret == -EIO) {
4875                 /* Allow ring initialisation to fail by marking the GPU as
4876                  * wedged. But we only want to do this where the GPU is angry;
4877                  * for any other failure, such as an allocation failure, bail.
4878                  */
4879                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4880                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4881                 ret = 0;
4882         }
4883         mutex_unlock(&dev->struct_mutex);
4884
4885         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4886         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4887                 dev_priv->dri1.allow_batchbuffer = 1;
4888         return ret;
4889 }
4890
4891 void
4892 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4893 {
4894         struct drm_i915_private *dev_priv = dev->dev_private;
4895         struct intel_engine_cs *ring;
4896         int i;
4897
4898         for_each_ring(ring, dev_priv, i)
4899                 dev_priv->gt.cleanup_ring(ring);
4900 }
4901
4902 int
4903 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4904                        struct drm_file *file_priv)
4905 {
4906         struct drm_i915_private *dev_priv = dev->dev_private;
4907         int ret;
4908
4909         if (drm_core_check_feature(dev, DRIVER_MODESET))
4910                 return 0;
4911
4912         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4913                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4914                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4915         }
4916
4917         mutex_lock(&dev->struct_mutex);
4918         dev_priv->ums.mm_suspended = 0;
4919
4920         ret = i915_gem_init_hw(dev);
4921         if (ret != 0) {
4922                 mutex_unlock(&dev->struct_mutex);
4923                 return ret;
4924         }
4925
4926         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4927
4928         ret = drm_irq_install(dev, dev->pdev->irq);
4929         if (ret)
4930                 goto cleanup_ringbuffer;
4931         mutex_unlock(&dev->struct_mutex);
4932
4933         return 0;
4934
4935 cleanup_ringbuffer:
4936         i915_gem_cleanup_ringbuffer(dev);
4937         dev_priv->ums.mm_suspended = 1;
4938         mutex_unlock(&dev->struct_mutex);
4939
4940         return ret;
4941 }
4942
4943 int
4944 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4945                        struct drm_file *file_priv)
4946 {
4947         if (drm_core_check_feature(dev, DRIVER_MODESET))
4948                 return 0;
4949
4950         mutex_lock(&dev->struct_mutex);
4951         drm_irq_uninstall(dev);
4952         mutex_unlock(&dev->struct_mutex);
4953
4954         return i915_gem_suspend(dev);
4955 }
4956
4957 void
4958 i915_gem_lastclose(struct drm_device *dev)
4959 {
4960         int ret;
4961
4962         if (drm_core_check_feature(dev, DRIVER_MODESET))
4963                 return;
4964
4965         ret = i915_gem_suspend(dev);
4966         if (ret)
4967                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4968 }
4969
4970 static void
4971 init_ring_lists(struct intel_engine_cs *ring)
4972 {
4973         INIT_LIST_HEAD(&ring->active_list);
4974         INIT_LIST_HEAD(&ring->request_list);
4975 }
4976
4977 void i915_init_vm(struct drm_i915_private *dev_priv,
4978                   struct i915_address_space *vm)
4979 {
4980         if (!i915_is_ggtt(vm))
4981                 drm_mm_init(&vm->mm, vm->start, vm->total);
4982         vm->dev = dev_priv->dev;
4983         INIT_LIST_HEAD(&vm->active_list);
4984         INIT_LIST_HEAD(&vm->inactive_list);
4985         INIT_LIST_HEAD(&vm->global_link);
4986         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4987 }
4988
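/*
 * Driver-load-time GEM setup: object slab cache, VM/object/fence lists,
 * retire and idle workers, fence register bookkeeping, bit-6 swizzle
 * detection, and registration of the shrinker and OOM notifier.
 */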
4989 void
4990 i915_gem_load(struct drm_device *dev)
4991 {
4992         struct drm_i915_private *dev_priv = dev->dev_private;
4993         int i;
4994
4995         dev_priv->slab =
4996                 kmem_cache_create("i915_gem_object",
4997                                   sizeof(struct drm_i915_gem_object), 0,
4998                                   SLAB_HWCACHE_ALIGN,
4999                                   NULL);
5000
5001         INIT_LIST_HEAD(&dev_priv->vm_list);
5002         i915_init_vm(dev_priv, &dev_priv->gtt.base);
5003
5004         INIT_LIST_HEAD(&dev_priv->context_list);
5005         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5006         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5007         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5008         for (i = 0; i < I915_NUM_RINGS; i++)
5009                 init_ring_lists(&dev_priv->ring[i]);
5010         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5011                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5012         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5013                           i915_gem_retire_work_handler);
5014         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5015                           i915_gem_idle_work_handler);
5016         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5017
5018         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5019         if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
5020                 I915_WRITE(MI_ARB_STATE,
5021                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5022         }
5023
5024         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5025
5026         /* Old X drivers will take 0-2 for front, back, depth buffers */
5027         if (!drm_core_check_feature(dev, DRIVER_MODESET))
5028                 dev_priv->fence_reg_start = 3;
5029
5030         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
5031                 dev_priv->num_fence_regs = 32;
5032         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5033                 dev_priv->num_fence_regs = 16;
5034         else
5035                 dev_priv->num_fence_regs = 8;
5036
5037         /* Initialize fence registers to zero */
5038         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5039         i915_gem_restore_fences(dev);
5040
5041         i915_gem_detect_bit_6_swizzle(dev);
5042         init_waitqueue_head(&dev_priv->pending_flip_queue);
5043
5044         dev_priv->mm.interruptible = true;
5045
5046         dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
5047         dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
5048         dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
5049         register_shrinker(&dev_priv->mm.shrinker);
5050
5051         dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
5052         register_oom_notifier(&dev_priv->mm.oom_notifier);
5053
5054         mutex_init(&dev_priv->fb_tracking.lock);
5055 }
5056
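/*
 * Per-file teardown: cancel the RPS boost idle work and detach any requests
 * still on this client's list so that later retires cannot touch the
 * soon-to-be-freed file_priv.
 */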
5057 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5058 {
5059         struct drm_i915_file_private *file_priv = file->driver_priv;
5060
5061         cancel_delayed_work_sync(&file_priv->mm.idle_work);
5062
5063         /* Clean up our request list when the client is going away, so that
5064          * later retire_requests won't dereference our soon-to-be-gone
5065          * file_priv.
5066          */
5067         spin_lock(&file_priv->mm.lock);
5068         while (!list_empty(&file_priv->mm.request_list)) {
5069                 struct drm_i915_gem_request *request;
5070
5071                 request = list_first_entry(&file_priv->mm.request_list,
5072                                            struct drm_i915_gem_request,
5073                                            client_list);
5074                 list_del(&request->client_list);
5075                 request->file_priv = NULL;
5076         }
5077         spin_unlock(&file_priv->mm.lock);
5078 }
5079
5080 static void
5081 i915_gem_file_idle_work_handler(struct work_struct *work)
5082 {
5083         struct drm_i915_file_private *file_priv =
5084                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5085
5086         atomic_set(&file_priv->rps_wait_boost, false);
5087 }
5088
5089 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5090 {
5091         struct drm_i915_file_private *file_priv;
5092         int ret;
5093
5094         DRM_DEBUG_DRIVER("\n");
5095
5096         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5097         if (!file_priv)
5098                 return -ENOMEM;
5099
5100         file->driver_priv = file_priv;
5101         file_priv->dev_priv = dev->dev_private;
5102         file_priv->file = file;
5103
5104         spin_lock_init(&file_priv->mm.lock);
5105         INIT_LIST_HEAD(&file_priv->mm.request_list);
5106         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5107                           i915_gem_file_idle_work_handler);
5108
5109         ret = i915_gem_context_open(dev, file);
5110         if (ret)
5111                 kfree(file_priv);
5112
5113         return ret;
5114 }
5115
5116 /**
5117  * i915_gem_track_fb - update frontbuffer tracking
5118  * @old: current GEM buffer for the frontbuffer slots
5119  * @new: new GEM buffer for the frontbuffer slots
5120  * @frontbuffer_bits: bitmask of frontbuffer slots
5121  *
5122  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5123  * from @old and setting them in @new. Both @old and @new can be NULL.
5124  */
5125 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5126                        struct drm_i915_gem_object *new,
5127                        unsigned frontbuffer_bits)
5128 {
5129         if (old) {
5130                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5131                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5132                 old->frontbuffer_bits &= ~frontbuffer_bits;
5133         }
5134
5135         if (new) {
5136                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5137                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5138                 new->frontbuffer_bits |= frontbuffer_bits;
5139         }
5140 }
5141
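/*
 * Check whether @mutex is currently held by @task. Only reliable when the
 * mutex records its owner (SMP or mutex debugging); otherwise we
 * conservatively report false.
 */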
5142 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5143 {
5144         if (!mutex_is_locked(mutex))
5145                 return false;
5146
5147 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5148         return mutex->owner == task;
5149 #else
5150         /* On UP we may have been preempted, so we cannot assume we own the lock */
5151         return false;
5152 #endif
5153 }
5154
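/*
 * Take struct_mutex for the shrinker. If reclaim recursed from i915 itself
 * and the current task already holds the lock, carry on without taking it
 * and use *unlock to tell the caller not to drop it.
 */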
5155 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5156 {
5157         if (!mutex_trylock(&dev->struct_mutex)) {
5158                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5159                         return false;
5160
5161                 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5162                         return false;
5163
5164                 *unlock = false;
5165         } else
5166                 *unlock = true;
5167
5168         return true;
5169 }
5170
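/* Count how many of the object's vmas actually have address space allocated. */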
5171 static int num_vma_bound(struct drm_i915_gem_object *obj)
5172 {
5173         struct i915_vma *vma;
5174         int count = 0;
5175
5176         list_for_each_entry(vma, &obj->vma_list, vma_link)
5177                 if (drm_mm_node_allocated(&vma->node))
5178                         count++;
5179
5180         return count;
5181 }
5182
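/*
 * Shrinker count callback: report how many pages could be reclaimed, i.e.
 * unbound objects with no extra page pins plus unpinned bound objects whose
 * pages are held only by their bindings.
 */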
5183 static unsigned long
5184 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5185 {
5186         struct drm_i915_private *dev_priv =
5187                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5188         struct drm_device *dev = dev_priv->dev;
5189         struct drm_i915_gem_object *obj;
5190         unsigned long count;
5191         bool unlock;
5192
5193         if (!i915_gem_shrinker_lock(dev, &unlock))
5194                 return 0;
5195
5196         count = 0;
5197         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5198                 if (obj->pages_pin_count == 0)
5199                         count += obj->base.size >> PAGE_SHIFT;
5200
5201         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5202                 if (!i915_gem_obj_is_pinned(obj) &&
5203                     obj->pages_pin_count == num_vma_bound(obj))
5204                         count += obj->base.size >> PAGE_SHIFT;
5205         }
5206
5207         if (unlock)
5208                 mutex_unlock(&dev->struct_mutex);
5209
5210         return count;
5211 }
5212
5213 /* Per-address-space (VM) object binding helpers */
5214 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5215                                   struct i915_address_space *vm)
5216 {
5217         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5218         struct i915_vma *vma;
5219
5220         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5221
5222         list_for_each_entry(vma, &o->vma_list, vma_link) {
5223                 if (vma->vm == vm)
5224                         return vma->node.start;
5225
5226         }
5227         WARN(1, "%s vma for this object not found.\n",
5228              i915_is_ggtt(vm) ? "global" : "ppgtt");
5229         return -1;
5230 }
5231
5232 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5233                         struct i915_address_space *vm)
5234 {
5235         struct i915_vma *vma;
5236
5237         list_for_each_entry(vma, &o->vma_list, vma_link)
5238                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5239                         return true;
5240
5241         return false;
5242 }
5243
5244 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5245 {
5246         struct i915_vma *vma;
5247
5248         list_for_each_entry(vma, &o->vma_list, vma_link)
5249                 if (drm_mm_node_allocated(&vma->node))
5250                         return true;
5251
5252         return false;
5253 }
5254
5255 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5256                                 struct i915_address_space *vm)
5257 {
5258         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5259         struct i915_vma *vma;
5260
5261         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5262
5263         BUG_ON(list_empty(&o->vma_list));
5264
5265         list_for_each_entry(vma, &o->vma_list, vma_link)
5266                 if (vma->vm == vm)
5267                         return vma->node.size;
5268
5269         return 0;
5270 }
5271
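/*
 * Shrinker scan callback: reclaim purgeable objects first, then fall back to
 * releasing other bound and unbound objects until sc->nr_to_scan pages have
 * been freed.
 */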
5272 static unsigned long
5273 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5274 {
5275         struct drm_i915_private *dev_priv =
5276                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5277         struct drm_device *dev = dev_priv->dev;
5278         unsigned long freed;
5279         bool unlock;
5280
5281         if (!i915_gem_shrinker_lock(dev, &unlock))
5282                 return SHRINK_STOP;
5283
5284         freed = i915_gem_shrink(dev_priv,
5285                                 sc->nr_to_scan,
5286                                 I915_SHRINK_BOUND |
5287                                 I915_SHRINK_UNBOUND |
5288                                 I915_SHRINK_PURGEABLE);
5289         if (freed < sc->nr_to_scan)
5290                 freed += i915_gem_shrink(dev_priv,
5291                                          sc->nr_to_scan - freed,
5292                                          I915_SHRINK_BOUND |
5293                                          I915_SHRINK_UNBOUND);
5294         if (unlock)
5295                 mutex_unlock(&dev->struct_mutex);
5296
5297         return freed;
5298 }
5299
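/*
 * OOM notifier: try for up to ~5 seconds to get the lock, release everything
 * we can via i915_gem_shrink_all() and report how many pages were freed and
 * how much memory is still pinned.
 */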
5300 static int
5301 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5302 {
5303         struct drm_i915_private *dev_priv =
5304                 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5305         struct drm_device *dev = dev_priv->dev;
5306         struct drm_i915_gem_object *obj;
5307         unsigned long timeout = msecs_to_jiffies(5000) + 1;
5308         unsigned long pinned, bound, unbound, freed_pages;
5309         bool was_interruptible;
5310         bool unlock;
5311
5312         while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5313                 schedule_timeout_killable(1);
5314                 if (fatal_signal_pending(current))
5315                         return NOTIFY_DONE;
5316         }
5317         if (timeout == 0) {
5318                 pr_err("Unable to purge GPU memory due to lock contention.\n");
5319                 return NOTIFY_DONE;
5320         }
5321
5322         was_interruptible = dev_priv->mm.interruptible;
5323         dev_priv->mm.interruptible = false;
5324
5325         freed_pages = i915_gem_shrink_all(dev_priv);
5326
5327         dev_priv->mm.interruptible = was_interruptible;
5328
5329         /* Because we may be allocating inside our own driver, we cannot
5330          * assert that there are no objects with pinned pages that are not
5331          * being pointed to by hardware.
5332          */
5333         unbound = bound = pinned = 0;
5334         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5335                 if (!obj->base.filp) /* not backed by a freeable object */
5336                         continue;
5337
5338                 if (obj->pages_pin_count)
5339                         pinned += obj->base.size;
5340                 else
5341                         unbound += obj->base.size;
5342         }
5343         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5344                 if (!obj->base.filp)
5345                         continue;
5346
5347                 if (obj->pages_pin_count)
5348                         pinned += obj->base.size;
5349                 else
5350                         bound += obj->base.size;
5351         }
5352
5353         if (unlock)
5354                 mutex_unlock(&dev->struct_mutex);
5355
5356         if (freed_pages || unbound || bound)
5357                 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5358                         freed_pages << PAGE_SHIFT, pinned);
5359         if (unbound || bound)
5360                 pr_err("%lu and %lu bytes still available in the "
5361                        "bound and unbound GPU page lists.\n",
5362                        bound, unbound);
5363
5364         *(unsigned long *)ptr += freed_pages;
5365         return NOTIFY_DONE;
5366 }
5367
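/*
 * Return the object's GGTT vma, or NULL if the first vma on its list does not
 * belong to the global GTT.
 */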
5368 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5369 {
5370         struct i915_vma *vma;
5371
5372         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5373         if (vma->vm != i915_obj_to_ggtt(obj))
5374                 return NULL;
5375
5376         return vma;
5377 }