drm/i915: Fallback to single page GTT mmappings for relocations
drivers/gpu/drm/i915/i915_gem_execbuffer.c (cascardo/linux.git)
1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <linux/dma_remapping.h>
30 #include <linux/reservation.h>
31 #include <linux/uaccess.h>
32
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35
36 #include "i915_drv.h"
37 #include "i915_gem_dmabuf.h"
38 #include "i915_trace.h"
39 #include "intel_drv.h"
40 #include "intel_frontbuffer.h"
41
42 #define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
43
44 #define  __EXEC_OBJECT_HAS_PIN          (1<<31)
45 #define  __EXEC_OBJECT_HAS_FENCE        (1<<30)
46 #define  __EXEC_OBJECT_NEEDS_MAP        (1<<29)
47 #define  __EXEC_OBJECT_NEEDS_BIAS       (1<<28)
48 #define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
49
50 #define BATCH_OFFSET_BIAS (256*1024)
51
52 struct i915_execbuffer_params {
53         struct drm_device               *dev;
54         struct drm_file                 *file;
55         struct i915_vma                 *batch;
56         u32                             dispatch_flags;
57         u32                             args_batch_start_offset;
58         struct intel_engine_cs          *engine;
59         struct i915_gem_context         *ctx;
60         struct drm_i915_gem_request     *request;
61 };
62
63 struct eb_vmas {
64         struct drm_i915_private *i915;
65         struct list_head vmas;
66         int and;
67         union {
68                 struct i915_vma *lut[0];
69                 struct hlist_head buckets[0];
70         };
71 };
72
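/*
 * Build the buffer lookup structure for this execbuf. When userspace
 * sets I915_EXEC_HANDLE_LUT, handles are simply indices into the exec
 * list, so a flat lookup table suffices; otherwise we fall back to a
 * small hash table (at most half a page of buckets) keyed by handle.
 */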
73 static struct eb_vmas *
74 eb_create(struct drm_i915_private *i915,
75           struct drm_i915_gem_execbuffer2 *args)
76 {
77         struct eb_vmas *eb = NULL;
78
79         if (args->flags & I915_EXEC_HANDLE_LUT) {
80                 unsigned size = args->buffer_count;
81                 size *= sizeof(struct i915_vma *);
82                 size += sizeof(struct eb_vmas);
83                 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
84         }
85
86         if (eb == NULL) {
87                 unsigned size = args->buffer_count;
88                 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
89                 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
90                 while (count > 2*size)
91                         count >>= 1;
92                 eb = kzalloc(count*sizeof(struct hlist_head) +
93                              sizeof(struct eb_vmas),
94                              GFP_TEMPORARY);
95                 if (eb == NULL)
96                         return eb;
97
98                 eb->and = count - 1;
99         } else
100                 eb->and = -args->buffer_count;
101
102         eb->i915 = i915;
103         INIT_LIST_HEAD(&eb->vmas);
104         return eb;
105 }
106
107 static void
108 eb_reset(struct eb_vmas *eb)
109 {
110         if (eb->and >= 0)
111                 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
112 }
113
114 static struct i915_vma *
115 eb_get_batch(struct eb_vmas *eb)
116 {
117         struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
118
119         /*
120          * SNA is doing fancy tricks with compressing batch buffers, which leads
121          * to negative relocation deltas. Usually that works out ok since the
122          * relocated address is still positive, except when the batch is placed
123          * very low in the GTT. Ensure this doesn't happen.
124          *
125          * Note that actual hangs have only been observed on gen7, but for
126          * paranoia do it everywhere.
127          */
128         if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
129                 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
130
131         return vma;
132 }
133
134 static int
135 eb_lookup_vmas(struct eb_vmas *eb,
136                struct drm_i915_gem_exec_object2 *exec,
137                const struct drm_i915_gem_execbuffer2 *args,
138                struct i915_address_space *vm,
139                struct drm_file *file)
140 {
141         struct drm_i915_gem_object *obj;
142         struct list_head objects;
143         int i, ret;
144
145         INIT_LIST_HEAD(&objects);
146         spin_lock(&file->table_lock);
147         /* Grab a reference to the object and release the lock so we can
148          * look up or create the VMA without using GFP_ATOMIC. */
149         for (i = 0; i < args->buffer_count; i++) {
150                 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
151                 if (obj == NULL) {
152                         spin_unlock(&file->table_lock);
153                         DRM_DEBUG("Invalid object handle %d at index %d\n",
154                                    exec[i].handle, i);
155                         ret = -ENOENT;
156                         goto err;
157                 }
158
159                 if (!list_empty(&obj->obj_exec_link)) {
160                         spin_unlock(&file->table_lock);
161                         DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
162                                    obj, exec[i].handle, i);
163                         ret = -EINVAL;
164                         goto err;
165                 }
166
167                 i915_gem_object_get(obj);
168                 list_add_tail(&obj->obj_exec_link, &objects);
169         }
170         spin_unlock(&file->table_lock);
171
172         i = 0;
173         while (!list_empty(&objects)) {
174                 struct i915_vma *vma;
175
176                 obj = list_first_entry(&objects,
177                                        struct drm_i915_gem_object,
178                                        obj_exec_link);
179
180                 /*
181                  * NOTE: We can leak any vmas created here if something fails
182                  * later on. But that's no issue since vma_unbind can deal with
183                  * vmas which are not actually bound. And since
184                  * lookup_or_create is the only interface to get at a vma
185                  * from an (obj, vm) pair, we don't run the risk of creating
186                  * duplicate vmas for the same vm.
187                  */
188                 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
189                 if (unlikely(IS_ERR(vma))) {
190                         DRM_DEBUG("Failed to lookup VMA\n");
191                         ret = PTR_ERR(vma);
192                         goto err;
193                 }
194
195                 /* Transfer ownership from the objects list to the vmas list. */
196                 list_add_tail(&vma->exec_list, &eb->vmas);
197                 list_del_init(&obj->obj_exec_link);
198
199                 vma->exec_entry = &exec[i];
200                 if (eb->and < 0) {
201                         eb->lut[i] = vma;
202                 } else {
203                         uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
204                         vma->exec_handle = handle;
205                         hlist_add_head(&vma->exec_node,
206                                        &eb->buckets[handle & eb->and]);
207                 }
208                 ++i;
209         }
210
211         return 0;
212
213
214 err:
215         while (!list_empty(&objects)) {
216                 obj = list_first_entry(&objects,
217                                        struct drm_i915_gem_object,
218                                        obj_exec_link);
219                 list_del_init(&obj->obj_exec_link);
220                 i915_gem_object_put(obj);
221         }
222         /*
223          * Objects already transferred to the vmas list will be unreferenced by
224          * eb_destroy.
225          */
226
227         return ret;
228 }
229
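/*
 * Look up a vma by execbuf handle. A negative eb->and means the flat
 * LUT is in use (eb->and == -buffer_count and handles are indices);
 * otherwise eb->and masks the handle into its hash bucket.
 */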
230 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
231 {
232         if (eb->and < 0) {
233                 if (handle >= -eb->and)
234                         return NULL;
235                 return eb->lut[handle];
236         } else {
237                 struct hlist_head *head;
238                 struct i915_vma *vma;
239
240                 head = &eb->buckets[handle & eb->and];
241                 hlist_for_each_entry(vma, head, exec_node) {
242                         if (vma->exec_handle == handle)
243                                 return vma;
244                 }
245                 return NULL;
246         }
247 }
248
249 static void
250 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
251 {
252         struct drm_i915_gem_exec_object2 *entry;
253         struct drm_i915_gem_object *obj = vma->obj;
254
255         if (!drm_mm_node_allocated(&vma->node))
256                 return;
257
258         entry = vma->exec_entry;
259
260         if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
261                 i915_gem_object_unpin_fence(obj);
262
263         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
264                 __i915_vma_unpin(vma);
265
266         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
267 }
268
269 static void eb_destroy(struct eb_vmas *eb)
270 {
271         while (!list_empty(&eb->vmas)) {
272                 struct i915_vma *vma;
273
274                 vma = list_first_entry(&eb->vmas,
275                                        struct i915_vma,
276                                        exec_list);
277                 list_del_init(&vma->exec_list);
278                 i915_gem_execbuffer_unreserve_vma(vma);
279                 i915_vma_put(vma);
280         }
281         kfree(eb);
282 }
283
284 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
285 {
286         if (DBG_USE_CPU_RELOC)
287                 return DBG_USE_CPU_RELOC > 0;
288
289         return (HAS_LLC(obj->base.dev) ||
290                 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
291                 obj->cache_level != I915_CACHE_NONE);
292 }
293
294 /* Used to convert any address to canonical form.
295  * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
296  * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
297  * addresses to be in a canonical form:
298  * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
299  * canonical form [63:48] == [47]."
300  */
301 #define GEN8_HIGH_ADDRESS_BIT 47
302 static inline uint64_t gen8_canonical_addr(uint64_t address)
303 {
304         return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
305 }
306
307 static inline uint64_t gen8_noncanonical_addr(uint64_t address)
308 {
309         return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
310 }
311
312 static inline uint64_t
313 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
314                   uint64_t target_offset)
315 {
316         return gen8_canonical_addr((int)reloc->delta + target_offset);
317 }
318
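/*
 * The reloc_cache keeps the most recently used page mapping alive
 * between relocation writes, either a kmap_atomic of a shmem page or
 * an atomic iomap of a page in the mappable GTT aperture. Because the
 * mapping is page aligned, the bits of vaddr below PAGE_MASK are free
 * to carry the KMAP flag and any pending clflush flags.
 */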
319 struct reloc_cache {
320         struct drm_i915_private *i915;
321         struct drm_mm_node node;
322         unsigned long vaddr;
323         unsigned int page;
324         bool use_64bit_reloc;
325 };
326
327 static void reloc_cache_init(struct reloc_cache *cache,
328                              struct drm_i915_private *i915)
329 {
330         cache->page = -1;
331         cache->vaddr = 0;
332         cache->i915 = i915;
333         cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
334         cache->node.allocated = false;
335 }
336
337 static inline void *unmask_page(unsigned long p)
338 {
339         return (void *)(uintptr_t)(p & PAGE_MASK);
340 }
341
342 static inline unsigned int unmask_flags(unsigned long p)
343 {
344         return p & ~PAGE_MASK;
345 }
346
347 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
348
349 static void reloc_cache_fini(struct reloc_cache *cache)
350 {
351         void *vaddr;
352
353         if (!cache->vaddr)
354                 return;
355
356         vaddr = unmask_page(cache->vaddr);
357         if (cache->vaddr & KMAP) {
358                 if (cache->vaddr & CLFLUSH_AFTER)
359                         mb();
360
361                 kunmap_atomic(vaddr);
362                 i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
363         } else {
364                 wmb();
365                 io_mapping_unmap_atomic((void __iomem *)vaddr);
366                 if (cache->node.allocated) {
367                         struct i915_ggtt *ggtt = &cache->i915->ggtt;
368
369                         ggtt->base.clear_range(&ggtt->base,
370                                                cache->node.start,
371                                                cache->node.size,
372                                                true);
373                         drm_mm_remove_node(&cache->node);
374                 } else {
375                         i915_vma_unpin((struct i915_vma *)cache->node.mm);
376                 }
377         }
378 }
379
380 static void *reloc_kmap(struct drm_i915_gem_object *obj,
381                         struct reloc_cache *cache,
382                         int page)
383 {
384         void *vaddr;
385
386         if (cache->vaddr) {
387                 kunmap_atomic(unmask_page(cache->vaddr));
388         } else {
389                 unsigned int flushes;
390                 int ret;
391
392                 ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
393                 if (ret)
394                         return ERR_PTR(ret);
395
396                 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
397                 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
398
399                 cache->vaddr = flushes | KMAP;
400                 cache->node.mm = (void *)obj;
401                 if (flushes)
402                         mb();
403         }
404
405         vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
406         cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
407         cache->page = page;
408
409         return vaddr;
410 }
411
412 static void *reloc_iomap(struct drm_i915_gem_object *obj,
413                          struct reloc_cache *cache,
414                          int page)
415 {
416         struct i915_ggtt *ggtt = &cache->i915->ggtt;
417         unsigned long offset;
418         void *vaddr;
419
420         if (cache->node.allocated) {
421                 wmb();
422                 ggtt->base.insert_page(&ggtt->base,
423                                        i915_gem_object_get_dma_address(obj, page),
424                                        cache->node.start, I915_CACHE_NONE, 0);
425                 cache->page = page;
426                 return unmask_page(cache->vaddr);
427         }
428
429         if (cache->vaddr) {
430                 io_mapping_unmap_atomic(unmask_page(cache->vaddr));
431         } else {
432                 struct i915_vma *vma;
433                 int ret;
434
435                 if (use_cpu_reloc(obj))
436                         return NULL;
437
438                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
439                 if (ret)
440                         return ERR_PTR(ret);
441
442                 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
443                                                PIN_MAPPABLE | PIN_NONBLOCK);
444                 if (IS_ERR(vma)) {
445                         memset(&cache->node, 0, sizeof(cache->node));
446                         ret = drm_mm_insert_node_in_range_generic
447                                 (&ggtt->base.mm, &cache->node,
448                                  4096, 0, 0,
449                                  0, ggtt->mappable_end,
450                                  DRM_MM_SEARCH_DEFAULT,
451                                  DRM_MM_CREATE_DEFAULT);
452                         if (ret)
453                                 return ERR_PTR(ret);
454                 } else {
455                         ret = i915_gem_object_put_fence(obj);
456                         if (ret) {
457                                 i915_vma_unpin(vma);
458                                 return ERR_PTR(ret);
459                         }
460
461                         cache->node.start = vma->node.start;
462                         cache->node.mm = (void *)vma;
463                 }
464         }
465
466         offset = cache->node.start;
467         if (cache->node.allocated) {
468                 ggtt->base.insert_page(&ggtt->base,
469                                        i915_gem_object_get_dma_address(obj, page),
470                                        offset, I915_CACHE_NONE, 0);
471         } else {
472                 offset += page << PAGE_SHIFT;
473         }
474
475         vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
476         cache->page = page;
477         cache->vaddr = (unsigned long)vaddr;
478
479         return vaddr;
480 }
481
482 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
483                          struct reloc_cache *cache,
484                          int page)
485 {
486         void *vaddr;
487
488         if (cache->page == page) {
489                 vaddr = unmask_page(cache->vaddr);
490         } else {
491                 vaddr = NULL;
492                 if ((cache->vaddr & KMAP) == 0)
493                         vaddr = reloc_iomap(obj, cache, page);
494                 if (!vaddr)
495                         vaddr = reloc_kmap(obj, cache, page);
496         }
497
498         return vaddr;
499 }
500
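/*
 * Write a single dword, clflushing the cacheline before and/or after
 * as requested so that the update is visible to the GPU even when the
 * object is not coherently cached.
 */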
501 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
502 {
503         if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
504                 if (flushes & CLFLUSH_BEFORE) {
505                         clflushopt(addr);
506                         mb();
507                 }
508
509                 *addr = value;
510
511                 /* Writes to the same cacheline are serialised by the CPU
512                  * (including clflush). On the write path, we only require
513                  * that it hits memory in an orderly fashion and place
514                  * mb barriers at the start and end of the relocation phase
515                  * to ensure ordering of clflush wrt to the system.
516          * to ensure ordering of clflush with respect to the system.
517                 if (flushes & CLFLUSH_AFTER)
518                         clflushopt(addr);
519         } else
520                 *addr = value;
521 }
522
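/*
 * Write the relocated address into the batch. On gen8+ the address is
 * 64 bits wide and is emitted as two 32-bit writes; the second half
 * may land on the following page, so the loop goes back through
 * reloc_vaddr() to remap before writing the upper dword.
 */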
523 static int
524 relocate_entry(struct drm_i915_gem_object *obj,
525                const struct drm_i915_gem_relocation_entry *reloc,
526                struct reloc_cache *cache,
527                u64 target_offset)
528 {
529         u64 offset = reloc->offset;
530         bool wide = cache->use_64bit_reloc;
531         void *vaddr;
532
533         target_offset = relocation_target(reloc, target_offset);
534 repeat:
535         vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
536         if (IS_ERR(vaddr))
537                 return PTR_ERR(vaddr);
538
539         clflush_write32(vaddr + offset_in_page(offset),
540                         lower_32_bits(target_offset),
541                         cache->vaddr);
542
543         if (wide) {
544                 offset += sizeof(u32);
545                 target_offset >>= 32;
546                 wide = false;
547                 goto repeat;
548         }
549
550         return 0;
551 }
552
553 static bool object_is_idle(struct drm_i915_gem_object *obj)
554 {
555         unsigned long active = i915_gem_object_get_active(obj);
556         int idx;
557
558         for_each_active(active, idx) {
559                 if (!i915_gem_active_is_idle(&obj->last_read[idx],
560                                              &obj->base.dev->struct_mutex))
561                         return false;
562         }
563
564         return true;
565 }
566
567 static int
568 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
569                                    struct eb_vmas *eb,
570                                    struct drm_i915_gem_relocation_entry *reloc,
571                                    struct reloc_cache *cache)
572 {
573         struct drm_device *dev = obj->base.dev;
574         struct drm_gem_object *target_obj;
575         struct drm_i915_gem_object *target_i915_obj;
576         struct i915_vma *target_vma;
577         uint64_t target_offset;
578         int ret;
579
580         /* we already hold a reference to all valid objects */
581         target_vma = eb_get_vma(eb, reloc->target_handle);
582         if (unlikely(target_vma == NULL))
583                 return -ENOENT;
584         target_i915_obj = target_vma->obj;
585         target_obj = &target_vma->obj->base;
586
587         target_offset = gen8_canonical_addr(target_vma->node.start);
588
589         /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
590          * pipe_control writes because the gpu doesn't properly redirect them
591          * through the ppgtt for non-secure batchbuffers. */
592         if (unlikely(IS_GEN6(dev) &&
593             reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
594                 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
595                                     PIN_GLOBAL);
596                 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
597                         return ret;
598         }
599
600         /* Validate that the target is in a valid r/w GPU domain */
601         if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
602                 DRM_DEBUG("reloc with multiple write domains: "
603                           "obj %p target %d offset %d "
604                           "read %08x write %08x",
605                           obj, reloc->target_handle,
606                           (int) reloc->offset,
607                           reloc->read_domains,
608                           reloc->write_domain);
609                 return -EINVAL;
610         }
611         if (unlikely((reloc->write_domain | reloc->read_domains)
612                      & ~I915_GEM_GPU_DOMAINS)) {
613                 DRM_DEBUG("reloc with read/write non-GPU domains: "
614                           "obj %p target %d offset %d "
615                           "read %08x write %08x",
616                           obj, reloc->target_handle,
617                           (int) reloc->offset,
618                           reloc->read_domains,
619                           reloc->write_domain);
620                 return -EINVAL;
621         }
622
623         target_obj->pending_read_domains |= reloc->read_domains;
624         target_obj->pending_write_domain |= reloc->write_domain;
625
626         /* If the relocation already has the right value in it, no
627          * more work needs to be done.
628          */
629         if (target_offset == reloc->presumed_offset)
630                 return 0;
631
632         /* Check that the relocation address is valid... */
633         if (unlikely(reloc->offset >
634                      obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
635                 DRM_DEBUG("Relocation beyond object bounds: "
636                           "obj %p target %d offset %d size %d.\n",
637                           obj, reloc->target_handle,
638                           (int) reloc->offset,
639                           (int) obj->base.size);
640                 return -EINVAL;
641         }
642         if (unlikely(reloc->offset & 3)) {
643                 DRM_DEBUG("Relocation not 4-byte aligned: "
644                           "obj %p target %d offset %d.\n",
645                           obj, reloc->target_handle,
646                           (int) reloc->offset);
647                 return -EINVAL;
648         }
649
650         /* We can't wait for rendering with pagefaults disabled */
651         if (pagefault_disabled() && !object_is_idle(obj))
652                 return -EFAULT;
653
654         ret = relocate_entry(obj, reloc, cache, target_offset);
655         if (ret)
656                 return ret;
657
658         /* and update the user's relocation entry */
659         reloc->presumed_offset = target_offset;
660         return 0;
661 }
662
663 static int
664 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
665                                  struct eb_vmas *eb)
666 {
667 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
668         struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
669         struct drm_i915_gem_relocation_entry __user *user_relocs;
670         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
671         struct reloc_cache cache;
672         int remain, ret = 0;
673
674         user_relocs = u64_to_user_ptr(entry->relocs_ptr);
675         reloc_cache_init(&cache, eb->i915);
676
677         remain = entry->relocation_count;
678         while (remain) {
679                 struct drm_i915_gem_relocation_entry *r = stack_reloc;
680                 int count = remain;
681                 if (count > ARRAY_SIZE(stack_reloc))
682                         count = ARRAY_SIZE(stack_reloc);
683                 remain -= count;
684
685                 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
686                         ret = -EFAULT;
687                         goto out;
688                 }
689
690                 do {
691                         u64 offset = r->presumed_offset;
692
693                         ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
694                         if (ret)
695                                 goto out;
696
697                         if (r->presumed_offset != offset &&
698                             __put_user(r->presumed_offset,
699                                        &user_relocs->presumed_offset)) {
700                                 ret = -EFAULT;
701                                 goto out;
702                         }
703
704                         user_relocs++;
705                         r++;
706                 } while (--count);
707         }
708
709 out:
710         reloc_cache_fini(&cache);
711         return ret;
712 #undef N_RELOC
713 }
714
715 static int
716 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
717                                       struct eb_vmas *eb,
718                                       struct drm_i915_gem_relocation_entry *relocs)
719 {
720         const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
721         struct reloc_cache cache;
722         int i, ret = 0;
723
724         reloc_cache_init(&cache, eb->i915);
725         for (i = 0; i < entry->relocation_count; i++) {
726                 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
727                 if (ret)
728                         break;
729         }
730         reloc_cache_fini(&cache);
731
732         return ret;
733 }
734
735 static int
736 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
737 {
738         struct i915_vma *vma;
739         int ret = 0;
740
741         /* This is the fast path and we cannot handle a pagefault whilst
742          * holding the struct mutex lest the user pass in the relocations
743          * contained within an mmapped bo. In such a case, the page
744          * fault handler would call i915_gem_fault() and we would try to
745          * acquire the struct mutex again. Obviously this is bad and so
746          * lockdep complains vehemently.
747          */
748         pagefault_disable();
749         list_for_each_entry(vma, &eb->vmas, exec_list) {
750                 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
751                 if (ret)
752                         break;
753         }
754         pagefault_enable();
755
756         return ret;
757 }
758
759 static bool only_mappable_for_reloc(unsigned int flags)
760 {
761         return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
762                 __EXEC_OBJECT_NEEDS_MAP;
763 }
764
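/*
 * Pin (and bind, if necessary) a vma according to its exec entry,
 * translating the entry flags into pin flags. If a mappable pin fails
 * with -ENOSPC or -E2BIG and mappability was only wanted for the
 * relocation path (not for a fence), retry without PIN_MAPPABLE and
 * let relocation processing fall back to other access methods.
 */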
765 static int
766 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
767                                 struct intel_engine_cs *engine,
768                                 bool *need_reloc)
769 {
770         struct drm_i915_gem_object *obj = vma->obj;
771         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
772         uint64_t flags;
773         int ret;
774
775         flags = PIN_USER;
776         if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
777                 flags |= PIN_GLOBAL;
778
779         if (!drm_mm_node_allocated(&vma->node)) {
780                 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
781                  * limit address to the first 4GBs for unflagged objects.
782                  */
783                 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
784                         flags |= PIN_ZONE_4G;
785                 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
786                         flags |= PIN_GLOBAL | PIN_MAPPABLE;
787                 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
788                         flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
789                 if (entry->flags & EXEC_OBJECT_PINNED)
790                         flags |= entry->offset | PIN_OFFSET_FIXED;
791                 if ((flags & PIN_MAPPABLE) == 0)
792                         flags |= PIN_HIGH;
793         }
794
795         ret = i915_vma_pin(vma,
796                            entry->pad_to_size,
797                            entry->alignment,
798                            flags);
799         if ((ret == -ENOSPC || ret == -E2BIG) &&
800             only_mappable_for_reloc(entry->flags))
801                 ret = i915_vma_pin(vma,
802                                    entry->pad_to_size,
803                                    entry->alignment,
804                                    flags & ~PIN_MAPPABLE);
805         if (ret)
806                 return ret;
807
808         entry->flags |= __EXEC_OBJECT_HAS_PIN;
809
810         if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
811                 ret = i915_gem_object_get_fence(obj);
812                 if (ret)
813                         return ret;
814
815                 if (i915_gem_object_pin_fence(obj))
816                         entry->flags |= __EXEC_OBJECT_HAS_FENCE;
817         }
818
819         if (entry->offset != vma->node.start) {
820                 entry->offset = vma->node.start;
821                 *need_reloc = true;
822         }
823
824         if (entry->flags & EXEC_OBJECT_WRITE) {
825                 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
826                 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
827         }
828
829         return 0;
830 }
831
832 static bool
833 need_reloc_mappable(struct i915_vma *vma)
834 {
835         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
836
837         if (entry->relocation_count == 0)
838                 return false;
839
840         if (!i915_vma_is_ggtt(vma))
841                 return false;
842
843         /* See also use_cpu_reloc() */
844         if (HAS_LLC(vma->obj->base.dev))
845                 return false;
846
847         if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
848                 return false;
849
850         return true;
851 }
852
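/*
 * Check whether an already bound vma still satisfies the constraints
 * of its exec entry (alignment, pad_to_size, fixed offset, offset
 * bias, mappability and the 4GiB zone); a misplaced vma has to be
 * unbound and rebound.
 */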
853 static bool
854 eb_vma_misplaced(struct i915_vma *vma)
855 {
856         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
857         struct drm_i915_gem_object *obj = vma->obj;
858
859         WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
860                 !i915_vma_is_ggtt(vma));
861
862         if (entry->alignment &&
863             vma->node.start & (entry->alignment - 1))
864                 return true;
865
866         if (vma->node.size < entry->pad_to_size)
867                 return true;
868
869         if (entry->flags & EXEC_OBJECT_PINNED &&
870             vma->node.start != entry->offset)
871                 return true;
872
873         if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
874             vma->node.start < BATCH_OFFSET_BIAS)
875                 return true;
876
877         /* avoid costly ping-pong once a batch bo has ended up non-mappable */
878         if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
879                 return !only_mappable_for_reloc(entry->flags);
880
881         if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
882             (vma->node.start + vma->node.size - 1) >> 32)
883                 return true;
884
885         return false;
886 }
887
888 static int
889 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
890                             struct list_head *vmas,
891                             struct i915_gem_context *ctx,
892                             bool *need_relocs)
893 {
894         struct drm_i915_gem_object *obj;
895         struct i915_vma *vma;
896         struct i915_address_space *vm;
897         struct list_head ordered_vmas;
898         struct list_head pinned_vmas;
899         bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
900         int retry;
901
902         vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
903
904         INIT_LIST_HEAD(&ordered_vmas);
905         INIT_LIST_HEAD(&pinned_vmas);
906         while (!list_empty(vmas)) {
907                 struct drm_i915_gem_exec_object2 *entry;
908                 bool need_fence, need_mappable;
909
910                 vma = list_first_entry(vmas, struct i915_vma, exec_list);
911                 obj = vma->obj;
912                 entry = vma->exec_entry;
913
914                 if (ctx->flags & CONTEXT_NO_ZEROMAP)
915                         entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
916
917                 if (!has_fenced_gpu_access)
918                         entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
919                 need_fence =
920                         entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
921                         i915_gem_object_is_tiled(obj);
922                 need_mappable = need_fence || need_reloc_mappable(vma);
923
924                 if (entry->flags & EXEC_OBJECT_PINNED)
925                         list_move_tail(&vma->exec_list, &pinned_vmas);
926                 else if (need_mappable) {
927                         entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
928                         list_move(&vma->exec_list, &ordered_vmas);
929                 } else
930                         list_move_tail(&vma->exec_list, &ordered_vmas);
931
932                 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
933                 obj->base.pending_write_domain = 0;
934         }
935         list_splice(&ordered_vmas, vmas);
936         list_splice(&pinned_vmas, vmas);
937
938         /* Attempt to pin all of the buffers into the GTT.
939          * This is done in 3 phases:
940          *
941          * 1a. Unbind all objects that do not match the GTT constraints for
942          *     the execbuffer (fenceable, mappable, alignment etc).
943          * 1b. Increment pin count for already bound objects.
944          * 2.  Bind new objects.
945          * 3.  Decrement pin count.
946          *
947          * This avoids unnecessary unbinding of later objects in order to make
948          * room for the earlier objects *unless* we need to defragment.
949          */
950         retry = 0;
951         do {
952                 int ret = 0;
953
954                 /* Unbind any ill-fitting objects or pin. */
955                 list_for_each_entry(vma, vmas, exec_list) {
956                         if (!drm_mm_node_allocated(&vma->node))
957                                 continue;
958
959                         if (eb_vma_misplaced(vma))
960                                 ret = i915_vma_unbind(vma);
961                         else
962                                 ret = i915_gem_execbuffer_reserve_vma(vma,
963                                                                       engine,
964                                                                       need_relocs);
965                         if (ret)
966                                 goto err;
967                 }
968
969                 /* Bind fresh objects */
970                 list_for_each_entry(vma, vmas, exec_list) {
971                         if (drm_mm_node_allocated(&vma->node))
972                                 continue;
973
974                         ret = i915_gem_execbuffer_reserve_vma(vma, engine,
975                                                               need_relocs);
976                         if (ret)
977                                 goto err;
978                 }
979
980 err:
981                 if (ret != -ENOSPC || retry++)
982                         return ret;
983
984                 /* Decrement pin count for bound objects */
985                 list_for_each_entry(vma, vmas, exec_list)
986                         i915_gem_execbuffer_unreserve_vma(vma);
987
988                 ret = i915_gem_evict_vm(vm, true);
989                 if (ret)
990                         return ret;
991         } while (1);
992 }
993
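/*
 * Slow-path relocation: drop struct_mutex, copy all relocation arrays
 * into kernel memory so that later processing cannot fault on user
 * pages, then retake the lock, re-look up and re-reserve the objects,
 * and apply the relocations from the kernel copy.
 */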
994 static int
995 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
996                                   struct drm_i915_gem_execbuffer2 *args,
997                                   struct drm_file *file,
998                                   struct intel_engine_cs *engine,
999                                   struct eb_vmas *eb,
1000                                   struct drm_i915_gem_exec_object2 *exec,
1001                                   struct i915_gem_context *ctx)
1002 {
1003         struct drm_i915_gem_relocation_entry *reloc;
1004         struct i915_address_space *vm;
1005         struct i915_vma *vma;
1006         bool need_relocs;
1007         int *reloc_offset;
1008         int i, total, ret;
1009         unsigned count = args->buffer_count;
1010
1011         vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
1012
1013         /* We may process another execbuffer during the unlock... */
1014         while (!list_empty(&eb->vmas)) {
1015                 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
1016                 list_del_init(&vma->exec_list);
1017                 i915_gem_execbuffer_unreserve_vma(vma);
1018                 i915_vma_put(vma);
1019         }
1020
1021         mutex_unlock(&dev->struct_mutex);
1022
1023         total = 0;
1024         for (i = 0; i < count; i++)
1025                 total += exec[i].relocation_count;
1026
1027         reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
1028         reloc = drm_malloc_ab(total, sizeof(*reloc));
1029         if (reloc == NULL || reloc_offset == NULL) {
1030                 drm_free_large(reloc);
1031                 drm_free_large(reloc_offset);
1032                 mutex_lock(&dev->struct_mutex);
1033                 return -ENOMEM;
1034         }
1035
1036         total = 0;
1037         for (i = 0; i < count; i++) {
1038                 struct drm_i915_gem_relocation_entry __user *user_relocs;
1039                 u64 invalid_offset = (u64)-1;
1040                 int j;
1041
1042                 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
1043
1044                 if (copy_from_user(reloc+total, user_relocs,
1045                                    exec[i].relocation_count * sizeof(*reloc))) {
1046                         ret = -EFAULT;
1047                         mutex_lock(&dev->struct_mutex);
1048                         goto err;
1049                 }
1050
1051                 /* As we do not update the known relocation offsets after
1052                  * relocating (due to the complexities in lock handling),
1053                  * we need to mark them as invalid now so that we force the
1054                  * relocation processing next time. Just in case the target
1055                  * object is evicted and then rebound into its old
1056                  * presumed_offset before the next execbuffer - if that
1057                  * happened we would make the mistake of assuming that the
1058                  * relocations were valid.
1059                  */
1060                 for (j = 0; j < exec[i].relocation_count; j++) {
1061                         if (__copy_to_user(&user_relocs[j].presumed_offset,
1062                                            &invalid_offset,
1063                                            sizeof(invalid_offset))) {
1064                                 ret = -EFAULT;
1065                                 mutex_lock(&dev->struct_mutex);
1066                                 goto err;
1067                         }
1068                 }
1069
1070                 reloc_offset[i] = total;
1071                 total += exec[i].relocation_count;
1072         }
1073
1074         ret = i915_mutex_lock_interruptible(dev);
1075         if (ret) {
1076                 mutex_lock(&dev->struct_mutex);
1077                 goto err;
1078         }
1079
1080         /* reacquire the objects */
1081         eb_reset(eb);
1082         ret = eb_lookup_vmas(eb, exec, args, vm, file);
1083         if (ret)
1084                 goto err;
1085
1086         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1087         ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1088                                           &need_relocs);
1089         if (ret)
1090                 goto err;
1091
1092         list_for_each_entry(vma, &eb->vmas, exec_list) {
1093                 int offset = vma->exec_entry - exec;
1094                 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
1095                                                             reloc + reloc_offset[offset]);
1096                 if (ret)
1097                         goto err;
1098         }
1099
1100         /* Leave the user relocations as they are; this is the painfully slow path,
1101          * and we want to avoid the complication of dropping the lock whilst
1102          * having buffers reserved in the aperture and so causing spurious
1103          * ENOSPC for random operations.
1104          */
1105
1106 err:
1107         drm_free_large(reloc);
1108         drm_free_large(reloc_offset);
1109         return ret;
1110 }
1111
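/*
 * Build a mask of the object active flags for every engine other than
 * the one this request runs on; objects still busy on those engines
 * must be synchronised against before we submit.
 */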
1112 static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
1113 {
1114         unsigned int mask;
1115
1116         mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
1117         mask <<= I915_BO_ACTIVE_SHIFT;
1118
1119         return mask;
1120 }
1121
1122 static int
1123 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
1124                                 struct list_head *vmas)
1125 {
1126         const unsigned int other_rings = eb_other_engines(req);
1127         struct i915_vma *vma;
1128         int ret;
1129
1130         list_for_each_entry(vma, vmas, exec_list) {
1131                 struct drm_i915_gem_object *obj = vma->obj;
1132
1133                 if (obj->flags & other_rings) {
1134                         ret = i915_gem_object_sync(obj, req);
1135                         if (ret)
1136                                 return ret;
1137                 }
1138
1139                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
1140                         i915_gem_clflush_object(obj, false);
1141         }
1142
1143         /* Unconditionally flush any chipset caches (for streaming writes). */
1144         i915_gem_chipset_flush(req->engine->i915);
1145
1146         /* Unconditionally invalidate GPU caches and TLBs. */
1147         return req->engine->emit_flush(req, EMIT_INVALIDATE);
1148 }
1149
1150 static bool
1151 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1152 {
1153         if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1154                 return false;
1155
1156         /* Kernel clipping was a DRI1 misfeature */
1157         if (exec->num_cliprects || exec->cliprects_ptr)
1158                 return false;
1159
1160         if (exec->DR4 == 0xffffffff) {
1161                 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1162                 exec->DR4 = 0;
1163         }
1164         if (exec->DR1 || exec->DR4)
1165                 return false;
1166
1167         if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1168                 return false;
1169
1170         return true;
1171 }
1172
1173 static int
1174 validate_exec_list(struct drm_device *dev,
1175                    struct drm_i915_gem_exec_object2 *exec,
1176                    int count)
1177 {
1178         unsigned relocs_total = 0;
1179         unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1180         unsigned invalid_flags;
1181         int i;
1182
1183         /* INTERNAL flags must not overlap with external ones */
1184         BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1185
1186         invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1187         if (USES_FULL_PPGTT(dev))
1188                 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1189
1190         for (i = 0; i < count; i++) {
1191                 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1192                 int length; /* limited by fault_in_pages_readable() */
1193
1194                 if (exec[i].flags & invalid_flags)
1195                         return -EINVAL;
1196
1197                 /* Offset can be used as input (EXEC_OBJECT_PINNED), so reject
1198                  * any non-page-aligned or non-canonical addresses.
1199                  */
1200                 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1201                         if (exec[i].offset !=
1202                             gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1203                                 return -EINVAL;
1204
1205                         /* From the drm_mm perspective the address space is continuous,
1206                          * so from this point we're always using non-canonical
1207                          * form internally.
1208                          */
1209                         exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1210                 }
1211
1212                 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1213                         return -EINVAL;
1214
1215                 /* pad_to_size was once a reserved field, so sanitize it */
1216                 if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1217                         if (offset_in_page(exec[i].pad_to_size))
1218                                 return -EINVAL;
1219                 } else {
1220                         exec[i].pad_to_size = 0;
1221                 }
1222
1223                 /* First check for malicious input causing overflow in
1224                  * the worst case where we need to allocate the entire
1225                  * relocation tree as a single array.
1226                  */
1227                 if (exec[i].relocation_count > relocs_max - relocs_total)
1228                         return -EINVAL;
1229                 relocs_total += exec[i].relocation_count;
1230
1231                 length = exec[i].relocation_count *
1232                         sizeof(struct drm_i915_gem_relocation_entry);
1233                 /*
1234                  * We must check that the entire relocation array is safe
1235                  * to read, but since we may need to update the presumed
1236                  * offsets during execution, check for full write access.
1237                  */
1238                 if (!access_ok(VERIFY_WRITE, ptr, length))
1239                         return -EFAULT;
1240
1241                 if (likely(!i915.prefault_disable)) {
1242                         if (fault_in_multipages_readable(ptr, length))
1243                                 return -EFAULT;
1244                 }
1245         }
1246
1247         return 0;
1248 }
1249
1250 static struct i915_gem_context *
1251 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1252                           struct intel_engine_cs *engine, const u32 ctx_id)
1253 {
1254         struct i915_gem_context *ctx = NULL;
1255         struct i915_ctx_hang_stats *hs;
1256
1257         if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1258                 return ERR_PTR(-EINVAL);
1259
1260         ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1261         if (IS_ERR(ctx))
1262                 return ctx;
1263
1264         hs = &ctx->hang_stats;
1265         if (hs->banned) {
1266                 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1267                 return ERR_PTR(-EIO);
1268         }
1269
1270         return ctx;
1271 }
1272
1273 void i915_vma_move_to_active(struct i915_vma *vma,
1274                              struct drm_i915_gem_request *req,
1275                              unsigned int flags)
1276 {
1277         struct drm_i915_gem_object *obj = vma->obj;
1278         const unsigned int idx = req->engine->id;
1279
1280         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1281
1282         obj->dirty = 1; /* be paranoid */
1283
1284         /* Add a reference if we're newly entering the active list.
1285          * The order in which we add operations to the retirement queue is
1286          * vital here: mark_active adds to the start of the callback list,
1287          * such that subsequent callbacks are called first. Therefore we
1288          * add the active reference first and queue for it to be dropped
1289          * *last*.
1290          */
1291         if (!i915_gem_object_is_active(obj))
1292                 i915_gem_object_get(obj);
1293         i915_gem_object_set_active(obj, idx);
1294         i915_gem_active_set(&obj->last_read[idx], req);
1295
1296         if (flags & EXEC_OBJECT_WRITE) {
1297                 i915_gem_active_set(&obj->last_write, req);
1298
1299                 intel_fb_obj_invalidate(obj, ORIGIN_CS);
1300
1301                 /* update for the implicit flush after a batch */
1302                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1303         }
1304
1305         if (flags & EXEC_OBJECT_NEEDS_FENCE) {
1306                 i915_gem_active_set(&obj->last_fence, req);
1307                 if (flags & __EXEC_OBJECT_HAS_FENCE) {
1308                         struct drm_i915_private *dev_priv = req->i915;
1309
1310                         list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1311                                        &dev_priv->mm.fence_list);
1312                 }
1313         }
1314
1315         i915_vma_set_active(vma, idx);
1316         i915_gem_active_set(&vma->last_read[idx], req);
1317         list_move_tail(&vma->vm_link, &vma->vm->active_list);
1318 }
1319
1320 static void eb_export_fence(struct drm_i915_gem_object *obj,
1321                             struct drm_i915_gem_request *req,
1322                             unsigned int flags)
1323 {
1324         struct reservation_object *resv;
1325
1326         resv = i915_gem_object_get_dmabuf_resv(obj);
1327         if (!resv)
1328                 return;
1329
1330         /* Ignore errors from failing to allocate the new fence; we can't
1331          * handle an error right now. Worst case should be missed
1332          * synchronisation leading to rendering corruption.
1333          */
1334         ww_mutex_lock(&resv->lock, NULL);
1335         if (flags & EXEC_OBJECT_WRITE)
1336                 reservation_object_add_excl_fence(resv, &req->fence);
1337         else if (reservation_object_reserve_shared(resv) == 0)
1338                 reservation_object_add_shared_fence(resv, &req->fence);
1339         ww_mutex_unlock(&resv->lock);
1340 }
1341
1342 static void
1343 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1344                                    struct drm_i915_gem_request *req)
1345 {
1346         struct i915_vma *vma;
1347
1348         list_for_each_entry(vma, vmas, exec_list) {
1349                 struct drm_i915_gem_object *obj = vma->obj;
1350                 u32 old_read = obj->base.read_domains;
1351                 u32 old_write = obj->base.write_domain;
1352
1353                 obj->base.write_domain = obj->base.pending_write_domain;
1354                 if (obj->base.write_domain)
1355                         vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1356                 else
1357                         obj->base.pending_read_domains |= obj->base.read_domains;
1358                 obj->base.read_domains = obj->base.pending_read_domains;
1359
1360                 i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
1361                 eb_export_fence(obj, req, vma->exec_entry->flags);
1362                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
1363         }
1364 }
1365
1366 static int
1367 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1368 {
1369         struct intel_ring *ring = req->ring;
1370         int ret, i;
1371
1372         if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
1373                 DRM_DEBUG("sol reset is gen7/rcs only\n");
1374                 return -EINVAL;
1375         }
1376
1377         ret = intel_ring_begin(req, 4 * 3);
1378         if (ret)
1379                 return ret;
1380
1381         for (i = 0; i < 4; i++) {
1382                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1383                 intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1384                 intel_ring_emit(ring, 0);
1385         }
1386
1387         intel_ring_advance(ring);
1388
1389         return 0;
1390 }
1391
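/*
 * Copy the batch through the command parser into a shadow buffer taken
 * from the engine's batch pool. Returning NULL (mapped from -EACCES,
 * an unhandled chained batch) signals the caller to fall back to the
 * original, unparsed batch.
 */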
1392 static struct i915_vma *
1393 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1394                           struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1395                           struct drm_i915_gem_object *batch_obj,
1396                           struct eb_vmas *eb,
1397                           u32 batch_start_offset,
1398                           u32 batch_len,
1399                           bool is_master)
1400 {
1401         struct drm_i915_gem_object *shadow_batch_obj;
1402         struct i915_vma *vma;
1403         int ret;
1404
1405         shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1406                                                    PAGE_ALIGN(batch_len));
1407         if (IS_ERR(shadow_batch_obj))
1408                 return ERR_CAST(shadow_batch_obj);
1409
1410         ret = intel_engine_cmd_parser(engine,
1411                                       batch_obj,
1412                                       shadow_batch_obj,
1413                                       batch_start_offset,
1414                                       batch_len,
1415                                       is_master);
1416         if (ret) {
1417                 if (ret == -EACCES) /* unhandled chained batch */
1418                         vma = NULL;
1419                 else
1420                         vma = ERR_PTR(ret);
1421                 goto out;
1422         }
1423
1424         vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1425         if (IS_ERR(vma))
1426                 goto out;
1427
1428         memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1429
1430         vma->exec_entry = shadow_exec_entry;
1431         vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1432         i915_gem_object_get(shadow_batch_obj);
1433         list_add_tail(&vma->exec_list, &eb->vmas);
1434
1435 out:
1436         i915_gem_object_unpin_pages(shadow_batch_obj);
1437         return vma;
1438 }
1439
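/*
 * Final submission step: flush the objects into the GPU domain, switch
 * to the request's context, reprogram INSTPM if the relative constants
 * mode changed, optionally reset the SOL offsets, then emit the
 * batchbuffer start and move every vma onto the active list.
 */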
1440 static int
1441 execbuf_submit(struct i915_execbuffer_params *params,
1442                struct drm_i915_gem_execbuffer2 *args,
1443                struct list_head *vmas)
1444 {
1445         struct drm_i915_private *dev_priv = params->request->i915;
1446         u64 exec_start, exec_len;
1447         int instp_mode;
1448         u32 instp_mask;
1449         int ret;
1450
1451         ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1452         if (ret)
1453                 return ret;
1454
1455         ret = i915_switch_context(params->request);
1456         if (ret)
1457                 return ret;
1458
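	/*
	 * I915_EXEC_CONSTANTS_* selects how shader constant buffer
	 * addresses are interpreted (relative vs absolute). The mode is
	 * programmed through the INSTPM register and is only meaningful
	 * on the render engine, so a non-default mode on any other
	 * engine is rejected below.
	 */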
1459         instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1460         instp_mask = I915_EXEC_CONSTANTS_MASK;
1461         switch (instp_mode) {
1462         case I915_EXEC_CONSTANTS_REL_GENERAL:
1463         case I915_EXEC_CONSTANTS_ABSOLUTE:
1464         case I915_EXEC_CONSTANTS_REL_SURFACE:
1465                 if (instp_mode != 0 && params->engine->id != RCS) {
1466                         DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1467                         return -EINVAL;
1468                 }
1469
1470                 if (instp_mode != dev_priv->relative_constants_mode) {
1471                         if (INTEL_INFO(dev_priv)->gen < 4) {
1472                                 DRM_DEBUG("no rel constants on pre-gen4\n");
1473                                 return -EINVAL;
1474                         }
1475
1476                         if (INTEL_INFO(dev_priv)->gen > 5 &&
1477                             instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1478                                 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1479                                 return -EINVAL;
1480                         }
1481
1482                         /* The HW changed the meaning of this bit on gen6 */
1483                         if (INTEL_INFO(dev_priv)->gen >= 6)
1484                                 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1485                 }
1486                 break;
1487         default:
1488                 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1489                 return -EINVAL;
1490         }
1491
1492         if (params->engine->id == RCS &&
1493             instp_mode != dev_priv->relative_constants_mode) {
1494                 struct intel_ring *ring = params->request->ring;
1495
1496                 ret = intel_ring_begin(params->request, 4);
1497                 if (ret)
1498                         return ret;
1499
1500                 intel_ring_emit(ring, MI_NOOP);
1501                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1502                 intel_ring_emit_reg(ring, INSTPM);
1503                 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1504                 intel_ring_advance(ring);
1505
1506                 dev_priv->relative_constants_mode = instp_mode;
1507         }
1508
1509         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1510                 ret = i915_reset_gen7_sol_offsets(params->request);
1511                 if (ret)
1512                         return ret;
1513         }
1514
1515         exec_len   = args->batch_len;
1516         exec_start = params->batch->node.start +
1517                      params->args_batch_start_offset;
1518
1519         if (exec_len == 0)
1520                 exec_len = params->batch->size;
1521
1522         ret = params->engine->emit_bb_start(params->request,
1523                                             exec_start, exec_len,
1524                                             params->dispatch_flags);
1525         if (ret)
1526                 return ret;
1527
1528         trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1529
1530         i915_gem_execbuffer_move_to_active(vmas, params->request);
1531
1532         return 0;
1533 }
1534
1535 /**
1536  * Find one BSD engine to dispatch the corresponding BSD command.
1537  * The selected engine index is cached per file and returned.
1538  */
1539 static unsigned int
1540 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1541                          struct drm_file *file)
1542 {
1543         struct drm_i915_file_private *file_priv = file->driver_priv;
1544
1545         /* Check whether the file_priv has already selected one ring. */
1546         if ((int)file_priv->bsd_engine < 0) {
1547                 /* If not, use the ping-pong mechanism to select one. */
1548                 mutex_lock(&dev_priv->drm.struct_mutex);
1549                 file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
1550                 dev_priv->mm.bsd_engine_dispatch_index ^= 1;
1551                 mutex_unlock(&dev_priv->drm.struct_mutex);
1552         }
1553
1554         return file_priv->bsd_engine;
1555 }
1556
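/*
 * Map the ring selector in the low bits of execbuffer2.flags onto an
 * engine. For example, userspace can target the second BSD ring on a
 * dual-VCS part explicitly with:
 *
 *	execbuf.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
 *
 * whereas plain I915_EXEC_BSD leaves the choice to the ping-pong
 * selector above.
 */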
1557 #define I915_USER_RINGS (4)
1558
1559 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1560         [I915_EXEC_DEFAULT]     = RCS,
1561         [I915_EXEC_RENDER]      = RCS,
1562         [I915_EXEC_BLT]         = BCS,
1563         [I915_EXEC_BSD]         = VCS,
1564         [I915_EXEC_VEBOX]       = VECS
1565 };
1566
1567 static struct intel_engine_cs *
1568 eb_select_engine(struct drm_i915_private *dev_priv,
1569                  struct drm_file *file,
1570                  struct drm_i915_gem_execbuffer2 *args)
1571 {
1572         unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1573         struct intel_engine_cs *engine;
1574
1575         if (user_ring_id > I915_USER_RINGS) {
1576                 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1577                 return NULL;
1578         }
1579
1580         if ((user_ring_id != I915_EXEC_BSD) &&
1581             ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1582                 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1583                           "bsd dispatch flags: %d\n", (int)(args->flags));
1584                 return NULL;
1585         }
1586
1587         if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1588                 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1589
1590                 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1591                         bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
1592                 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1593                            bsd_idx <= I915_EXEC_BSD_RING2) {
1594                         bsd_idx >>= I915_EXEC_BSD_SHIFT;
1595                         bsd_idx--;
1596                 } else {
1597                         DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1598                                   bsd_idx);
1599                         return NULL;
1600                 }
1601
1602                 engine = &dev_priv->engine[_VCS(bsd_idx)];
1603         } else {
1604                 engine = &dev_priv->engine[user_ring_map[user_ring_id]];
1605         }
1606
1607         if (!intel_engine_initialized(engine)) {
1608                 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1609                 return NULL;
1610         }
1611
1612         return engine;
1613 }
1614
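/*
 * Main execbuffer path: validate the arguments and select an engine,
 * then under struct_mutex look up the objects, reserve and relocate
 * them, optionally run the batch through the command parser, allocate
 * a request and hand everything to execbuf_submit().
 */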
1615 static int
1616 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1617                        struct drm_file *file,
1618                        struct drm_i915_gem_execbuffer2 *args,
1619                        struct drm_i915_gem_exec_object2 *exec)
1620 {
1621         struct drm_i915_private *dev_priv = to_i915(dev);
1622         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1623         struct eb_vmas *eb;
1624         struct drm_i915_gem_exec_object2 shadow_exec_entry;
1625         struct intel_engine_cs *engine;
1626         struct i915_gem_context *ctx;
1627         struct i915_address_space *vm;
1628         struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1629         struct i915_execbuffer_params *params = &params_master;
1630         const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1631         u32 dispatch_flags;
1632         int ret;
1633         bool need_relocs;
1634
1635         if (!i915_gem_check_execbuffer(args))
1636                 return -EINVAL;
1637
1638         ret = validate_exec_list(dev, exec, args->buffer_count);
1639         if (ret)
1640                 return ret;
1641
1642         dispatch_flags = 0;
1643         if (args->flags & I915_EXEC_SECURE) {
1644                 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1645                 return -EPERM;
1646
1647                 dispatch_flags |= I915_DISPATCH_SECURE;
1648         }
1649         if (args->flags & I915_EXEC_IS_PINNED)
1650                 dispatch_flags |= I915_DISPATCH_PINNED;
1651
1652         engine = eb_select_engine(dev_priv, file, args);
1653         if (!engine)
1654                 return -EINVAL;
1655
1656         if (args->buffer_count < 1) {
1657                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1658                 return -EINVAL;
1659         }
1660
1661         if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1662                 if (!HAS_RESOURCE_STREAMER(dev)) {
1663                         DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1664                         return -EINVAL;
1665                 }
1666                 if (engine->id != RCS) {
1667                         DRM_DEBUG("RS is not available on %s\n",
1668                                  engine->name);
1669                         return -EINVAL;
1670                 }
1671
1672                 dispatch_flags |= I915_DISPATCH_RS;
1673         }
1674
1675         /* Take a local wakeref for preparing to dispatch the execbuf as
1676          * we expect to access the hardware fairly frequently in the
1677          * process. Upon first dispatch, we acquire another prolonged
1678          * wakeref that we hold until the GPU has been idle for at least
1679          * 100ms.
1680          */
1681         intel_runtime_pm_get(dev_priv);
1682
1683         ret = i915_mutex_lock_interruptible(dev);
1684         if (ret)
1685                 goto pre_mutex_err;
1686
1687         ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1688         if (IS_ERR(ctx)) {
1689                 mutex_unlock(&dev->struct_mutex);
1690                 ret = PTR_ERR(ctx);
1691                 goto pre_mutex_err;
1692         }
1693
1694         i915_gem_context_get(ctx);
1695
1696         if (ctx->ppgtt)
1697                 vm = &ctx->ppgtt->base;
1698         else
1699                 vm = &ggtt->base;
1700
1701         memset(&params_master, 0x00, sizeof(params_master));
1702
1703         eb = eb_create(dev_priv, args);
1704         if (eb == NULL) {
1705                 i915_gem_context_put(ctx);
1706                 mutex_unlock(&dev->struct_mutex);
1707                 ret = -ENOMEM;
1708                 goto pre_mutex_err;
1709         }
1710
1711         /* Look up object handles */
1712         ret = eb_lookup_vmas(eb, exec, args, vm, file);
1713         if (ret)
1714                 goto err;
1715
1716         /* take note of the batch buffer before we might reorder the lists */
1717         params->batch = eb_get_batch(eb);
1718
1719         /* Move the objects en-masse into the GTT, evicting if necessary. */
1720         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1721         ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1722                                           &need_relocs);
1723         if (ret)
1724                 goto err;
1725
1726         /* The objects are in their final locations, apply the relocations. */
1727         if (need_relocs)
1728                 ret = i915_gem_execbuffer_relocate(eb);
1729         if (ret) {
1730                 if (ret == -EFAULT) {
1731                         ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1732                                                                 engine,
1733                                                                 eb, exec, ctx);
1734                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1735                 }
1736                 if (ret)
1737                         goto err;
1738         }
1739
1740         /* Set the pending read domains for the batch buffer to COMMAND */
1741         if (params->batch->obj->base.pending_write_domain) {
1742                 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1743                 ret = -EINVAL;
1744                 goto err;
1745         }
1746
1747         params->args_batch_start_offset = args->batch_start_offset;
1748         if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
1749                 struct i915_vma *vma;
1750
1751                 vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
1752                                                 params->batch->obj,
1753                                                 eb,
1754                                                 args->batch_start_offset,
1755                                                 args->batch_len,
1756                                                 drm_is_current_master(file));
1757                 if (IS_ERR(vma)) {
1758                         ret = PTR_ERR(vma);
1759                         goto err;
1760                 }
1761
1762                 if (vma) {
1763                         /*
1764                          * Batch parsed and accepted:
1765                          *
1766                          * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1767                          * bit from MI_BATCH_BUFFER_START commands issued in
1768                          * the dispatch_execbuffer implementations. We
1769                          * specifically don't want that set on batches the
1770                          * command parser has accepted.
1771                          */
1772                         dispatch_flags |= I915_DISPATCH_SECURE;
1773                         params->args_batch_start_offset = 0;
1774                         params->batch = vma;
1775                 }
1776         }
1777
1778         params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1779
1780         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1781          * batch" bit. Hence we need to pin secure batches into the global gtt.
1782          * hsw should have this fixed, but bdw mucks it up again. */
1783         if (dispatch_flags & I915_DISPATCH_SECURE) {
1784                 struct drm_i915_gem_object *obj = params->batch->obj;
1785                 struct i915_vma *vma;
1786
1787                 /*
1788                  * So on first glance it looks freaky that we pin the batch here
1789                  * outside of the reservation loop. But:
1790                  * - The batch is already pinned into the relevant ppgtt, so we
1791                  *   already have the backing storage fully allocated.
1792                  * - No other BO uses the global gtt (well contexts, but meh),
1793                  *   so we don't really have issues with multiple objects not
1794                  *   fitting due to fragmentation.
1795                  * So this is actually safe.
1796                  */
1797                 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
1798                 if (IS_ERR(vma)) {
1799                         ret = PTR_ERR(vma);
1800                         goto err;
1801                 }
1802
1803                 params->batch = vma;
1804         }
1805
1806         /* Allocate a request for this batch buffer nice and early. */
1807         params->request = i915_gem_request_alloc(engine, ctx);
1808         if (IS_ERR(params->request)) {
1809                 ret = PTR_ERR(params->request);
1810                 goto err_batch_unpin;
1811         }
1812
1813         /* Whilst this request exists, batch_obj will be on the
1814          * active_list, and so will hold the active reference. Only when this
1815          * request is retired will the batch_obj be moved onto the
1816          * inactive_list and lose its active reference. Hence we do not need
1817          * to explicitly hold another reference here.
1818          */
1819         params->request->batch = params->batch;
1820
1821         ret = i915_gem_request_add_to_client(params->request, file);
1822         if (ret)
1823                 goto err_request;
1824
1825         /*
1826          * Save assorted stuff away to pass through to *_submission().
1827          * NB: This data should be 'persistent' and not local as it will be
1828          * kept around beyond the duration of the IOCTL once the GPU
1829          * scheduler arrives.
1830          */
1831         params->dev                     = dev;
1832         params->file                    = file;
1833         params->engine                  = engine;
1834         params->dispatch_flags          = dispatch_flags;
1835         params->ctx                     = ctx;
1836
1837         ret = execbuf_submit(params, args, &eb->vmas);
1838 err_request:
1839         __i915_add_request(params->request, ret == 0);
1840
1841 err_batch_unpin:
1842         /*
1843          * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1844          * batch vma for correctness. To be less ugly and fragile, this
1845          * needs to be adjusted to also track the ggtt batch vma properly as
1846          * active.
1847          */
1848         if (dispatch_flags & I915_DISPATCH_SECURE)
1849                 i915_vma_unpin(params->batch);
1850 err:
1851         /* the request owns the ref now */
1852         i915_gem_context_put(ctx);
1853         eb_destroy(eb);
1854
1855         mutex_unlock(&dev->struct_mutex);
1856
1857 pre_mutex_err:
1858         /* intel_gpu_busy should also get a ref, so it will free when the device
1859          * is really idle. */
1860         intel_runtime_pm_put(dev_priv);
1861         return ret;
1862 }
1863
1864 /*
1865  * Legacy execbuffer just creates an exec2 list from the original exec object
1866  * list array and passes it to the real function.
1867  */
1868 int
1869 i915_gem_execbuffer(struct drm_device *dev, void *data,
1870                     struct drm_file *file)
1871 {
1872         struct drm_i915_gem_execbuffer *args = data;
1873         struct drm_i915_gem_execbuffer2 exec2;
1874         struct drm_i915_gem_exec_object *exec_list = NULL;
1875         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1876         int ret, i;
1877
1878         if (args->buffer_count < 1) {
1879                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1880                 return -EINVAL;
1881         }
1882
1883         /* Copy in the exec list from userland */
1884         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1885         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1886         if (exec_list == NULL || exec2_list == NULL) {
1887                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1888                           args->buffer_count);
1889                 drm_free_large(exec_list);
1890                 drm_free_large(exec2_list);
1891                 return -ENOMEM;
1892         }
1893         ret = copy_from_user(exec_list,
1894                              u64_to_user_ptr(args->buffers_ptr),
1895                              sizeof(*exec_list) * args->buffer_count);
1896         if (ret != 0) {
1897                 DRM_DEBUG("copy %d exec entries failed %d\n",
1898                           args->buffer_count, ret);
1899                 drm_free_large(exec_list);
1900                 drm_free_large(exec2_list);
1901                 return -EFAULT;
1902         }
1903
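        /*
         * The legacy exec_object lacks a flags field, so synthesize
         * one here: pre-gen4 hardware accesses tiled buffers through
         * fence registers, which is presumably why every buffer is
         * conservatively marked EXEC_OBJECT_NEEDS_FENCE there.
         */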
1904         for (i = 0; i < args->buffer_count; i++) {
1905                 exec2_list[i].handle = exec_list[i].handle;
1906                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1907                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1908                 exec2_list[i].alignment = exec_list[i].alignment;
1909                 exec2_list[i].offset = exec_list[i].offset;
1910                 if (INTEL_INFO(dev)->gen < 4)
1911                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1912                 else
1913                         exec2_list[i].flags = 0;
1914         }
1915
1916         exec2.buffers_ptr = args->buffers_ptr;
1917         exec2.buffer_count = args->buffer_count;
1918         exec2.batch_start_offset = args->batch_start_offset;
1919         exec2.batch_len = args->batch_len;
1920         exec2.DR1 = args->DR1;
1921         exec2.DR4 = args->DR4;
1922         exec2.num_cliprects = args->num_cliprects;
1923         exec2.cliprects_ptr = args->cliprects_ptr;
1924         exec2.flags = I915_EXEC_RENDER;
1925         i915_execbuffer2_set_context_id(exec2, 0);
1926
1927         ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1928         if (!ret) {
1929                 struct drm_i915_gem_exec_object __user *user_exec_list =
1930                         u64_to_user_ptr(args->buffers_ptr);
1931
1932                 /* Copy the new buffer offsets back to the user's exec list. */
1933                 for (i = 0; i < args->buffer_count; i++) {
1934                         exec2_list[i].offset =
1935                                 gen8_canonical_addr(exec2_list[i].offset);
1936                         ret = __copy_to_user(&user_exec_list[i].offset,
1937                                              &exec2_list[i].offset,
1938                                              sizeof(user_exec_list[i].offset));
1939                         if (ret) {
1940                                 ret = -EFAULT;
1941                                 DRM_DEBUG("failed to copy %d exec entries "
1942                                           "back to user (%d)\n",
1943                                           args->buffer_count, ret);
1944                                 break;
1945                         }
1946                 }
1947         }
1948
1949         drm_free_large(exec_list);
1950         drm_free_large(exec2_list);
1951         return ret;
1952 }
1953
1954 int
1955 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1956                      struct drm_file *file)
1957 {
1958         struct drm_i915_gem_execbuffer2 *args = data;
1959         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1960         int ret;
1961
1962         if (args->buffer_count < 1 ||
1963             args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1964                 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1965                 return -EINVAL;
1966         }
1967
1968         if (args->rsvd2 != 0) {
1969                 DRM_DEBUG("dirty rsvd2 field\n");
1970                 return -EINVAL;
1971         }
1972
1973         exec2_list = drm_malloc_gfp(args->buffer_count,
1974                                     sizeof(*exec2_list),
1975                                     GFP_TEMPORARY);
1976         if (exec2_list == NULL) {
1977                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1978                           args->buffer_count);
1979                 return -ENOMEM;
1980         }
1981         ret = copy_from_user(exec2_list,
1982                              u64_to_user_ptr(args->buffers_ptr),
1983                              sizeof(*exec2_list) * args->buffer_count);
1984         if (ret != 0) {
1985                 DRM_DEBUG("copy %d exec entries failed %d\n",
1986                           args->buffer_count, ret);
1987                 drm_free_large(exec2_list);
1988                 return -EFAULT;
1989         }
1990
1991         ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1992         if (!ret) {
1993                 /* Copy the new buffer offsets back to the user's exec list. */
1994                 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1995                                    u64_to_user_ptr(args->buffers_ptr);
1996                 int i;
1997
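                /*
                 * Offsets are written back in gen8 canonical form (bit 47
                 * sign-extended upwards) so userspace can pass the same
                 * addresses straight back in, e.g. when submitting with
                 * I915_EXEC_NO_RELOC next time.
                 */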
1998                 for (i = 0; i < args->buffer_count; i++) {
1999                         exec2_list[i].offset =
2000                                 gen8_canonical_addr(exec2_list[i].offset);
2001                         ret = __copy_to_user(&user_exec_list[i].offset,
2002                                              &exec2_list[i].offset,
2003                                              sizeof(user_exec_list[i].offset));
2004                         if (ret) {
2005                                 ret = -EFAULT;
2006                                 DRM_DEBUG("failed to copy %d exec entries "
2007                                           "back to user\n",
2008                                           args->buffer_count);
2009                                 break;
2010                         }
2011                 }
2012         }
2013
2014         drm_free_large(exec2_list);
2015         return ret;
2016 }