drm/amdgpu: move fence structure into amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  [cascardo/linux.git]
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

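/*
 * Illustrative sketch only (not part of the driver, compiled out): a minimal
 * view of how a caller could use the fence API defined below, assuming the
 * ring is already set up and has space for the fence packet.
 * amdgpu_fence_emit() allocates the fence and writes the fence command; the
 * hardware later writes the sequence number back, the interrupt (or fallback
 * timer) calls amdgpu_fence_process(), and waiters are released through the
 * common struct fence machinery.  example_emit_and_wait() is hypothetical.
 */
#if 0
static int example_emit_and_wait(struct amdgpu_ring *ring)
{
        struct fence *f;
        int r;

        r = amdgpu_fence_emit(ring, &f);        /* emit fence command on the ring */
        if (r)
                return r;

        r = fence_wait(f, false);               /* block until the seq is signaled */
        fence_put(f);                           /* drop our reference */
        return r;
}
#endif
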
struct amdgpu_fence {
        struct fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
        uint64_t                        seq;

        wait_queue_t                    fence_wake;
};

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = lower_32_bits(atomic64_read(&drv->last_seq));

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;

        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        fence->seq = ++ring->fence_drv.sync_seq;
        fence->ring = ring;
        fence_init(&fence->base, &amdgpu_fence_ops,
                   &ring->fence_drv.fence_queue.lock,
                   adev->fence_context + ring->idx,
                   fence->seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               fence->seq, AMDGPU_FENCE_FLAG_INT);
        *f = &fence->base;
        return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that other
         * process needs to update last_seq between the atomic read and the
         * xchg of the current process.
         *
         * Moreover, for this to become an infinite loop there need to be
         * continuously new fences signaled, i.e. amdgpu_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read. Which means that the current process
         * needs to be interrupted after amdgpu_fence_read and before the
         * atomic xchg.
         *
         * To be even more safe we count the number of times we loop and
         * bail after 10 loops, just accepting the fact that we might
         * have temporarily set last_seq not to the true real last
         * seq but to an older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq;
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (seq < last_emitted)
                amdgpu_fence_schedule_fallback(ring);

        return wake;
}
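
/*
 * Worked example of the 32->64 bit sequence extension performed above
 * (illustrative values only).  If the driver's 64-bit last_seq is
 * 0x00000001fffffff0 and the 32-bit value read back from the fence
 * location is 0x00000010, splicing in the old upper half gives
 * 0x0000000100000010, which is below last_seq; the low half therefore
 * wrapped, so the upper half is taken from last_emitted instead (say
 * last_emitted is 0x0000000200000010), yielding 0x0000000200000010.
 * The hypothetical helper below restates that logic in isolation and is
 * not part of the driver.
 */
#if 0
static uint64_t example_extend_seq(uint64_t last_seq, uint64_t last_emitted,
                                   uint32_t hw_seq)
{
        uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

        if (seq < last_seq)     /* low 32 bits wrapped past last_seq */
                seq = hw_seq | (last_emitted & 0xffffffff00000000ULL);
        return seq;
}
#endif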

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->fence_drv.fence_queue);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to an unsigned long (timer callback data)
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
        struct amdgpu_ring *ring = (void *)arg;

        amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        amdgpu_fence_process(ring);
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        return false;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for a seq of the specified ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * return value:
 * 0: seq signaled, and gpu not hung
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
        BUG_ON(!ring);
        if (seq > ring->fence_drv.sync_seq)
                return -EINVAL;

        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return 0;

        amdgpu_fence_schedule_fallback(ring);
        wait_event(ring->fence_drv.fence_queue,
                   amdgpu_fence_seq_signaled(ring, seq));

        return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = ring->fence_drv.sync_seq;

        if (!seq)
                return 0;

        return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by the ring lock when reading the last sequence,
         * but it's ok to report a slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = ring->fence_drv.sync_seq
                - atomic64_read(&ring->fence_drv.last_seq);
        /* to avoid 32-bit wrap-around */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, error otherwise.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        long timeout;
        int r;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
                    (unsigned long)ring);

        init_waitqueue_head(&ring->fence_drv.fence_queue);

        timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
        if (timeout == 0) {
                /*
                 * FIXME:
                 * A delayed workqueue cannot use MAX_SCHEDULE_TIMEOUT
                 * directly, so the scheduler will not use a delayed
                 * workqueue if MAX_SCHEDULE_TIMEOUT is set.
                 * Currently keep it simple and silly.
                 */
                timeout = MAX_SCHEDULE_TIMEOUT;
        }
        r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
                           amdgpu_sched_hw_submission,
                           timeout, ring->name);
        if (r) {
                DRM_ERROR("Failed to create scheduler on ring %s.\n",
                          ring->name);
                return r;
        }

        return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
                amdgpu_fence_slab = kmem_cache_create(
                        "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!amdgpu_fence_slab)
                        return -ENOMEM;
        }
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        int i, r;

        if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
                kmem_cache_destroy(amdgpu_fence_slab);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                wake_up_all(&ring->fence_drv.fence_queue);
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                amd_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(adev);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        }
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Test the fence sequence number if it is already signaled. If it isn't
 * signaled, start fence processing. Returns true if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        amdgpu_fence_process(ring);

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        return false;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
        struct amdgpu_fence *fence;
        struct amdgpu_device *adev;
        u64 seq;
        int ret;

        fence = container_of(wait, struct amdgpu_fence, fence_wake);
        adev = fence->ring->adev;

        /*
         * We cannot use amdgpu_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->ring->fence_drv.last_seq);
        if (seq >= fence->seq) {
                ret = fence_signal_locked(&fence->base);
                if (!ret)
                        FENCE_TRACE(&fence->base, "signaled from irq context\n");
                else
                        FENCE_TRACE(&fence->base, "was already signaled\n");

                __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
                fence_put(&fence->base);
        } else
                FENCE_TRACE(&fence->base, "pending\n");
        return 0;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return false;

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
        fence_get(f);
        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
}

static void amdgpu_fence_release(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        kmem_cache_free(amdgpu_fence_slab, fence);
}

static const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = fence_default_wait,
        .release = amdgpu_fence_release,
};

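/*
 * Illustrative sketch only (not part of the driver, compiled out): how the
 * common fence framework drives the ops table above.  fence_wait() and
 * fence_add_callback() invoke .enable_signaling, which arms fence_wake on the
 * ring's fence_queue; when amdgpu_fence_process() calls wake_up_all(),
 * amdgpu_fence_check_signaled() runs, signals the fence, and any registered
 * callbacks such as the hypothetical one below are invoked.
 */
#if 0
static void example_fence_cb(struct fence *f, struct fence_cb *cb)
{
        pr_info("fence %u on %s signaled\n", f->seqno,
                f->ops->get_timeline_name(f));
}

static int example_register_cb(struct fence *f, struct fence_cb *cb)
{
        /* returns -ENOENT if the fence has already signaled */
        return fence_add_callback(f, cb, example_fence_cb);
}
#endif
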
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           ring->fence_drv.sync_seq);
        }
        return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        seq_printf(m, "gpu reset\n");
        amdgpu_gpu_reset(adev);

        return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
        {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
        return 0;
#endif
}
