/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPUs pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
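/*
 * Typical lifecycle, as a rough sketch (the real call sites live
 * elsewhere in the driver; the names below are the functions defined in
 * this file plus the core fence API):
 *
 *	struct fence *f;
 *	r = amdgpu_fence_emit(ring, &f);	// allocate fence, emit to ring
 *	...					// GPU executes, writes seq value
 *	fence_wait(f, false);			// block until the fence signals
 *	fence_put(f);				// drop the reference
 */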
struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
	uint64_t		seq;

	wait_queue_t		fence_wake;
};
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}
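/*
 * Note: the hardware fence location only holds the lower 32 bits of the
 * sequence number; the driver keeps full 64 bit sequence numbers and
 * reconstructs the upper half in amdgpu_fence_activity() below.
 */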
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	fence->seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   fence->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       fence->seq, AMDGPU_FENCE_FLAG_INT);
	*f = &fence->base;
	return 0;
}
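/*
 * Note that fence_init() above borrows fence_queue.lock as the fence's
 * own lock; this is why the waitqueue callback further down must use
 * the unlocked fence_signal_locked()/__remove_wait_queue() variants.
 */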
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to loop forever there would need to be a
	 * continuous stream of newly signaled fences, i.e. amdgpu_fence_read
	 * needs to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * needs to be interrupted after amdgpu_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq;
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			/* the 32 bit hw counter wrapped, take the upper
			 * bits from the last emitted seq instead */
			seq &= 0xffffffffLL;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_fallback(ring);

	return wake;
}
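/*
 * Worked example of the 32->64 bit extension in the loop above
 * (illustrative values only): with last_seq = 0x1fffffff0 the hardware
 * may report 0x00000005 after wrapping, giving seq = 0x100000005, which
 * is < last_seq; the upper bits are then taken from last_emitted
 * instead, e.g. seq = 0x200000005 when last_emitted = 0x200000010.
 */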
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring structure, cast to an unsigned long
 *
 * Timer callback that checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_ring_wait_seq().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}
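/*
 * wait_event() in amdgpu_fence_ring_wait_seq() evaluates this condition
 * without any lock held; that is safe here since the check is a pair of
 * atomic reads plus a repoll of the ring.
 */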
/**
 * amdgpu_fence_ring_wait_seq - wait for seq of the specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * Returns:
 * 0: seq signaled, and gpu not hung
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	if (seq > ring->fence_drv.sync_seq)
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	amdgpu_fence_schedule_fallback(ring);
	wait_event(ring->fence_drv.fence_queue,
		   amdgpu_fence_seq_signaled(ring, seq));

	return 0;
}
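/*
 * Note: wait_event() above sleeps uninterruptibly. The fallback timer
 * scheduled just before it guarantees that the condition is re-polled
 * even if the fence interrupt never fires.
 */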
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for the fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq;

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid a 32 bit wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
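/*
 * The default (non-UVD) path above points the fence at one 32 bit slot
 * in the GPU writeback area: wb.wb[] is indexed in 32 bit words, hence
 * the "* 4" when translating fence_offs into a byte-based GPU address.
 */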
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	long timeout;
	int r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * A zero timeout means an infinite wait.
		 * The delayed workqueue cannot use it directly,
		 * so the scheduler will not use a delayed workqueue if
		 * MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   amdgpu_sched_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
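/*
 * Writing sync_seq (the last emitted sequence number) into the fence
 * location makes every outstanding fence read back as signaled, so all
 * waiters complete on their next check.
 */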
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: the fence to test
 *
 * Test the fence sequence number if it is already signaled. If it isn't
 * signaled start fence processing. Returns True if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}
/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue,
				    &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: the fence to enable signaling on
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence_get(f);
	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}
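/*
 * The fence_get() in amdgpu_fence_enable_signaling() pairs with the
 * fence_put() in amdgpu_fence_check_signaled(): the armed waitqueue
 * entry holds a reference so the fence cannot be freed while queued.
 */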
static void amdgpu_fence_release(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};
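/*
 * Only .wait is generic here: waiting is delegated to the core
 * fence_default_wait(), which in turn relies on .enable_signaling
 * above to arm the waitqueue callback.
 */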
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}
/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}
static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}