/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1
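/*
 * Everything below is compiled in only when debugfs support is configured.
 * Each info node and file shows up under <debugfs>/dri/<minor>/ (typically
 * /sys/kernel/debug/dri/0/) and can be read with plain cat(1).
 */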
#if defined(CONFIG_DEBUG_FS)

enum {
        ACTIVE_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
        return v ? "yes" : "no";
}
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
        B(is_i915g);
        B(is_i945gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(has_fbc);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
        B(supports_tv);
        B(has_bsd_ring);
        B(has_blt_ring);
#undef B

        return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
        if (obj->user_pin_count > 0)
                return "P";
        else if (obj->pin_count > 0)
                return "p";
        else
                return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (obj->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}
static const char *cache_level_str(int type)
{
        switch (type) {
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return " snooped (LLC)";
        case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
        default: return "";
        }
}
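/*
 * describe_obj() below emits the one-line object dump shared by most GEM
 * lists in this file: object pointer, pin and tiling flags, size, read
 * domains, write domain, last rendering/fenced seqnos and cache level,
 * followed by optional dirty/purgeable/name/fence/GTT/mappable/ring notes.
 */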
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->last_fenced_seqno,
                   cache_level_str(obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt offset: %08x, size: %08x)",
                           obj->gtt_offset, (unsigned int)obj->gtt_space->size);
        if (obj->pin_mappable || obj->fault_mappable) {
                char s[3], *t = s;
                if (obj->pin_mappable)
                        *t++ = 'p';
                if (obj->fault_mappable)
                        *t++ = 'f';
                *t = '\0';
                seq_printf(m, " (%s mappable)", s);
        }
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, head, mm_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj);
                seq_printf(m, "\n");
                total_obj_size += obj->base.size;
                total_gtt_size += obj->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}
#define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
                size += obj->gtt_space->size; \
                ++count; \
                if (obj->map_and_fenceable) { \
                        mappable_size += obj->gtt_space->size; \
                        ++mappable_count; \
                } \
        } \
} while (0)
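/*
 * Note that count_objects() deliberately leans on variables from the
 * caller's scope: it expects obj, size, count, mappable_size and
 * mappable_count to be declared (and zeroed) by the function using it,
 * as i915_gem_object_info() does below.
 */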
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 count, mappable_count;
        size_t size, mappable_size;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects, %zu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.gtt_list, gtt_list);
        seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.active_list, mm_list);
        count_objects(&dev_priv->mm.flushing_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.pinned_list, mm_list);
        seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.inactive_list, mm_list);
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.deferred_free_list, mm_list);
        seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                if (obj->fault_mappable) {
                        size += obj->gtt_space->size;
                        ++count;
                }
                if (obj->pin_mappable) {
                        mappable_size += obj->gtt_space->size;
                        ++mappable_count;
                }
        }
        seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
                   mappable_count, mappable_size);
        seq_printf(m, "%u fault mappable objects, %zu bytes\n",
                   count, size);
        seq_printf(m, "%zu [%zu] gtt total\n",
                   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj);
                seq_printf(m, "\n");
                total_obj_size += obj->base.size;
                total_gtt_size += obj->gtt_space->size;
                count++;
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char pipe = pipe_name(crtc->pipe);
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
                                if (obj)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj = work->pending_flip_obj;
                                if (obj)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret, count;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        if (!list_empty(&dev_priv->ring[RCS].request_list)) {
                seq_printf(m, "Render requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[RCS].request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->ring[VCS].request_list)) {
                seq_printf(m, "BSD requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[VCS].request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->ring[BCS].request_list)) {
                seq_printf(m, "BLT requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->ring[BCS].request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        if (count == 0)
                seq_printf(m, "No requests\n");

        return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_ring_buffer *ring)
{
        if (ring->get_seqno) {
                seq_printf(m, "Current sequence (%s): %d\n",
                           ring->name, ring->get_seqno(ring));
                seq_printf(m, "Waiter sequence (%s): %d\n",
                           ring->name, ring->waiting_seqno);
                seq_printf(m, "IRQ sequence (%s): %d\n",
                           ring->name, ring->irq_seqno);
        }
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 0; i < I915_NUM_RINGS; i++)
                i915_ring_seqno_info(m, &dev_priv->ring[i]);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i, pipe;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                for_each_pipe(pipe)
                        seq_printf(m, "Pipe %c stat:         %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable:    %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:  %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:      %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:    %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:  %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:      %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:    %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:  %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:      %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        for (i = 0; i < I915_NUM_RINGS; i++) {
                if (IS_GEN6(dev) || IS_GEN7(dev)) {
                        seq_printf(m, "Graphics Interrupt mask (%s):  %08x\n",
                                   dev_priv->ring[i].name,
                                   I915_READ_IMR(&dev_priv->ring[i]));
                }
                i915_ring_seqno_info(m, &dev_priv->ring[i]);
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

                seq_printf(m, "Fenced object[%2d] = ", i);
                if (obj == NULL)
                        seq_printf(m, "unused");
                else
                        describe_obj(m, obj);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        const volatile u32 __iomem *hws;
        int i;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        hws = (volatile u32 __iomem *)ring->status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        if (!ring->obj) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                const u8 __iomem *virt = ring->virtual_start;
                uint32_t off;

                for (off = 0; off < ring->size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x :  %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        if (ring->size == 0)
                return 0;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Ring %s:\n", ring->name);
        seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
        seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
        seq_printf(m, "  Size :    %08x\n", ring->size);
        seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
        if (IS_GEN6(dev) || IS_GEN7(dev)) {
                seq_printf(m, "  Sync 0 :  %08x\n", I915_READ_SYNC_0(ring));
                seq_printf(m, "  Sync 1 :  %08x\n", I915_READ_SYNC_1(ring));
        }
        seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
        seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static const char *ring_str(int ring)
{
        switch (ring) {
        case RCS: return "render";
        case VCS: return "bsd";
        case BCS: return "blt";
        default: return "";
        }
}
static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}
static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}
static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}
static void print_error_buffers(struct seq_file *m,
                                const char *name,
                                struct drm_i915_error_buffer *err,
                                int count)
{
        seq_printf(m, "%s [%d]:\n", name, count);

        while (count--) {
                seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
                           err->gtt_offset,
                           err->size,
                           err->read_domains,
                           err->write_domain,
                           err->seqno,
                           pin_flag(err->pinned),
                           tiling_flag(err->tiling),
                           dirty_flag(err->dirty),
                           purgeable_flag(err->purgeable),
                           err->ring != -1 ? " " : "",
                           ring_str(err->ring),
                           cache_level_str(err->cache_level));

                if (err->name)
                        seq_printf(m, " (name: %d)", err->name);
                if (err->fence_reg != I915_FENCE_REG_NONE)
                        seq_printf(m, " (fence: %d)", err->fence_reg);

                seq_printf(m, "\n");
                err++;
        }
}
static void i915_ring_error_state(struct seq_file *m,
                                  struct drm_device *dev,
                                  struct drm_i915_error_state *error,
                                  unsigned ring)
{
        seq_printf(m, "%s command stream:\n", ring_str(ring));
        seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
        if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
                seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
        }
        if (INTEL_INFO(dev)->gen >= 4)
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
                seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
        }
        seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
}
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);

        for (i = 0; i < dev_priv->num_fence_regs; i++)
                seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
                seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }

        i915_ring_error_state(m, dev, error, RCS);
        if (HAS_BLT(dev))
                i915_ring_error_state(m, dev, error, BCS);
        if (HAS_BSD(dev))
                i915_ring_error_state(m, dev, error, VCS);

        if (error->active_bo)
                print_error_buffers(m, "Active",
                                    error->active_bo,
                                    error->active_bo_count);

        if (error->pinned_bo)
                print_error_buffers(m, "Pinned",
                                    error->pinned_bo,
                                    error->pinned_bo_count);

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
                if (error->ringbuffer[i]) {
                        struct drm_i915_error_object *obj = error->ringbuffer[i];
                        seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n",
                                                   offset,
                                                   obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

        if (error->display)
                intel_display_print_error_state(m, dev, error->display);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        crstanddelay = I915_READ16(CRSTANDVID);

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (IS_GEN5(dev)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                u32 rpstat;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                int max_freq;

                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
                        return ret;

                gen6_gt_force_wake_get(dev_priv);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
                rpcurup = I915_READ(GEN6_RP_CUR_UP);
                rpprevup = I915_READ(GEN6_RP_PREV_UP);
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

                gen6_gt_force_wake_put(dev_priv);
                mutex_unlock(&dev->struct_mutex);

                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & 0xff00) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
                                                GEN6_CAGF_SHIFT) * 50);
                seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
                           GEN6_CURICONT_MASK);
                seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
                           GEN6_CURIAVG_MASK);
                seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
                           GEN6_CURBSYTAVG_MASK);

                max_freq = (rp_state_cap & 0xff0000) >> 16;
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           max_freq * 50);

                max_freq = (rp_state_cap & 0xff00) >> 8;
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           max_freq * 50);

                max_freq = rp_state_cap & 0xff;
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           max_freq * 50);
        } else {
                seq_printf(m, "no P-state info available\n");
        }

        return 0;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}
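/*
 * MAP_TO_MV decodes a hardware VID value into millivolts: VID 0 maps to
 * 1250 mV and each step subtracts 25 mV, so e.g. MAP_TO_MV(10) == 1000.
 */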
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
        seq_printf(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_printf(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_printf(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_printf(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_printf(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_printf(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_printf(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_printf(m, "unknown\n");
                break;
        }

        return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1;
        int count = 0, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (atomic_read(&dev_priv->forcewake_count)) {
                seq_printf(m, "RC information inaccurate because userspace "
                              "holds a reference\n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
                        udelay(10);
                seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
        }

        gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_ENABLE));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                         GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_printf(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_printf(m, "Core Power Down\n");
                else
                        seq_printf(m, "on\n");
                break;
        case GEN6_RC3:
                seq_printf(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_printf(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_printf(m, "RC7\n");
                break;
        default:
                seq_printf(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

        return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;

        if (IS_GEN6(dev) || IS_GEN7(dev))
                return gen6_drpc_info(m);
        else
                return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                case FBC_MODULE_PARAM:
                        seq_printf(m, "disabled per module param (default off)");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (HAS_PCH_SPLIT(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        int gpu_freq, ia_freq;

        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
                seq_printf(m, "unsupported on this chipset\n");
                return 0;
        }

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

        for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
             gpu_freq++) {
                I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
                I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
                           GEN6_PCODE_READ_MIN_FREQ_TABLE);
                if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
                              GEN6_PCODE_READY) == 0, 10)) {
                        DRM_ERROR("pcode read of freq table timed out\n");
                        continue;
                }
                ia_freq = I915_READ(GEN6_PCODE_DATA);
                seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, fb->obj);
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, fb->obj);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        if (dev_priv->pwrctx) {
                seq_printf(m, "power context ");
                describe_obj(m, dev_priv->pwrctx);
                seq_printf(m, "\n");
        }

        if (dev_priv->renderctx) {
                seq_printf(m, "render context ");
                describe_obj(m, dev_priv->renderctx);
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        seq_printf(m, "forcewake count = %d\n",
                   atomic_read(&dev_priv->forcewake_count));

        return 0;
}
static int
i915_debugfs_common_open(struct inode *inode,
                         struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}
static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                       "wedged : %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);
        i915_handle_error(dev, val);

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_debugfs_common_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
};
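/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/dri/0/i915_wedged        # read the wedged count
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged   # inject a GPU error
 *
 * A write hands the parsed value to i915_handle_error() as its wedged
 * argument, kicking off the driver's error handling / reset path.
 */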
static ssize_t
i915_max_freq_read(struct file *filp,
                   char __user *ubuf,
                   size_t max,
                   loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                       "max freq: %d\n", dev_priv->max_delay * 50);

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
                    const char __user *ubuf,
                    size_t cnt,
                    loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

        /*
         * Turbo will still be enabled, but won't go above the set value.
         */
        dev_priv->max_delay = val / 50;

        gen6_set_rps(dev, val / 50);

        return cnt;
}

static const struct file_operations i915_max_freq_fops = {
        .owner = THIS_MODULE,
        .open = i915_debugfs_common_open,
        .read = i915_max_freq_read,
        .write = i915_max_freq_write,
        .llseek = default_llseek,
};
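/*
 * Usage sketch: i915_max_freq reads back and accepts a frequency in MHz.
 * Written values are divided by 50, so they are effectively rounded down
 * to the 50 MHz steps the RPS code works in, e.g.:
 *
 *   echo 900 > /sys/kernel/debug/dri/0/i915_max_freq
 */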
static ssize_t
i915_cache_sharing_read(struct file *filp,
                        char __user *ubuf,
                        size_t max,
                        loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        u32 snpcr;
        int len;

        mutex_lock(&dev_priv->dev->struct_mutex);
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        mutex_unlock(&dev_priv->dev->struct_mutex);

        len = snprintf(buf, sizeof(buf),
                       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
                       GEN6_MBC_SNPCR_SHIFT);

        if (len > sizeof(buf))
                len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
                         const char __user *ubuf,
                         size_t cnt,
                         loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        char buf[20];
        u32 snpcr;
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof(buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        if (val < 0 || val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

        /* Update the cache sharing policy here as well */
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

        return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
        .owner = THIS_MODULE,
        .open = i915_debugfs_common_open,
        .read = i915_cache_sharing_read,
        .write = i915_cache_sharing_write,
        .llseek = default_llseek,
};
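/*
 * Usage sketch: i915_cache_sharing exposes the snoop-control field of
 * GEN6_MBCUNIT_SNPCR as a value from 0 to 3; the write handler above does
 * a read-modify-write of the register and rejects anything outside that
 * range with -EINVAL.
 */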
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;

        mutex_lock(&minor->debugfs_lock);
        list_add(&node->list, &minor->debugfs_list);
        mutex_unlock(&minor->debugfs_lock);

        return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (!IS_GEN6(dev))
                return 0;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
        gen6_gt_force_wake_get(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
int i915_forcewake_release(struct inode *inode, struct file *file)
{
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!IS_GEN6(dev))
                return 0;

        /*
         * It's bad that we can potentially hang userspace if struct_mutex gets
         * forever stuck.  However, if we cannot acquire this lock it means that
         * almost certainly the driver has hung and is not unload-able.
         * Therefore hanging here is probably a minor inconvenience not to be
         * seen by almost any user.
         */
        mutex_lock(&dev->struct_mutex);
        gen6_gt_force_wake_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
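/*
 * Usage sketch: holding i915_forcewake_user open keeps the GT forcewake
 * reference taken in open() until release(), so e.g.
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *
 * pins the GT awake until fd 3 is closed again (the open/release hooks
 * above are no-ops on anything other than gen6).
 */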
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_forcewake_user",
                                  S_IRUSR,
                                  root, dev,
                                  &i915_forcewake_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
                               struct drm_minor *minor,
                               const char *name,
                               const struct file_operations *fops)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file(name,
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
        {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
        {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_debugfs_create(minor->debugfs_root, minor,
                                  "i915_wedged",
                                  &i915_wedged_fops);
        if (ret)
                return ret;

        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        ret = i915_debugfs_create(minor->debugfs_root, minor,
                                  "i915_max_freq",
                                  &i915_max_freq_fops);
        if (ret)
                return ret;

        ret = i915_debugfs_create(minor->debugfs_root, minor,
                                  "i915_cache_sharing",
                                  &i915_cache_sharing_fops);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
                                 1, minor);
}
#endif /* CONFIG_DEBUG_FS */