/*
 * Copyright 2008 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
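/* Worked example (derived from the code above, not from the original
 * sources): relocs queued with priorities 3, 1, 3, 2 land in bucket[3],
 * bucket[1], bucket[3] and bucket[2].  Because bucket[i] is spliced onto
 * the head of out_list for i = 0..RADEON_CS_MAX_PRIORITY, the resulting
 * list reads prio-3 (first queued), prio-3 (second), prio-2, prio-1:
 * descending priority, with the original order kept inside each level.
 */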
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	bool duplicate, need_mmap_lock = false;

	if (p->chunk_relocs_idx == -1) {
	chunk = &p->chunks[p->chunk_relocs_idx];

	/* FIXME: we assume that each relocs use 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
			p->relocs[i].handle = 0;

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
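		/* For example (illustration only, not part of the original
		 * comment): a userspace priority of 15 on a buffer that is
		 * written becomes 15 * 2 + 1 = 31, while the same buffer used
		 * read-only becomes 30; RADEON_CS_MAX_PRIORITY (32) stays
		 * reserved for relocations the kernel itself promotes, such
		 * as the UVD message buffer handled below.
		 */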
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* the first reloc of an UVD job is the msg and that must be in
		   VRAM, also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruptions */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {
			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}
		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,

		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

		up_read(&current->mm->mmap_sem);
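/* Note: the mmap_sem read lock around radeon_bo_list_validate() is only
 * taken when a userptr BO was seen (need_mmap_lock above).  Validating such
 * a BO pins its backing user pages, which requires current->mm->mmap_sem to
 * be held for read on kernels of this vintage.
 */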
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

		DRM_ERROR("unknown ring id: %d\n", ring);
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
			p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_cs_reloc *reloc;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
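/* Every BO on the validated list carries a reservation object; syncing the
 * IB's semaphore against the fences stored in that resv is what keeps this
 * submission from overtaking earlier work (possibly on other rings) that
 * still uses the buffer.  The elided final argument is presumably
 * reloc->tv.shared, so read-only use does not have to wait on other readers.
 */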
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	u32 ring = RADEON_CS_RING_GFX;

	if (!cs->num_chunks) {
	INIT_LIST_HEAD(&p->validated);
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];

	/* these are KMS only */
	if ((p->cs_flags & RADEON_CS_USE_VM) &&
	    !p->rdev->vm_manager.enabled) {
		DRM_ERROR("VM not active on asic!\n");

	if (radeon_cs_get_ring(p, ring, priority))

	/* we only support VM on some SI+ rings */
	if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
		if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
			DRM_ERROR("Ring %d requires VM!\n", p->ring);
		if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
			DRM_ERROR("VM not supported on ring %d!\n",
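/* Summary of the chunk layout consumed above: userspace hands the ioctl an
 * array of pointers to drm_radeon_cs_chunk structures, each carrying a
 * chunk_id (IB, RELOCS, CONST_IB or FLAGS), a length in dwords and a user
 * pointer to the payload.  The FLAGS chunk is decoded inline: dword 0 holds
 * the cs_flags, dword 1 (if present) the requested ring and dword 2 the
 * priority that radeon_cs_get_ring() is then called with.
 */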
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
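/* Example: if A's BO spans 4 pages and B's spans 16, the comparator returns
 * 4 - 16 = -12, so list_sort() places A before B and the smallest buffers
 * end up at the head of the validated list.
 */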
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	if (parser->chunk_ib_idx == -1)

	if (parser->cs_flags & RADEON_CS_USE_VM)

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");

	r = radeon_cs_sync_rings(parser);
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
		DRM_ERROR("Failed to schedule IB !\n");
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;

	r = radeon_vm_update_page_directory(rdev, vm);

	r = radeon_vm_clear_freed(rdev, vm);

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);

	return radeon_vm_clear_invalids(rdev, vm);
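/* The update order above is deliberate: the page directory is brought up to
 * date before any page-table work, freed mappings are reclaimed, the
 * ring_tmp_bo mapping used for the IB (vm->ib_bo_va) is refreshed, then
 * every non-duplicate relocation gets its BO mapping updated, and finally
 * radeon_vm_clear_invalids() handles whatever is still marked invalid.
 */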
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;

	if (parser->chunk_ib_idx == -1)
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);

	r = radeon_cs_sync_rings(parser);
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);

	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);

	mutex_unlock(&vm->mutex);
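/* When both an IB and a CONST_IB chunk are present on SI parts (CHIP_TAHITI
 * and newer) the two are scheduled together; the const IB is, presumably,
 * consumed by the constant engine (CE) while the main IB goes to the
 * drawing engine, which is why the pairing is gated on the family check
 * above.
 */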
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
		r = radeon_gpu_reset(rdev);
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;

	if (parser->chunk_ib_idx == -1)

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
				DRM_ERROR("Failed to get const ib !\n");
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->length_dw * 4))

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
		DRM_ERROR("Failed to get ib !\n");
	parser->ib.length_dw = ib_chunk->length_dw;
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
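/* parser->ib.ptr is filled from one of two places: chunks already copied
 * into kernel memory by radeon_cs_parser_init() (the kdata path, used for
 * example for IB chunks on AGP systems) are memcpy'd, while everything else
 * is copied straight from the user pointer here.
 */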
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);

	r = radeon_cs_ib_fill(rdev, &parser);
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);

		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);

	r = radeon_cs_ib_vm_chunk(rdev, &parser);

	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
	header = radeon_get_ib_value(p, idx);
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
	case RADEON_PACKET_TYPE2:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
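/* A packet whose header encodes count N occupies N + 2 dwords in the IB
 * (the header plus N + 1 payload dwords); that is what the bounds check
 * above and the "p->idx += p3reloc.count + 2" advance further down in this
 * file rely on.
 */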
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);

	if (p3reloc.type != RADEON_PACKET_TYPE3)
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;

	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);

	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
		radeon_cs_dump_packet(p, &p3reloc);
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
	/* FIXME: we assume reloc size is 4 dwords */
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
		*cs_reloc = p->relocs_ptr[(idx / 4)];
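/* Each relocation entry in the chunk is 4 dwords (the FIXME above), so the
 * dword offset pulled out of the NOP packet is divided by 4 to index the
 * kernel's relocs array.  The kdata[idx + 0] / kdata[idx + 3] branch reads
 * the GPU offset back directly, presumably serving the legacy UMS path
 * noted above radeon_cs_parser_init().
 */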