/*
 * Copyright 2008 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
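
/* Resolve the relocation chunk: look up every GEM handle exactly once, pick
 * the preferred and allowed domains for each buffer, and build the
 * priority-sorted validation list in p->validated.
 */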
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate, need_mmap_lock = false;
	int r;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}
		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
		/* the first reloc of an UVD job is the msg and that must be in
		   VRAM, also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruptions */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}
		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
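
/* Translate the userspace RADEON_CS_RING_* id (and its priority) into the
 * hardware ring index that is actually present on this ASIC.
 */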
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
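
/* Make this submission wait for the fences attached to every validated BO,
 * so earlier work on other rings that still touches them finishes first.
 */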
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int r = 0;
	unsigned i;

	for (i = 0; i < p->nrelocs; i++) {
		struct reservation_object *resv;

		if (!p->relocs[i].robj)
			continue;

		resv = p->relocs[i].robj->tbo.resv;
		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
					       p->relocs[i].tv.shared);
		if (r)
			break;
	}
	return r;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
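/* Copy the chunk table out of userspace, classify each chunk (IB, relocs,
 * const IB, flags) and record the requested ring and priority.
 */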
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}

	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
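
/* list_sort() comparison callback: order relocations by buffer size,
 * smallest first.
 */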
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicate whether the reservations should be backed off
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
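
/* Parse and submit a command stream that does not use a VM: run the
 * per-ASIC packet checker over the IB and schedule it on the target ring.
 */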
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r)
		DRM_ERROR("Failed to schedule IB !\n");
	return r;
}
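
/* Bring the page tables of the given VM up to date for every buffer this
 * command stream references before the IB is executed.
 */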
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}
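
/* Parse and submit a command stream that executes inside a per-process VM:
 * validate the IB(s), update the page tables, then schedule on the ring.
 */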
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r)
			return r;
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r)
		return r;

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r)
		goto out;

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}
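
/* When a submission failed because of a GPU lockup, attempt a reset so the
 * application gets a chance to resubmit the command stream.
 */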
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
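
/* Copy the IB chunk (and the optional CONST IB chunk) out of userspace into
 * radeon_ib objects that the rings can execute.
 */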
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
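
/* Main CS ioctl entry point: initialise the parser, fill and validate the
 * IB, then hand it to the VM or non-VM submission path.
 */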
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r)
		goto out;
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r)
		goto out;
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: packet index
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a relocation packet3 (NOP).
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = p->relocs_ptr[(idx / 4)];