* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
*
* Copy GPU pages using the CP DMA engine (CIK+).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
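+ * Returns the fence on success, or an ERR_PTR on failure.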
*/
-int cik_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct reservation_object *resv)
{
struct radeon_semaphore *sem = NULL;
+ struct radeon_fence *fence;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, control;
int i, num_loops;
int r;
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
+ return ERR_PTR(r);
}
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
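+ /* sync the copy to all fences attached to the reservation object */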
- radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
}
- r = radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
- return r;
+ return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, *fence);
+ radeon_semaphore_free(rdev, &sem, fence);
- return r;
+ return fence;
}
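/* Caller sketch (illustrative only, not part of this patch): with the
 * new signature a caller checks the returned pointer instead of an int
 * return code; "src", "dst" and "num_pages" are placeholder variables.
 *
 *	fence = cik_copy_cpdma(rdev, src, dst, num_pages, resv);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 *	return r;
 */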
WREG32(0x15D8, 0);
WREG32(0x15DC, 0);
- /* empty context1-15 */
- /* FIXME start with 4G, once using 2 level pt switch to full
- * vm size space
- */
+ /* restore context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->gart.table_addr >> 12);
+ rdev->vm_manager.saved_table_addr[i]);
else
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
- rdev->gart.table_addr >> 12);
+ rdev->vm_manager.saved_table_addr[i]);
}
/* enable context1-15 */
/**
 * cik_pcie_gart_disable - gart disable
 *
 * @rdev: radeon_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void cik_pcie_gart_disable(struct radeon_device *rdev)
{
+ unsigned i;
+
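+ /* save the VM page table addresses so gart_enable() can restore them */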
+ for (i = 1; i < 16; ++i) {
+ uint32_t reg;
+ if (i < 8)
+ reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+ else
+ reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+ rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+ }
+
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
- if (queue_reset)
- schedule_work(&rdev->reset_work);
+ if (queue_reset) {
+ rdev->needs_reset = true;
+ wake_up_all(&rdev->fence_queue);
+ }
if (queue_thermal)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
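/* Consumer sketch (illustrative only, not part of this patch): instead
 * of scheduling reset_work from the interrupt handler, the fence wait
 * loop woken on fence_queue is expected to notice the flag and bail
 * out, e.g. roughly:
 *
 *	if (rdev->needs_reset)
 *		return -EDEADLK;
 *
 * so the actual GPU reset runs from process context.
 */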
int ret, i;
u16 tmp16;
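+ /* on the root bus there is no upstream bridge (bus->self is NULL) */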
+ if (pci_is_root_bus(rdev->pdev->bus))
+ return;
+
if (radeon_pcie_gen2 == 0)
return;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
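+ /* the clkreq setup also dereferences bus->self; skip it on the root bus */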
- if (!disable_clkreq) {
+ if (!disable_clkreq &&
+ !pci_is_root_bus(rdev->pdev->bus)) {
struct pci_dev *root = rdev->pdev->bus->self;
u32 lnkcap;