| Old | New | Code |
|---|---|---|
| 86 | 86 | uint64_t src_offset, |
| 87 | 87 | uint64_t dst_offset, |
| 88 | 88 | unsigned num_gpu_pages, |
| 89 | | - struct reservation_object *resv); |
| | 89 | + struct dma_resv *resv); |
| 90 | 90 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
| 91 | 91 | uint32_t tiling_flags, uint32_t pitch, |
| 92 | 92 | uint32_t offset, uint32_t obj_size); |
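Every hunk in this file repeats the same mechanical change: the `struct reservation_object *resv` parameter of the per-ASIC copy routines becomes `struct dma_resv *resv`. As a hedged sketch of what the renamed dma-buf API looks like on the consumer side (assuming the post-rename names `<linux/dma-resv.h>`, `dma_resv_lock()`, `dma_resv_add_excl_fence()` and the exclusive/shared fence split of that era; the helper itself is hypothetical and not part of this diff):

```c
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/*
 * Hypothetical helper, for illustration only: attach a fence to a
 * reservation object using the renamed dma_resv_* accessors
 * (formerly reservation_object_*).
 */
static int attach_exclusive_fence(struct dma_resv *resv,
				  struct dma_fence *fence)
{
	int r;

	r = dma_resv_lock(resv, NULL);
	if (r)
		return r;

	dma_resv_add_excl_fence(resv, fence);
	dma_resv_unlock(resv);
	return 0;
}
```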
|---|
| Old | New | Code |
|---|---|---|
| 157 | 157 | uint64_t src_offset, |
| 158 | 158 | uint64_t dst_offset, |
| 159 | 159 | unsigned num_gpu_pages, |
| 160 | | - struct reservation_object *resv); |
| | 160 | + struct dma_resv *resv); |
| 161 | 161 | void r200_set_safe_registers(struct radeon_device *rdev); |
| 162 | 162 | |
| 163 | 163 | /* |
|---|
| Old | New | Code |
|---|---|---|
| 347 | 347 | struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
| 348 | 348 | uint64_t src_offset, uint64_t dst_offset, |
| 349 | 349 | unsigned num_gpu_pages, |
| 350 | | - struct reservation_object *resv); |
| | 350 | + struct dma_resv *resv); |
| 351 | 351 | struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, |
| 352 | 352 | uint64_t src_offset, uint64_t dst_offset, |
| 353 | 353 | unsigned num_gpu_pages, |
| 354 | | - struct reservation_object *resv); |
| | 354 | + struct dma_resv *resv); |
| 355 | 355 | void r600_hpd_init(struct radeon_device *rdev); |
| 356 | 356 | void r600_hpd_fini(struct radeon_device *rdev); |
| 357 | 357 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
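Callers hand a buffer object's reservation object to these routines so the returned copy fence can be ordered against the BO's other users. A minimal caller sketch using the `r600_copy_dma()` prototype shown above; the BO field layout (`tbo.base.resv`) and the `radeon_bo_gpu_offset()` helper are assumptions about the surrounding driver code, not part of this diff:

```c
#include <linux/dma-resv.h>

/* Illustrative only: pass the destination BO's dma_resv to the copy hook. */
static struct radeon_fence *copy_bo_pages(struct radeon_device *rdev,
					  struct radeon_bo *src,
					  struct radeon_bo *dst,
					  unsigned num_gpu_pages)
{
	/* After the rename, the BO's reservation object is a struct dma_resv. */
	struct dma_resv *resv = dst->tbo.base.resv;

	return r600_copy_dma(rdev,
			     radeon_bo_gpu_offset(src),
			     radeon_bo_gpu_offset(dst),
			     num_gpu_pages,
			     resv);
}
```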
|---|
| Old | New | Code |
|---|---|---|
| 473 | 473 | struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
| 474 | 474 | uint64_t src_offset, uint64_t dst_offset, |
| 475 | 475 | unsigned num_gpu_pages, |
| 476 | | - struct reservation_object *resv); |
| | 476 | + struct dma_resv *resv); |
| 477 | 477 | u32 rv770_get_xclk(struct radeon_device *rdev); |
| 478 | 478 | int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
| 479 | 479 | int rv770_get_temp(struct radeon_device *rdev); |
|---|
| Old | New | Code |
|---|---|---|
| 547 | 547 | struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, |
| 548 | 548 | uint64_t src_offset, uint64_t dst_offset, |
| 549 | 549 | unsigned num_gpu_pages, |
| 550 | | - struct reservation_object *resv); |
| | 550 | + struct dma_resv *resv); |
| 551 | 551 | int evergreen_get_temp(struct radeon_device *rdev); |
| 552 | 552 | int evergreen_get_allowed_info_register(struct radeon_device *rdev, |
| 553 | 553 | u32 reg, u32 *val); |
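These per-ASIC copy routines are plugged into the copy hooks of `struct radeon_asic` and reached through dispatch macros, which is why every family's prototype changes in lockstep. A minimal sketch of the hook shape after the rename; the struct name `radeon_asic_copy_hooks` is an illustrative stand-in for the driver's real (anonymous) hook block, and the field names are assumptions:

```c
#include <linux/types.h>

struct radeon_device;
struct radeon_fence;
struct dma_resv;

/* Illustrative stand-in: every copy hook now takes a struct dma_resv *
 * instead of a struct reservation_object *. */
struct radeon_asic_copy_hooks {
	struct radeon_fence *(*blit)(struct radeon_device *rdev,
				     uint64_t src_offset, uint64_t dst_offset,
				     unsigned num_gpu_pages,
				     struct dma_resv *resv);
	struct radeon_fence *(*dma)(struct radeon_device *rdev,
				    uint64_t src_offset, uint64_t dst_offset,
				    unsigned num_gpu_pages,
				    struct dma_resv *resv);
	u32 dma_ring_index;
};
```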
|---|
| Old | New | Code |
|---|---|---|
| 725 | 725 | struct radeon_fence *si_copy_dma(struct radeon_device *rdev, |
| 726 | 726 | uint64_t src_offset, uint64_t dst_offset, |
| 727 | 727 | unsigned num_gpu_pages, |
| 728 | | - struct reservation_object *resv); |
| | 728 | + struct dma_resv *resv); |
| 729 | 729 | |
| 730 | 730 | void si_dma_vm_copy_pages(struct radeon_device *rdev, |
| 731 | 731 | struct radeon_ib *ib, |
|---|
| Old | New | Code |
|---|---|---|
| 796 | 796 | struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, |
| 797 | 797 | uint64_t src_offset, uint64_t dst_offset, |
| 798 | 798 | unsigned num_gpu_pages, |
| 799 | | - struct reservation_object *resv); |
| | 799 | + struct dma_resv *resv); |
| 800 | 800 | struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, |
| 801 | 801 | uint64_t src_offset, uint64_t dst_offset, |
| 802 | 802 | unsigned num_gpu_pages, |
| 803 | | - struct reservation_object *resv); |
| | 803 | + struct dma_resv *resv); |
| 804 | 804 | int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); |
| 805 | 805 | int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
| 806 | 806 | bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |