.. | .. |
---|
28 | 28 | #ifndef __AMDGPU_H__ |
---|
29 | 29 | #define __AMDGPU_H__ |
---|
30 | 30 | |
---|
| 31 | +#ifdef pr_fmt |
---|
| 32 | +#undef pr_fmt |
---|
| 33 | +#endif |
---|
| 34 | + |
---|
| 35 | +#define pr_fmt(fmt) "amdgpu: " fmt |
---|
| 36 | + |
---|
| 37 | +#ifdef dev_fmt |
---|
| 38 | +#undef dev_fmt |
---|
| 39 | +#endif |
---|
| 40 | + |
---|
| 41 | +#define dev_fmt(fmt) "amdgpu: " fmt |
---|
| 42 | + |
---|
| 43 | +#include "amdgpu_ctx.h" |
---|
| 44 | + |
---|
31 | 45 | #include <linux/atomic.h> |
---|
32 | 46 | #include <linux/wait.h> |
---|
33 | 47 | #include <linux/list.h> |
---|
.. | .. |
---|
35 | 49 | #include <linux/rbtree.h> |
---|
36 | 50 | #include <linux/hashtable.h> |
---|
37 | 51 | #include <linux/dma-fence.h> |
---|
| 52 | +#include <linux/pci.h> |
---|
| 53 | +#include <linux/aer.h> |
---|
38 | 54 | |
---|
39 | 55 | #include <drm/ttm/ttm_bo_api.h> |
---|
40 | 56 | #include <drm/ttm/ttm_bo_driver.h> |
---|
.. | .. |
---|
42 | 58 | #include <drm/ttm/ttm_module.h> |
---|
43 | 59 | #include <drm/ttm/ttm_execbuf_util.h> |
---|
44 | 60 | |
---|
45 | | -#include <drm/drmP.h> |
---|
46 | | -#include <drm/drm_gem.h> |
---|
47 | 61 | #include <drm/amdgpu_drm.h> |
---|
| 62 | +#include <drm/drm_gem.h> |
---|
| 63 | +#include <drm/drm_ioctl.h> |
---|
48 | 64 | #include <drm/gpu_scheduler.h> |
---|
49 | 65 | |
---|
50 | 66 | #include <kgd_kfd_interface.h> |
---|
.. | .. |
---|
67 | 83 | #include "amdgpu_uvd.h" |
---|
68 | 84 | #include "amdgpu_vce.h" |
---|
69 | 85 | #include "amdgpu_vcn.h" |
---|
| 86 | +#include "amdgpu_jpeg.h" |
---|
70 | 87 | #include "amdgpu_mn.h" |
---|
71 | 88 | #include "amdgpu_gmc.h" |
---|
| 89 | +#include "amdgpu_gfx.h" |
---|
| 90 | +#include "amdgpu_sdma.h" |
---|
| 91 | +#include "amdgpu_nbio.h" |
---|
72 | 92 | #include "amdgpu_dm.h" |
---|
73 | 93 | #include "amdgpu_virt.h" |
---|
| 94 | +#include "amdgpu_csa.h" |
---|
74 | 95 | #include "amdgpu_gart.h" |
---|
75 | 96 | #include "amdgpu_debugfs.h" |
---|
76 | 97 | #include "amdgpu_job.h" |
---|
77 | 98 | #include "amdgpu_bo_list.h" |
---|
| 99 | +#include "amdgpu_gem.h" |
---|
| 100 | +#include "amdgpu_doorbell.h" |
---|
| 101 | +#include "amdgpu_amdkfd.h" |
---|
| 102 | +#include "amdgpu_smu.h" |
---|
| 103 | +#include "amdgpu_discovery.h" |
---|
| 104 | +#include "amdgpu_mes.h" |
---|
| 105 | +#include "amdgpu_umc.h" |
---|
| 106 | +#include "amdgpu_mmhub.h" |
---|
| 107 | +#include "amdgpu_gfxhub.h" |
---|
| 108 | +#include "amdgpu_df.h" |
---|
| 109 | + |
---|
| 110 | +#define MAX_GPU_INSTANCE 16 |
---|
| 111 | + |
---|
| 112 | +struct amdgpu_gpu_instance |
---|
| 113 | +{ |
---|
| 114 | + struct amdgpu_device *adev; |
---|
| 115 | + int mgpu_fan_enabled; |
---|
| 116 | +}; |
---|
| 117 | + |
---|
| 118 | +struct amdgpu_mgpu_info |
---|
| 119 | +{ |
---|
| 120 | + struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; |
---|
| 121 | + struct mutex mutex; |
---|
| 122 | + uint32_t num_gpu; |
---|
| 123 | + uint32_t num_dgpu; |
---|
| 124 | + uint32_t num_apu; |
---|
| 125 | +}; |
---|
| 126 | + |
---|
| 127 | +#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 |
---|
78 | 128 | |
---|
79 | 129 | /* |
---|
80 | 130 | * Modules parameters. |
---|
.. | .. |
---|
92 | 142 | extern int amdgpu_hw_i2c; |
---|
93 | 143 | extern int amdgpu_pcie_gen2; |
---|
94 | 144 | extern int amdgpu_msi; |
---|
95 | | -extern int amdgpu_lockup_timeout; |
---|
| 145 | +extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; |
---|
96 | 146 | extern int amdgpu_dpm; |
---|
97 | 147 | extern int amdgpu_fw_load_type; |
---|
98 | 148 | extern int amdgpu_aspm; |
---|
.. | .. |
---|
106 | 156 | extern int amdgpu_vm_fault_stop; |
---|
107 | 157 | extern int amdgpu_vm_debug; |
---|
108 | 158 | extern int amdgpu_vm_update_mode; |
---|
| 159 | +extern int amdgpu_exp_hw_support; |
---|
109 | 160 | extern int amdgpu_dc; |
---|
110 | 161 | extern int amdgpu_sched_jobs; |
---|
111 | 162 | extern int amdgpu_sched_hw_submission; |
---|
.. | .. |
---|
117 | 168 | extern char *amdgpu_disable_cu; |
---|
118 | 169 | extern char *amdgpu_virtual_display; |
---|
119 | 170 | extern uint amdgpu_pp_feature_mask; |
---|
120 | | -extern int amdgpu_vram_page_split; |
---|
121 | | -extern int amdgpu_ngg; |
---|
122 | | -extern int amdgpu_prim_buf_per_se; |
---|
123 | | -extern int amdgpu_pos_buf_per_se; |
---|
124 | | -extern int amdgpu_cntl_sb_buf_per_se; |
---|
125 | | -extern int amdgpu_param_buf_per_se; |
---|
| 171 | +extern uint amdgpu_force_long_training; |
---|
126 | 172 | extern int amdgpu_job_hang_limit; |
---|
127 | 173 | extern int amdgpu_lbpw; |
---|
128 | 174 | extern int amdgpu_compute_multipipe; |
---|
129 | 175 | extern int amdgpu_gpu_recovery; |
---|
130 | 176 | extern int amdgpu_emu_mode; |
---|
131 | 177 | extern uint amdgpu_smu_memory_pool_size; |
---|
| 178 | +extern uint amdgpu_dc_feature_mask; |
---|
| 179 | +extern uint amdgpu_dc_debug_mask; |
---|
| 180 | +extern uint amdgpu_dm_abm_level; |
---|
| 181 | +extern int amdgpu_backlight; |
---|
| 182 | +extern struct amdgpu_mgpu_info mgpu_info; |
---|
| 183 | +extern int amdgpu_ras_enable; |
---|
| 184 | +extern uint amdgpu_ras_mask; |
---|
| 185 | +extern int amdgpu_bad_page_threshold; |
---|
| 186 | +extern int amdgpu_async_gfx_ring; |
---|
| 187 | +extern int amdgpu_mcbp; |
---|
| 188 | +extern int amdgpu_discovery; |
---|
| 189 | +extern int amdgpu_mes; |
---|
| 190 | +extern int amdgpu_noretry; |
---|
| 191 | +extern int amdgpu_force_asic_type; |
---|
| 192 | +#ifdef CONFIG_HSA_AMD |
---|
| 193 | +extern int sched_policy; |
---|
| 194 | +extern bool debug_evictions; |
---|
| 195 | +extern bool no_system_mem_limit; |
---|
| 196 | +#else |
---|
| 197 | +static const int sched_policy = KFD_SCHED_POLICY_HWS; |
---|
| 198 | +static const bool debug_evictions; /* = false */ |
---|
| 199 | +static const bool no_system_mem_limit; |
---|
| 200 | +#endif |
---|
| 201 | + |
---|
| 202 | +extern int amdgpu_tmz; |
---|
| 203 | +extern int amdgpu_reset_method; |
---|
132 | 204 | |
---|
133 | 205 | #ifdef CONFIG_DRM_AMDGPU_SI |
---|
134 | 206 | extern int amdgpu_si_support; |
---|
.. | .. |
---|
136 | 208 | #ifdef CONFIG_DRM_AMDGPU_CIK |
---|
137 | 209 | extern int amdgpu_cik_support; |
---|
138 | 210 | #endif |
---|
| 211 | +extern int amdgpu_num_kcq; |
---|
139 | 212 | |
---|
| 213 | +#define AMDGPU_VM_MAX_NUM_CTX 4096 |
---|
140 | 214 | #define AMDGPU_SG_THRESHOLD (256*1024*1024) |
---|
141 | 215 | #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ |
---|
142 | 216 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 |
---|
143 | 217 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
---|
144 | 218 | #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) |
---|
145 | | -/* AMDGPU_IB_POOL_SIZE must be a power of 2 */ |
---|
146 | | -#define AMDGPU_IB_POOL_SIZE 16 |
---|
147 | 219 | #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 |
---|
148 | 220 | #define AMDGPUFB_CONN_LIMIT 4 |
---|
149 | 221 | #define AMDGPU_BIOS_NUM_SCRATCH 16 |
---|
150 | 222 | |
---|
151 | | -/* max number of IP instances */ |
---|
152 | | -#define AMDGPU_MAX_SDMA_INSTANCES 2 |
---|
| 223 | +#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */ |
---|
153 | 224 | |
---|
154 | 225 | /* hard reset data */ |
---|
155 | 226 | #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b |
---|
.. | .. |
---|
171 | 242 | #define AMDGPU_RESET_VCE (1 << 13) |
---|
172 | 243 | #define AMDGPU_RESET_VCE1 (1 << 14) |
---|
173 | 244 | |
---|
174 | | -/* GFX current status */ |
---|
175 | | -#define AMDGPU_GFX_NORMAL_MODE 0x00000000L |
---|
176 | | -#define AMDGPU_GFX_SAFE_MODE 0x00000001L |
---|
177 | | -#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L |
---|
178 | | -#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L |
---|
179 | | -#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L |
---|
180 | | - |
---|
181 | 245 | /* max cursor sizes (in pixels) */ |
---|
182 | 246 | #define CIK_CURSOR_WIDTH 128 |
---|
183 | 247 | #define CIK_CURSOR_HEIGHT 128 |
---|
.. | .. |
---|
190 | 254 | struct amdgpu_fpriv; |
---|
191 | 255 | struct amdgpu_bo_va_mapping; |
---|
192 | 256 | struct amdgpu_atif; |
---|
| 257 | +struct kfd_vm_fault_info; |
---|
| 258 | +struct amdgpu_hive_info; |
---|
193 | 259 | |
---|
194 | 260 | enum amdgpu_cp_irq { |
---|
195 | | - AMDGPU_CP_IRQ_GFX_EOP = 0, |
---|
| 261 | + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0, |
---|
| 262 | + AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP, |
---|
196 | 263 | AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP, |
---|
197 | 264 | AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP, |
---|
198 | 265 | AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP, |
---|
.. | .. |
---|
203 | 270 | AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP, |
---|
204 | 271 | |
---|
205 | 272 | AMDGPU_CP_IRQ_LAST |
---|
206 | | -}; |
---|
207 | | - |
---|
208 | | -enum amdgpu_sdma_irq { |
---|
209 | | - AMDGPU_SDMA_IRQ_TRAP0 = 0, |
---|
210 | | - AMDGPU_SDMA_IRQ_TRAP1, |
---|
211 | | - |
---|
212 | | - AMDGPU_SDMA_IRQ_LAST |
---|
213 | 273 | }; |
---|
214 | 274 | |
---|
215 | 275 | enum amdgpu_thermal_irq { |
---|
.. | .. |
---|
223 | 283 | AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, |
---|
224 | 284 | AMDGPU_CP_KIQ_IRQ_LAST |
---|
225 | 285 | }; |
---|
| 286 | +#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ |
---|
| 287 | +#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ |
---|
| 288 | +#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ |
---|
| 289 | +#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */ |
---|
226 | 290 | |
---|
227 | 291 | int amdgpu_device_ip_set_clockgating_state(void *dev, |
---|
228 | 292 | enum amd_ip_block_type block_type, |
---|
.. | .. |
---|
255 | 319 | const struct amd_ip_funcs *funcs; |
---|
256 | 320 | }; |
---|
257 | 321 | |
---|
| 322 | +#define HW_REV(_Major, _Minor, _Rev) \ |
---|
| 323 | + ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) |
---|
| 324 | + |
---|
258 | 325 | struct amdgpu_ip_block { |
---|
259 | 326 | struct amdgpu_ip_block_status status; |
---|
260 | 327 | const struct amdgpu_ip_block_version *version; |
---|
.. | .. |
---|
270 | 337 | |
---|
271 | 338 | int amdgpu_device_ip_block_add(struct amdgpu_device *adev, |
---|
272 | 339 | const struct amdgpu_ip_block_version *ip_block_version); |
---|
273 | | - |
---|
274 | | -/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */ |
---|
275 | | -struct amdgpu_buffer_funcs { |
---|
276 | | - /* maximum bytes in a single operation */ |
---|
277 | | - uint32_t copy_max_bytes; |
---|
278 | | - |
---|
279 | | - /* number of dw to reserve per operation */ |
---|
280 | | - unsigned copy_num_dw; |
---|
281 | | - |
---|
282 | | - /* used for buffer migration */ |
---|
283 | | - void (*emit_copy_buffer)(struct amdgpu_ib *ib, |
---|
284 | | - /* src addr in bytes */ |
---|
285 | | - uint64_t src_offset, |
---|
286 | | - /* dst addr in bytes */ |
---|
287 | | - uint64_t dst_offset, |
---|
288 | | - /* number of byte to transfer */ |
---|
289 | | - uint32_t byte_count); |
---|
290 | | - |
---|
291 | | - /* maximum bytes in a single operation */ |
---|
292 | | - uint32_t fill_max_bytes; |
---|
293 | | - |
---|
294 | | - /* number of dw to reserve per operation */ |
---|
295 | | - unsigned fill_num_dw; |
---|
296 | | - |
---|
297 | | - /* used for buffer clearing */ |
---|
298 | | - void (*emit_fill_buffer)(struct amdgpu_ib *ib, |
---|
299 | | - /* value to write to memory */ |
---|
300 | | - uint32_t src_data, |
---|
301 | | - /* dst addr in bytes */ |
---|
302 | | - uint64_t dst_offset, |
---|
303 | | - /* number of byte to fill */ |
---|
304 | | - uint32_t byte_count); |
---|
305 | | -}; |
---|
306 | | - |
---|
307 | | -/* provided by hw blocks that can write ptes, e.g., sdma */ |
---|
308 | | -struct amdgpu_vm_pte_funcs { |
---|
309 | | - /* number of dw to reserve per operation */ |
---|
310 | | - unsigned copy_pte_num_dw; |
---|
311 | | - |
---|
312 | | - /* copy pte entries from GART */ |
---|
313 | | - void (*copy_pte)(struct amdgpu_ib *ib, |
---|
314 | | - uint64_t pe, uint64_t src, |
---|
315 | | - unsigned count); |
---|
316 | | - |
---|
317 | | - /* write pte one entry at a time with addr mapping */ |
---|
318 | | - void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, |
---|
319 | | - uint64_t value, unsigned count, |
---|
320 | | - uint32_t incr); |
---|
321 | | - /* for linear pte/pde updates without addr mapping */ |
---|
322 | | - void (*set_pte_pde)(struct amdgpu_ib *ib, |
---|
323 | | - uint64_t pe, |
---|
324 | | - uint64_t addr, unsigned count, |
---|
325 | | - uint32_t incr, uint64_t flags); |
---|
326 | | -}; |
---|
327 | | - |
---|
328 | | -/* provided by the ih block */ |
---|
329 | | -struct amdgpu_ih_funcs { |
---|
330 | | - /* ring read/write ptr handling, called from interrupt context */ |
---|
331 | | - u32 (*get_wptr)(struct amdgpu_device *adev); |
---|
332 | | - bool (*prescreen_iv)(struct amdgpu_device *adev); |
---|
333 | | - void (*decode_iv)(struct amdgpu_device *adev, |
---|
334 | | - struct amdgpu_iv_entry *entry); |
---|
335 | | - void (*set_rptr)(struct amdgpu_device *adev); |
---|
336 | | -}; |
---|
337 | 340 | |
---|
338 | 341 | /* |
---|
339 | 342 | * BIOS. |
---|
.. | .. |
---|
359 | 362 | uint32_t dp_extclk; |
---|
360 | 363 | uint32_t max_pixel_clock; |
---|
361 | 364 | }; |
---|
362 | | - |
---|
363 | | -/* |
---|
364 | | - * GEM. |
---|
365 | | - */ |
---|
366 | | - |
---|
367 | | -#define AMDGPU_GEM_DOMAIN_MAX 0x3 |
---|
368 | | -#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) |
---|
369 | | - |
---|
370 | | -void amdgpu_gem_object_free(struct drm_gem_object *obj); |
---|
371 | | -int amdgpu_gem_object_open(struct drm_gem_object *obj, |
---|
372 | | - struct drm_file *file_priv); |
---|
373 | | -void amdgpu_gem_object_close(struct drm_gem_object *obj, |
---|
374 | | - struct drm_file *file_priv); |
---|
375 | | -unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); |
---|
376 | | -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); |
---|
377 | | -struct drm_gem_object * |
---|
378 | | -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, |
---|
379 | | - struct dma_buf_attachment *attach, |
---|
380 | | - struct sg_table *sg); |
---|
381 | | -struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, |
---|
382 | | - struct drm_gem_object *gobj, |
---|
383 | | - int flags); |
---|
384 | | -struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, |
---|
385 | | - struct dma_buf *dma_buf); |
---|
386 | | -struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); |
---|
387 | | -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); |
---|
388 | | -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
---|
389 | | -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
---|
390 | 365 | |
---|
391 | 366 | /* sub-allocation manager, it has to be protected by another lock. |
---|
392 | 367 | * By conception this is an helper for other part of the driver |
---|
.. | .. |
---|
437 | 412 | struct dma_fence *fence; |
---|
438 | 413 | }; |
---|
439 | 414 | |
---|
440 | | -/* |
---|
441 | | - * GEM objects. |
---|
442 | | - */ |
---|
443 | | -void amdgpu_gem_force_release(struct amdgpu_device *adev); |
---|
444 | | -int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, |
---|
445 | | - int alignment, u32 initial_domain, |
---|
446 | | - u64 flags, enum ttm_bo_type type, |
---|
447 | | - struct reservation_object *resv, |
---|
448 | | - struct drm_gem_object **obj); |
---|
449 | | - |
---|
450 | | -int amdgpu_mode_dumb_create(struct drm_file *file_priv, |
---|
451 | | - struct drm_device *dev, |
---|
452 | | - struct drm_mode_create_dumb *args); |
---|
453 | | -int amdgpu_mode_dumb_mmap(struct drm_file *filp, |
---|
454 | | - struct drm_device *dev, |
---|
455 | | - uint32_t handle, uint64_t *offset_p); |
---|
456 | 415 | int amdgpu_fence_slab_init(void); |
---|
457 | 416 | void amdgpu_fence_slab_fini(void); |
---|
458 | | - |
---|
459 | | -/* |
---|
460 | | - * GPU doorbell structures, functions & helpers |
---|
461 | | - */ |
---|
462 | | -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT |
---|
463 | | -{ |
---|
464 | | - AMDGPU_DOORBELL_KIQ = 0x000, |
---|
465 | | - AMDGPU_DOORBELL_HIQ = 0x001, |
---|
466 | | - AMDGPU_DOORBELL_DIQ = 0x002, |
---|
467 | | - AMDGPU_DOORBELL_MEC_RING0 = 0x010, |
---|
468 | | - AMDGPU_DOORBELL_MEC_RING1 = 0x011, |
---|
469 | | - AMDGPU_DOORBELL_MEC_RING2 = 0x012, |
---|
470 | | - AMDGPU_DOORBELL_MEC_RING3 = 0x013, |
---|
471 | | - AMDGPU_DOORBELL_MEC_RING4 = 0x014, |
---|
472 | | - AMDGPU_DOORBELL_MEC_RING5 = 0x015, |
---|
473 | | - AMDGPU_DOORBELL_MEC_RING6 = 0x016, |
---|
474 | | - AMDGPU_DOORBELL_MEC_RING7 = 0x017, |
---|
475 | | - AMDGPU_DOORBELL_GFX_RING0 = 0x020, |
---|
476 | | - AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, |
---|
477 | | - AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, |
---|
478 | | - AMDGPU_DOORBELL_IH = 0x1E8, |
---|
479 | | - AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, |
---|
480 | | - AMDGPU_DOORBELL_INVALID = 0xFFFF |
---|
481 | | -} AMDGPU_DOORBELL_ASSIGNMENT; |
---|
482 | | - |
---|
483 | | -struct amdgpu_doorbell { |
---|
484 | | - /* doorbell mmio */ |
---|
485 | | - resource_size_t base; |
---|
486 | | - resource_size_t size; |
---|
487 | | - u32 __iomem *ptr; |
---|
488 | | - u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ |
---|
489 | | -}; |
---|
490 | | - |
---|
491 | | -/* |
---|
492 | | - * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space |
---|
493 | | - */ |
---|
494 | | -typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT |
---|
495 | | -{ |
---|
496 | | - /* |
---|
497 | | - * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in |
---|
498 | | - * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. |
---|
499 | | - * Compute related doorbells are allocated from 0x00 to 0x8a |
---|
500 | | - */ |
---|
501 | | - |
---|
502 | | - |
---|
503 | | - /* kernel scheduling */ |
---|
504 | | - AMDGPU_DOORBELL64_KIQ = 0x00, |
---|
505 | | - |
---|
506 | | - /* HSA interface queue and debug queue */ |
---|
507 | | - AMDGPU_DOORBELL64_HIQ = 0x01, |
---|
508 | | - AMDGPU_DOORBELL64_DIQ = 0x02, |
---|
509 | | - |
---|
510 | | - /* Compute engines */ |
---|
511 | | - AMDGPU_DOORBELL64_MEC_RING0 = 0x03, |
---|
512 | | - AMDGPU_DOORBELL64_MEC_RING1 = 0x04, |
---|
513 | | - AMDGPU_DOORBELL64_MEC_RING2 = 0x05, |
---|
514 | | - AMDGPU_DOORBELL64_MEC_RING3 = 0x06, |
---|
515 | | - AMDGPU_DOORBELL64_MEC_RING4 = 0x07, |
---|
516 | | - AMDGPU_DOORBELL64_MEC_RING5 = 0x08, |
---|
517 | | - AMDGPU_DOORBELL64_MEC_RING6 = 0x09, |
---|
518 | | - AMDGPU_DOORBELL64_MEC_RING7 = 0x0a, |
---|
519 | | - |
---|
520 | | - /* User queue doorbell range (128 doorbells) */ |
---|
521 | | - AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b, |
---|
522 | | - AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a, |
---|
523 | | - |
---|
524 | | - /* Graphics engine */ |
---|
525 | | - AMDGPU_DOORBELL64_GFX_RING0 = 0x8b, |
---|
526 | | - |
---|
527 | | - /* |
---|
528 | | - * Other graphics doorbells can be allocated here: from 0x8c to 0xef |
---|
529 | | - * Graphics voltage island aperture 1 |
---|
530 | | - * default non-graphics QWORD index is 0xF0 - 0xFF inclusive |
---|
531 | | - */ |
---|
532 | | - |
---|
533 | | - /* sDMA engines */ |
---|
534 | | - AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0, |
---|
535 | | - AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1, |
---|
536 | | - AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2, |
---|
537 | | - AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3, |
---|
538 | | - |
---|
539 | | - /* Interrupt handler */ |
---|
540 | | - AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */ |
---|
541 | | - AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */ |
---|
542 | | - AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */ |
---|
543 | | - |
---|
544 | | - /* VCN engine use 32 bits doorbell */ |
---|
545 | | - AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ |
---|
546 | | - AMDGPU_DOORBELL64_VCN2_3 = 0xF9, |
---|
547 | | - AMDGPU_DOORBELL64_VCN4_5 = 0xFA, |
---|
548 | | - AMDGPU_DOORBELL64_VCN6_7 = 0xFB, |
---|
549 | | - |
---|
550 | | - /* overlap the doorbell assignment with VCN as they are mutually exclusive |
---|
551 | | - * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD |
---|
552 | | - */ |
---|
553 | | - AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, |
---|
554 | | - AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, |
---|
555 | | - AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, |
---|
556 | | - AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, |
---|
557 | | - |
---|
558 | | - AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, |
---|
559 | | - AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, |
---|
560 | | - AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, |
---|
561 | | - AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, |
---|
562 | | - |
---|
563 | | - AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, |
---|
564 | | - AMDGPU_DOORBELL64_INVALID = 0xFFFF |
---|
565 | | -} AMDGPU_DOORBELL64_ASSIGNMENT; |
---|
566 | 417 | |
---|
567 | 418 | /* |
---|
568 | 419 | * IRQS. |
---|
.. | .. |
---|
600 | 451 | extern const struct drm_sched_backend_ops amdgpu_sched_ops; |
---|
601 | 452 | |
---|
602 | 453 | /* |
---|
603 | | - * Queue manager |
---|
604 | | - */ |
---|
605 | | -struct amdgpu_queue_mapper { |
---|
606 | | - int hw_ip; |
---|
607 | | - struct mutex lock; |
---|
608 | | - /* protected by lock */ |
---|
609 | | - struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS]; |
---|
610 | | -}; |
---|
611 | | - |
---|
612 | | -struct amdgpu_queue_mgr { |
---|
613 | | - struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM]; |
---|
614 | | -}; |
---|
615 | | - |
---|
616 | | -int amdgpu_queue_mgr_init(struct amdgpu_device *adev, |
---|
617 | | - struct amdgpu_queue_mgr *mgr); |
---|
618 | | -int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, |
---|
619 | | - struct amdgpu_queue_mgr *mgr); |
---|
620 | | -int amdgpu_queue_mgr_map(struct amdgpu_device *adev, |
---|
621 | | - struct amdgpu_queue_mgr *mgr, |
---|
622 | | - u32 hw_ip, u32 instance, u32 ring, |
---|
623 | | - struct amdgpu_ring **out_ring); |
---|
624 | | - |
---|
625 | | -/* |
---|
626 | | - * context related structures |
---|
627 | | - */ |
---|
628 | | - |
---|
629 | | -struct amdgpu_ctx_ring { |
---|
630 | | - uint64_t sequence; |
---|
631 | | - struct dma_fence **fences; |
---|
632 | | - struct drm_sched_entity entity; |
---|
633 | | -}; |
---|
634 | | - |
---|
635 | | -struct amdgpu_ctx { |
---|
636 | | - struct kref refcount; |
---|
637 | | - struct amdgpu_device *adev; |
---|
638 | | - struct amdgpu_queue_mgr queue_mgr; |
---|
639 | | - unsigned reset_counter; |
---|
640 | | - unsigned reset_counter_query; |
---|
641 | | - uint32_t vram_lost_counter; |
---|
642 | | - spinlock_t ring_lock; |
---|
643 | | - struct dma_fence **fences; |
---|
644 | | - struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; |
---|
645 | | - bool preamble_presented; |
---|
646 | | - enum drm_sched_priority init_priority; |
---|
647 | | - enum drm_sched_priority override_priority; |
---|
648 | | - struct mutex lock; |
---|
649 | | - atomic_t guilty; |
---|
650 | | -}; |
---|
651 | | - |
---|
652 | | -struct amdgpu_ctx_mgr { |
---|
653 | | - struct amdgpu_device *adev; |
---|
654 | | - struct mutex lock; |
---|
655 | | - /* protected by lock */ |
---|
656 | | - struct idr ctx_handles; |
---|
657 | | -}; |
---|
658 | | - |
---|
659 | | -struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); |
---|
660 | | -int amdgpu_ctx_put(struct amdgpu_ctx *ctx); |
---|
661 | | - |
---|
662 | | -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
---|
663 | | - struct dma_fence *fence, uint64_t *seq); |
---|
664 | | -struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
---|
665 | | - struct amdgpu_ring *ring, uint64_t seq); |
---|
666 | | -void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, |
---|
667 | | - enum drm_sched_priority priority); |
---|
668 | | - |
---|
669 | | -int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
---|
670 | | - struct drm_file *filp); |
---|
671 | | - |
---|
672 | | -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); |
---|
673 | | - |
---|
674 | | -void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); |
---|
675 | | -void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); |
---|
676 | | -void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr); |
---|
677 | | -void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); |
---|
678 | | - |
---|
679 | | - |
---|
680 | | -/* |
---|
681 | 454 | * file private structure |
---|
682 | 455 | */ |
---|
683 | 456 | |
---|
.. | .. |
---|
690 | 463 | struct amdgpu_ctx_mgr ctx_mgr; |
---|
691 | 464 | }; |
---|
692 | 465 | |
---|
693 | | -/* |
---|
694 | | - * GFX stuff |
---|
695 | | - */ |
---|
696 | | -#include "clearstate_defs.h" |
---|
697 | | - |
---|
698 | | -struct amdgpu_rlc_funcs { |
---|
699 | | - void (*enter_safe_mode)(struct amdgpu_device *adev); |
---|
700 | | - void (*exit_safe_mode)(struct amdgpu_device *adev); |
---|
701 | | -}; |
---|
702 | | - |
---|
703 | | -struct amdgpu_rlc { |
---|
704 | | - /* for power gating */ |
---|
705 | | - struct amdgpu_bo *save_restore_obj; |
---|
706 | | - uint64_t save_restore_gpu_addr; |
---|
707 | | - volatile uint32_t *sr_ptr; |
---|
708 | | - const u32 *reg_list; |
---|
709 | | - u32 reg_list_size; |
---|
710 | | - /* for clear state */ |
---|
711 | | - struct amdgpu_bo *clear_state_obj; |
---|
712 | | - uint64_t clear_state_gpu_addr; |
---|
713 | | - volatile uint32_t *cs_ptr; |
---|
714 | | - const struct cs_section_def *cs_data; |
---|
715 | | - u32 clear_state_size; |
---|
716 | | - /* for cp tables */ |
---|
717 | | - struct amdgpu_bo *cp_table_obj; |
---|
718 | | - uint64_t cp_table_gpu_addr; |
---|
719 | | - volatile uint32_t *cp_table_ptr; |
---|
720 | | - u32 cp_table_size; |
---|
721 | | - |
---|
722 | | - /* safe mode for updating CG/PG state */ |
---|
723 | | - bool in_safe_mode; |
---|
724 | | - const struct amdgpu_rlc_funcs *funcs; |
---|
725 | | - |
---|
726 | | - /* for firmware data */ |
---|
727 | | - u32 save_and_restore_offset; |
---|
728 | | - u32 clear_state_descriptor_offset; |
---|
729 | | - u32 avail_scratch_ram_locations; |
---|
730 | | - u32 reg_restore_list_size; |
---|
731 | | - u32 reg_list_format_start; |
---|
732 | | - u32 reg_list_format_separate_start; |
---|
733 | | - u32 starting_offsets_start; |
---|
734 | | - u32 reg_list_format_size_bytes; |
---|
735 | | - u32 reg_list_size_bytes; |
---|
736 | | - u32 reg_list_format_direct_reg_list_length; |
---|
737 | | - u32 save_restore_list_cntl_size_bytes; |
---|
738 | | - u32 save_restore_list_gpm_size_bytes; |
---|
739 | | - u32 save_restore_list_srm_size_bytes; |
---|
740 | | - |
---|
741 | | - u32 *register_list_format; |
---|
742 | | - u32 *register_restore; |
---|
743 | | - u8 *save_restore_list_cntl; |
---|
744 | | - u8 *save_restore_list_gpm; |
---|
745 | | - u8 *save_restore_list_srm; |
---|
746 | | - |
---|
747 | | - bool is_rlc_v2_1; |
---|
748 | | -}; |
---|
749 | | - |
---|
750 | | -#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES |
---|
751 | | - |
---|
752 | | -struct amdgpu_mec { |
---|
753 | | - struct amdgpu_bo *hpd_eop_obj; |
---|
754 | | - u64 hpd_eop_gpu_addr; |
---|
755 | | - struct amdgpu_bo *mec_fw_obj; |
---|
756 | | - u64 mec_fw_gpu_addr; |
---|
757 | | - u32 num_mec; |
---|
758 | | - u32 num_pipe_per_mec; |
---|
759 | | - u32 num_queue_per_pipe; |
---|
760 | | - void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; |
---|
761 | | - |
---|
762 | | - /* These are the resources for which amdgpu takes ownership */ |
---|
763 | | - DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); |
---|
764 | | -}; |
---|
765 | | - |
---|
766 | | -struct amdgpu_kiq { |
---|
767 | | - u64 eop_gpu_addr; |
---|
768 | | - struct amdgpu_bo *eop_obj; |
---|
769 | | - spinlock_t ring_lock; |
---|
770 | | - struct amdgpu_ring ring; |
---|
771 | | - struct amdgpu_irq_src irq; |
---|
772 | | -}; |
---|
773 | | - |
---|
774 | | -/* |
---|
775 | | - * GPU scratch registers structures, functions & helpers |
---|
776 | | - */ |
---|
777 | | -struct amdgpu_scratch { |
---|
778 | | - unsigned num_reg; |
---|
779 | | - uint32_t reg_base; |
---|
780 | | - uint32_t free_mask; |
---|
781 | | -}; |
---|
782 | | - |
---|
783 | | -/* |
---|
784 | | - * GFX configurations |
---|
785 | | - */ |
---|
786 | | -#define AMDGPU_GFX_MAX_SE 4 |
---|
787 | | -#define AMDGPU_GFX_MAX_SH_PER_SE 2 |
---|
788 | | - |
---|
789 | | -struct amdgpu_rb_config { |
---|
790 | | - uint32_t rb_backend_disable; |
---|
791 | | - uint32_t user_rb_backend_disable; |
---|
792 | | - uint32_t raster_config; |
---|
793 | | - uint32_t raster_config_1; |
---|
794 | | -}; |
---|
795 | | - |
---|
796 | | -struct gb_addr_config { |
---|
797 | | - uint16_t pipe_interleave_size; |
---|
798 | | - uint8_t num_pipes; |
---|
799 | | - uint8_t max_compress_frags; |
---|
800 | | - uint8_t num_banks; |
---|
801 | | - uint8_t num_se; |
---|
802 | | - uint8_t num_rb_per_se; |
---|
803 | | -}; |
---|
804 | | - |
---|
805 | | -struct amdgpu_gfx_config { |
---|
806 | | - unsigned max_shader_engines; |
---|
807 | | - unsigned max_tile_pipes; |
---|
808 | | - unsigned max_cu_per_sh; |
---|
809 | | - unsigned max_sh_per_se; |
---|
810 | | - unsigned max_backends_per_se; |
---|
811 | | - unsigned max_texture_channel_caches; |
---|
812 | | - unsigned max_gprs; |
---|
813 | | - unsigned max_gs_threads; |
---|
814 | | - unsigned max_hw_contexts; |
---|
815 | | - unsigned sc_prim_fifo_size_frontend; |
---|
816 | | - unsigned sc_prim_fifo_size_backend; |
---|
817 | | - unsigned sc_hiz_tile_fifo_size; |
---|
818 | | - unsigned sc_earlyz_tile_fifo_size; |
---|
819 | | - |
---|
820 | | - unsigned num_tile_pipes; |
---|
821 | | - unsigned backend_enable_mask; |
---|
822 | | - unsigned mem_max_burst_length_bytes; |
---|
823 | | - unsigned mem_row_size_in_kb; |
---|
824 | | - unsigned shader_engine_tile_size; |
---|
825 | | - unsigned num_gpus; |
---|
826 | | - unsigned multi_gpu_tile_size; |
---|
827 | | - unsigned mc_arb_ramcfg; |
---|
828 | | - unsigned gb_addr_config; |
---|
829 | | - unsigned num_rbs; |
---|
830 | | - unsigned gs_vgt_table_depth; |
---|
831 | | - unsigned gs_prim_buffer_depth; |
---|
832 | | - |
---|
833 | | - uint32_t tile_mode_array[32]; |
---|
834 | | - uint32_t macrotile_mode_array[16]; |
---|
835 | | - |
---|
836 | | - struct gb_addr_config gb_addr_config_fields; |
---|
837 | | - struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; |
---|
838 | | - |
---|
839 | | - /* gfx configure feature */ |
---|
840 | | - uint32_t double_offchip_lds_buf; |
---|
841 | | - /* cached value of DB_DEBUG2 */ |
---|
842 | | - uint32_t db_debug2; |
---|
843 | | -}; |
---|
844 | | - |
---|
845 | | -struct amdgpu_cu_info { |
---|
846 | | - uint32_t simd_per_cu; |
---|
847 | | - uint32_t max_waves_per_simd; |
---|
848 | | - uint32_t wave_front_size; |
---|
849 | | - uint32_t max_scratch_slots_per_cu; |
---|
850 | | - uint32_t lds_size; |
---|
851 | | - |
---|
852 | | - /* total active CU number */ |
---|
853 | | - uint32_t number; |
---|
854 | | - uint32_t ao_cu_mask; |
---|
855 | | - uint32_t ao_cu_bitmap[4][4]; |
---|
856 | | - uint32_t bitmap[4][4]; |
---|
857 | | -}; |
---|
858 | | - |
---|
859 | | -struct amdgpu_gfx_funcs { |
---|
860 | | - /* get the gpu clock counter */ |
---|
861 | | - uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); |
---|
862 | | - void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); |
---|
863 | | - void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); |
---|
864 | | - void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); |
---|
865 | | - void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); |
---|
866 | | - void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue); |
---|
867 | | -}; |
---|
868 | | - |
---|
869 | | -struct amdgpu_ngg_buf { |
---|
870 | | - struct amdgpu_bo *bo; |
---|
871 | | - uint64_t gpu_addr; |
---|
872 | | - uint32_t size; |
---|
873 | | - uint32_t bo_size; |
---|
874 | | -}; |
---|
875 | | - |
---|
876 | | -enum { |
---|
877 | | - NGG_PRIM = 0, |
---|
878 | | - NGG_POS, |
---|
879 | | - NGG_CNTL, |
---|
880 | | - NGG_PARAM, |
---|
881 | | - NGG_BUF_MAX |
---|
882 | | -}; |
---|
883 | | - |
---|
884 | | -struct amdgpu_ngg { |
---|
885 | | - struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; |
---|
886 | | - uint32_t gds_reserve_addr; |
---|
887 | | - uint32_t gds_reserve_size; |
---|
888 | | - bool init; |
---|
889 | | -}; |
---|
890 | | - |
---|
891 | | -struct sq_work { |
---|
892 | | - struct work_struct work; |
---|
893 | | - unsigned ih_data; |
---|
894 | | -}; |
---|
895 | | - |
---|
896 | | -struct amdgpu_gfx { |
---|
897 | | - struct mutex gpu_clock_mutex; |
---|
898 | | - struct amdgpu_gfx_config config; |
---|
899 | | - struct amdgpu_rlc rlc; |
---|
900 | | - struct amdgpu_mec mec; |
---|
901 | | - struct amdgpu_kiq kiq; |
---|
902 | | - struct amdgpu_scratch scratch; |
---|
903 | | - const struct firmware *me_fw; /* ME firmware */ |
---|
904 | | - uint32_t me_fw_version; |
---|
905 | | - const struct firmware *pfp_fw; /* PFP firmware */ |
---|
906 | | - uint32_t pfp_fw_version; |
---|
907 | | - const struct firmware *ce_fw; /* CE firmware */ |
---|
908 | | - uint32_t ce_fw_version; |
---|
909 | | - const struct firmware *rlc_fw; /* RLC firmware */ |
---|
910 | | - uint32_t rlc_fw_version; |
---|
911 | | - const struct firmware *mec_fw; /* MEC firmware */ |
---|
912 | | - uint32_t mec_fw_version; |
---|
913 | | - const struct firmware *mec2_fw; /* MEC2 firmware */ |
---|
914 | | - uint32_t mec2_fw_version; |
---|
915 | | - uint32_t me_feature_version; |
---|
916 | | - uint32_t ce_feature_version; |
---|
917 | | - uint32_t pfp_feature_version; |
---|
918 | | - uint32_t rlc_feature_version; |
---|
919 | | - uint32_t rlc_srlc_fw_version; |
---|
920 | | - uint32_t rlc_srlc_feature_version; |
---|
921 | | - uint32_t rlc_srlg_fw_version; |
---|
922 | | - uint32_t rlc_srlg_feature_version; |
---|
923 | | - uint32_t rlc_srls_fw_version; |
---|
924 | | - uint32_t rlc_srls_feature_version; |
---|
925 | | - uint32_t mec_feature_version; |
---|
926 | | - uint32_t mec2_feature_version; |
---|
927 | | - struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; |
---|
928 | | - unsigned num_gfx_rings; |
---|
929 | | - struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; |
---|
930 | | - unsigned num_compute_rings; |
---|
931 | | - struct amdgpu_irq_src eop_irq; |
---|
932 | | - struct amdgpu_irq_src priv_reg_irq; |
---|
933 | | - struct amdgpu_irq_src priv_inst_irq; |
---|
934 | | - struct amdgpu_irq_src cp_ecc_error_irq; |
---|
935 | | - struct amdgpu_irq_src sq_irq; |
---|
936 | | - struct sq_work sq_work; |
---|
937 | | - |
---|
938 | | - /* gfx status */ |
---|
939 | | - uint32_t gfx_current_status; |
---|
940 | | - /* ce ram size*/ |
---|
941 | | - unsigned ce_ram_size; |
---|
942 | | - struct amdgpu_cu_info cu_info; |
---|
943 | | - const struct amdgpu_gfx_funcs *funcs; |
---|
944 | | - |
---|
945 | | - /* reset mask */ |
---|
946 | | - uint32_t grbm_soft_reset; |
---|
947 | | - uint32_t srbm_soft_reset; |
---|
948 | | - /* s3/s4 mask */ |
---|
949 | | - bool in_suspend; |
---|
950 | | - /* NGG */ |
---|
951 | | - struct amdgpu_ngg ngg; |
---|
952 | | - |
---|
953 | | - /* pipe reservation */ |
---|
954 | | - struct mutex pipe_reserve_mutex; |
---|
955 | | - DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); |
---|
956 | | -}; |
---|
| 466 | +int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); |
---|
957 | 467 | |
---|
958 | 468 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
---|
959 | | - unsigned size, struct amdgpu_ib *ib); |
---|
| 469 | + unsigned size, |
---|
| 470 | + enum amdgpu_ib_pool_type pool, |
---|
| 471 | + struct amdgpu_ib *ib); |
---|
960 | 472 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
---|
961 | 473 | struct dma_fence *f); |
---|
962 | 474 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
---|
.. | .. |
---|
975 | 487 | void *kdata; |
---|
976 | 488 | }; |
---|
977 | 489 | |
---|
| 490 | +struct amdgpu_cs_post_dep { |
---|
| 491 | + struct drm_syncobj *syncobj; |
---|
| 492 | + struct dma_fence_chain *chain; |
---|
| 493 | + u64 point; |
---|
| 494 | +}; |
---|
| 495 | + |
---|
978 | 496 | struct amdgpu_cs_parser { |
---|
979 | 497 | struct amdgpu_device *adev; |
---|
980 | 498 | struct drm_file *filp; |
---|
.. | .. |
---|
986 | 504 | |
---|
987 | 505 | /* scheduler job object */ |
---|
988 | 506 | struct amdgpu_job *job; |
---|
989 | | - struct amdgpu_ring *ring; |
---|
| 507 | + struct drm_sched_entity *entity; |
---|
990 | 508 | |
---|
991 | 509 | /* buffer objects */ |
---|
992 | 510 | struct ww_acquire_ctx ticket; |
---|
.. | .. |
---|
999 | 517 | uint64_t bytes_moved_vis_threshold; |
---|
1000 | 518 | uint64_t bytes_moved; |
---|
1001 | 519 | uint64_t bytes_moved_vis; |
---|
1002 | | - struct amdgpu_bo_list_entry *evictable; |
---|
1003 | 520 | |
---|
1004 | 521 | /* user fence */ |
---|
1005 | 522 | struct amdgpu_bo_list_entry uf_entry; |
---|
1006 | 523 | |
---|
1007 | | - unsigned num_post_dep_syncobjs; |
---|
1008 | | - struct drm_syncobj **post_dep_syncobjs; |
---|
| 524 | + unsigned num_post_deps; |
---|
| 525 | + struct amdgpu_cs_post_dep *post_deps; |
---|
1009 | 526 | }; |
---|
1010 | 527 | |
---|
1011 | 528 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, |
---|
.. | .. |
---|
1024 | 541 | /* |
---|
1025 | 542 | * Writeback |
---|
1026 | 543 | */ |
---|
1027 | | -#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */ |
---|
| 544 | +#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */ |
---|
1028 | 545 | |
---|
1029 | 546 | struct amdgpu_wb { |
---|
1030 | 547 | struct amdgpu_bo *wb_obj; |
---|
.. | .. |
---|
1038 | 555 | void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); |
---|
1039 | 556 | |
---|
1040 | 557 | /* |
---|
1041 | | - * SDMA |
---|
1042 | | - */ |
---|
1043 | | -struct amdgpu_sdma_instance { |
---|
1044 | | - /* SDMA firmware */ |
---|
1045 | | - const struct firmware *fw; |
---|
1046 | | - uint32_t fw_version; |
---|
1047 | | - uint32_t feature_version; |
---|
1048 | | - |
---|
1049 | | - struct amdgpu_ring ring; |
---|
1050 | | - bool burst_nop; |
---|
1051 | | -}; |
---|
1052 | | - |
---|
1053 | | -struct amdgpu_sdma { |
---|
1054 | | - struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; |
---|
1055 | | -#ifdef CONFIG_DRM_AMDGPU_SI |
---|
1056 | | - //SI DMA has a difference trap irq number for the second engine |
---|
1057 | | - struct amdgpu_irq_src trap_irq_1; |
---|
1058 | | -#endif |
---|
1059 | | - struct amdgpu_irq_src trap_irq; |
---|
1060 | | - struct amdgpu_irq_src illegal_inst_irq; |
---|
1061 | | - int num_instances; |
---|
1062 | | - uint32_t srbm_soft_reset; |
---|
1063 | | -}; |
---|
1064 | | - |
---|
1065 | | -/* |
---|
1066 | | - * Firmware |
---|
1067 | | - */ |
---|
1068 | | -enum amdgpu_firmware_load_type { |
---|
1069 | | - AMDGPU_FW_LOAD_DIRECT = 0, |
---|
1070 | | - AMDGPU_FW_LOAD_SMU, |
---|
1071 | | - AMDGPU_FW_LOAD_PSP, |
---|
1072 | | -}; |
---|
1073 | | - |
---|
1074 | | -struct amdgpu_firmware { |
---|
1075 | | - struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; |
---|
1076 | | - enum amdgpu_firmware_load_type load_type; |
---|
1077 | | - struct amdgpu_bo *fw_buf; |
---|
1078 | | - unsigned int fw_size; |
---|
1079 | | - unsigned int max_ucodes; |
---|
1080 | | - /* firmwares are loaded by psp instead of smu from vega10 */ |
---|
1081 | | - const struct amdgpu_psp_funcs *funcs; |
---|
1082 | | - struct amdgpu_bo *rbuf; |
---|
1083 | | - struct mutex mutex; |
---|
1084 | | - |
---|
1085 | | - /* gpu info firmware data pointer */ |
---|
1086 | | - const struct firmware *gpu_info_fw; |
---|
1087 | | - |
---|
1088 | | - void *fw_buf_ptr; |
---|
1089 | | - uint64_t fw_buf_mc; |
---|
1090 | | -}; |
---|
1091 | | - |
---|
1092 | | -/* |
---|
1093 | 558 | * Benchmarking |
---|
1094 | 559 | */ |
---|
1095 | 560 | void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); |
---|
.. | .. |
---|
1100 | 565 | */ |
---|
1101 | 566 | void amdgpu_test_moves(struct amdgpu_device *adev); |
---|
1102 | 567 | |
---|
1103 | | - |
---|
1104 | | -/* |
---|
1105 | | - * amdgpu smumgr functions |
---|
1106 | | - */ |
---|
1107 | | -struct amdgpu_smumgr_funcs { |
---|
1108 | | - int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); |
---|
1109 | | - int (*request_smu_load_fw)(struct amdgpu_device *adev); |
---|
1110 | | - int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); |
---|
1111 | | -}; |
---|
1112 | | - |
---|
1113 | | -/* |
---|
1114 | | - * amdgpu smumgr |
---|
1115 | | - */ |
---|
1116 | | -struct amdgpu_smumgr { |
---|
1117 | | - struct amdgpu_bo *toc_buf; |
---|
1118 | | - struct amdgpu_bo *smu_buf; |
---|
1119 | | - /* asic priv smu data */ |
---|
1120 | | - void *priv; |
---|
1121 | | - spinlock_t smu_lock; |
---|
1122 | | - /* smumgr functions */ |
---|
1123 | | - const struct amdgpu_smumgr_funcs *smumgr_funcs; |
---|
1124 | | - /* ucode loading complete flag */ |
---|
1125 | | - uint32_t fw_flags; |
---|
1126 | | -}; |
---|
1127 | | - |
---|
1128 | 568 | /* |
---|
1129 | 569 | * ASIC specific register table accessible by UMD |
---|
1130 | 570 | */ |
---|
1131 | 571 | struct amdgpu_allowed_register_entry { |
---|
1132 | 572 | uint32_t reg_offset; |
---|
1133 | 573 | bool grbm_indexed; |
---|
| 574 | +}; |
---|
| 575 | + |
---|
| 576 | +enum amd_reset_method { |
---|
| 577 | + AMD_RESET_METHOD_LEGACY = 0, |
---|
| 578 | + AMD_RESET_METHOD_MODE0, |
---|
| 579 | + AMD_RESET_METHOD_MODE1, |
---|
| 580 | + AMD_RESET_METHOD_MODE2, |
---|
| 581 | + AMD_RESET_METHOD_BACO |
---|
1134 | 582 | }; |
---|
1135 | 583 | |
---|
1136 | 584 | /* |
---|
.. | .. |
---|
1144 | 592 | u32 sh_num, u32 reg_offset, u32 *value); |
---|
1145 | 593 | void (*set_vga_state)(struct amdgpu_device *adev, bool state); |
---|
1146 | 594 | int (*reset)(struct amdgpu_device *adev); |
---|
| 595 | + enum amd_reset_method (*reset_method)(struct amdgpu_device *adev); |
---|
1147 | 596 | /* get the reference clock */ |
---|
1148 | 597 | u32 (*get_xclk)(struct amdgpu_device *adev); |
---|
1149 | 598 | /* MM block clocks */ |
---|
.. | .. |
---|
1159 | 608 | /* invalidate hdp read cache */ |
---|
1160 | 609 | void (*invalidate_hdp)(struct amdgpu_device *adev, |
---|
1161 | 610 | struct amdgpu_ring *ring); |
---|
| 611 | + void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev); |
---|
1162 | 612 | /* check if the asic needs a full reset of if soft reset will work */ |
---|
1163 | 613 | bool (*need_full_reset)(struct amdgpu_device *adev); |
---|
| 614 | + /* initialize doorbell layout for specific asic*/ |
---|
| 615 | + void (*init_doorbell_index)(struct amdgpu_device *adev); |
---|
| 616 | + /* PCIe bandwidth usage */ |
---|
| 617 | + void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, |
---|
| 618 | + uint64_t *count1); |
---|
| 619 | + /* do we need to reset the asic at init time (e.g., kexec) */ |
---|
| 620 | + bool (*need_reset_on_init)(struct amdgpu_device *adev); |
---|
| 621 | + /* PCIe replay counter */ |
---|
| 622 | + uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); |
---|
| 623 | + /* device supports BACO */ |
---|
| 624 | + bool (*supports_baco)(struct amdgpu_device *adev); |
---|
| 625 | + /* pre asic_init quirks */ |
---|
| 626 | + void (*pre_asic_init)(struct amdgpu_device *adev); |
---|
1164 | 627 | }; |
---|
1165 | 628 | |
---|
1166 | 629 | /* |
---|
1167 | 630 | * IOCTL. |
---|
1168 | 631 | */ |
---|
1169 | | -int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, |
---|
1170 | | - struct drm_file *filp); |
---|
1171 | 632 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, |
---|
1172 | 633 | struct drm_file *filp); |
---|
1173 | 634 | |
---|
1174 | | -int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, |
---|
1175 | | - struct drm_file *filp); |
---|
1176 | | -int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, |
---|
1177 | | - struct drm_file *filp); |
---|
1178 | | -int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, |
---|
1179 | | - struct drm_file *filp); |
---|
1180 | | -int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
---|
1181 | | - struct drm_file *filp); |
---|
1182 | | -int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
---|
1183 | | - struct drm_file *filp); |
---|
1184 | | -int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, |
---|
1185 | | - struct drm_file *filp); |
---|
1186 | 635 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
---|
1187 | 636 | int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, |
---|
1188 | 637 | struct drm_file *filp); |
---|
1189 | 638 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
---|
1190 | 639 | int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, |
---|
1191 | | - struct drm_file *filp); |
---|
1192 | | - |
---|
1193 | | -int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, |
---|
1194 | 640 | struct drm_file *filp); |
---|
1195 | 641 | |
---|
1196 | 642 | /* VRAM scratch page for HDP bug, default vram page */ |
---|
.. | .. |
---|
1215 | 661 | }; |
---|
1216 | 662 | |
---|
1217 | 663 | /* |
---|
1218 | | - * Firmware VRAM reservation |
---|
1219 | | - */ |
---|
1220 | | -struct amdgpu_fw_vram_usage { |
---|
1221 | | - u64 start_offset; |
---|
1222 | | - u64 size; |
---|
1223 | | - struct amdgpu_bo *reserved_bo; |
---|
1224 | | - void *va; |
---|
1225 | | -}; |
---|
1226 | | - |
---|
1227 | | -/* |
---|
1228 | 664 | * CGS |
---|
1229 | 665 | */ |
---|
1230 | 666 | struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); |
---|
.. | .. |
---|
1236 | 672 | typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); |
---|
1237 | 673 | typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
---|
1238 | 674 | |
---|
| 675 | +typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); |
---|
| 676 | +typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); |
---|
| 677 | + |
---|
1239 | 678 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
---|
1240 | 679 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); |
---|
1241 | 680 | |
---|
1242 | | - |
---|
1243 | | -/* |
---|
1244 | | - * amdgpu nbio functions |
---|
1245 | | - * |
---|
1246 | | - */ |
---|
1247 | | -struct nbio_hdp_flush_reg { |
---|
1248 | | - u32 ref_and_mask_cp0; |
---|
1249 | | - u32 ref_and_mask_cp1; |
---|
1250 | | - u32 ref_and_mask_cp2; |
---|
1251 | | - u32 ref_and_mask_cp3; |
---|
1252 | | - u32 ref_and_mask_cp4; |
---|
1253 | | - u32 ref_and_mask_cp5; |
---|
1254 | | - u32 ref_and_mask_cp6; |
---|
1255 | | - u32 ref_and_mask_cp7; |
---|
1256 | | - u32 ref_and_mask_cp8; |
---|
1257 | | - u32 ref_and_mask_cp9; |
---|
1258 | | - u32 ref_and_mask_sdma0; |
---|
1259 | | - u32 ref_and_mask_sdma1; |
---|
| 681 | +struct amdgpu_mmio_remap { |
---|
| 682 | + u32 reg_offset; |
---|
| 683 | + resource_size_t bus_addr; |
---|
1260 | 684 | }; |
---|
1261 | 685 | |
---|
1262 | | -struct amdgpu_nbio_funcs { |
---|
1263 | | - const struct nbio_hdp_flush_reg *hdp_flush_reg; |
---|
1264 | | - u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); |
---|
1265 | | - u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); |
---|
1266 | | - u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); |
---|
1267 | | - u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); |
---|
1268 | | - u32 (*get_rev_id)(struct amdgpu_device *adev); |
---|
1269 | | - void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); |
---|
1270 | | - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); |
---|
1271 | | - u32 (*get_memsize)(struct amdgpu_device *adev); |
---|
1272 | | - void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, |
---|
1273 | | - bool use_doorbell, int doorbell_index); |
---|
1274 | | - void (*enable_doorbell_aperture)(struct amdgpu_device *adev, |
---|
1275 | | - bool enable); |
---|
1276 | | - void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, |
---|
1277 | | - bool enable); |
---|
1278 | | - void (*ih_doorbell_range)(struct amdgpu_device *adev, |
---|
1279 | | - bool use_doorbell, int doorbell_index); |
---|
1280 | | - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, |
---|
1281 | | - bool enable); |
---|
1282 | | - void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, |
---|
1283 | | - bool enable); |
---|
1284 | | - void (*get_clockgating_state)(struct amdgpu_device *adev, |
---|
1285 | | - u32 *flags); |
---|
1286 | | - void (*ih_control)(struct amdgpu_device *adev); |
---|
1287 | | - void (*init_registers)(struct amdgpu_device *adev); |
---|
1288 | | - void (*detect_hw_virt)(struct amdgpu_device *adev); |
---|
1289 | | -}; |
---|
1290 | | - |
---|
1291 | | -struct amdgpu_df_funcs { |
---|
1292 | | - void (*init)(struct amdgpu_device *adev); |
---|
1293 | | - void (*enable_broadcast_mode)(struct amdgpu_device *adev, |
---|
1294 | | - bool enable); |
---|
1295 | | - u32 (*get_fb_channel_number)(struct amdgpu_device *adev); |
---|
1296 | | - u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); |
---|
1297 | | - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, |
---|
1298 | | - bool enable); |
---|
1299 | | - void (*get_clockgating_state)(struct amdgpu_device *adev, |
---|
1300 | | - u32 *flags); |
---|
1301 | | - void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, |
---|
1302 | | - bool enable); |
---|
1303 | | -}; |
---|
1304 | 686 | /* Define the HW IP blocks will be used in driver , add more if necessary */ |
---|
1305 | 687 | enum amd_hw_ip_block_type { |
---|
1306 | 688 | GC_HWIP = 1, |
---|
1307 | 689 | HDP_HWIP, |
---|
1308 | 690 | SDMA0_HWIP, |
---|
1309 | 691 | SDMA1_HWIP, |
---|
| 692 | + SDMA2_HWIP, |
---|
| 693 | + SDMA3_HWIP, |
---|
| 694 | + SDMA4_HWIP, |
---|
| 695 | + SDMA5_HWIP, |
---|
| 696 | + SDMA6_HWIP, |
---|
| 697 | + SDMA7_HWIP, |
---|
1310 | 698 | MMHUB_HWIP, |
---|
1311 | 699 | ATHUB_HWIP, |
---|
1312 | 700 | NBIO_HWIP, |
---|
.. | .. |
---|
1314 | 702 | MP1_HWIP, |
---|
1315 | 703 | UVD_HWIP, |
---|
1316 | 704 | VCN_HWIP = UVD_HWIP, |
---|
| 705 | + JPEG_HWIP = VCN_HWIP, |
---|
1317 | 706 | VCE_HWIP, |
---|
1318 | 707 | DF_HWIP, |
---|
1319 | 708 | DCE_HWIP, |
---|
.. | .. |
---|
1323 | 712 | NBIF_HWIP, |
---|
1324 | 713 | THM_HWIP, |
---|
1325 | 714 | CLK_HWIP, |
---|
| 715 | + UMC_HWIP, |
---|
| 716 | + RSMU_HWIP, |
---|
1326 | 717 | MAX_HWIP |
---|
1327 | 718 | }; |
---|
1328 | 719 | |
---|
1329 | | -#define HWIP_MAX_INSTANCE 6 |
---|
| 720 | +#define HWIP_MAX_INSTANCE 10 |
---|
1330 | 721 | |
---|
1331 | 722 | struct amd_powerplay { |
---|
1332 | 723 | void *pp_handle; |
---|
1333 | 724 | const struct amd_pm_funcs *pp_funcs; |
---|
1334 | | - uint32_t pp_feature; |
---|
1335 | 725 | }; |
---|
1336 | 726 | |
---|
1337 | 727 | #define AMDGPU_RESET_MAGIC_NUM 64 |
---|
| 728 | +#define AMDGPU_MAX_DF_PERFMONS 4 |
---|
1338 | 729 | struct amdgpu_device { |
---|
1339 | 730 | struct device *dev; |
---|
1340 | | - struct drm_device *ddev; |
---|
1341 | 731 | struct pci_dev *pdev; |
---|
| 732 | + struct drm_device ddev; |
---|
1342 | 733 | |
---|
1343 | 734 | #ifdef CONFIG_DRM_AMD_ACP |
---|
1344 | 735 | struct amdgpu_acp acp; |
---|
1345 | 736 | #endif |
---|
1346 | | - |
---|
| 737 | + struct amdgpu_hive_info *hive; |
---|
1347 | 738 | /* ASIC */ |
---|
1348 | 739 | enum amd_asic_type asic_type; |
---|
1349 | 740 | uint32_t family; |
---|
1350 | 741 | uint32_t rev_id; |
---|
1351 | 742 | uint32_t external_rev_id; |
---|
1352 | 743 | unsigned long flags; |
---|
| 744 | + unsigned long apu_flags; |
---|
1353 | 745 | int usec_timeout; |
---|
1354 | 746 | const struct amdgpu_asic_funcs *asic_funcs; |
---|
1355 | 747 | bool shutdown; |
---|
1356 | | - bool need_dma32; |
---|
1357 | 748 | bool need_swiotlb; |
---|
1358 | 749 | bool accel_working; |
---|
1359 | | - struct work_struct reset_work; |
---|
1360 | 750 | struct notifier_block acpi_nb; |
---|
1361 | 751 | struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; |
---|
1362 | 752 | struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
---|
1363 | 753 | unsigned debugfs_count; |
---|
1364 | 754 | #if defined(CONFIG_DEBUG_FS) |
---|
| 755 | + struct dentry *debugfs_preempt; |
---|
1365 | 756 | struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
---|
1366 | 757 | #endif |
---|
1367 | 758 | struct amdgpu_atif *atif; |
---|
.. | .. |
---|
1371 | 762 | struct mutex grbm_idx_mutex; |
---|
1372 | 763 | struct dev_pm_domain vga_pm_domain; |
---|
1373 | 764 | bool have_disp_power_ref; |
---|
| 765 | + bool have_atomics_support; |
---|
1374 | 766 | |
---|
1375 | 767 | /* BIOS */ |
---|
1376 | 768 | bool is_atom_fw; |
---|
1377 | 769 | uint8_t *bios; |
---|
1378 | 770 | uint32_t bios_size; |
---|
1379 | | - struct amdgpu_bo *stolen_vga_memory; |
---|
1380 | 771 | uint32_t bios_scratch_reg_offset; |
---|
1381 | 772 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; |
---|
1382 | 773 | |
---|
.. | .. |
---|
1386 | 777 | void __iomem *rmmio; |
---|
1387 | 778 | /* protects concurrent MM_INDEX/DATA based register access */ |
---|
1388 | 779 | spinlock_t mmio_idx_lock; |
---|
| 780 | + struct amdgpu_mmio_remap rmmio_remap; |
---|
1389 | 781 | /* protects concurrent SMC based register access */ |
---|
1390 | 782 | spinlock_t smc_idx_lock; |
---|
1391 | 783 | amdgpu_rreg_t smc_rreg; |
---|
.. | .. |
---|
1396 | 788 | amdgpu_wreg_t pcie_wreg; |
---|
1397 | 789 | amdgpu_rreg_t pciep_rreg; |
---|
1398 | 790 | amdgpu_wreg_t pciep_wreg; |
---|
| 791 | + amdgpu_rreg64_t pcie_rreg64; |
---|
| 792 | + amdgpu_wreg64_t pcie_wreg64; |
---|
1399 | 793 | /* protects concurrent UVD register access */ |
---|
1400 | 794 | spinlock_t uvd_ctx_idx_lock; |
---|
1401 | 795 | amdgpu_rreg_t uvd_ctx_rreg; |
---|
.. | .. |
---|
1429 | 823 | dma_addr_t dummy_page_addr; |
---|
1430 | 824 | struct amdgpu_vm_manager vm_manager; |
---|
1431 | 825 | struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; |
---|
| 826 | + unsigned num_vmhubs; |
---|
1432 | 827 | |
---|
1433 | 828 | /* memory management */ |
---|
1434 | 829 | struct amdgpu_mman mman; |
---|
.. | .. |
---|
1455 | 850 | /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ |
---|
1456 | 851 | struct work_struct hotplug_work; |
---|
1457 | 852 | struct amdgpu_irq_src crtc_irq; |
---|
| 853 | + struct amdgpu_irq_src vupdate_irq; |
---|
1458 | 854 | struct amdgpu_irq_src pageflip_irq; |
---|
1459 | 855 | struct amdgpu_irq_src hpd_irq; |
---|
1460 | 856 | |
---|
.. | .. |
---|
1463 | 859 | unsigned num_rings; |
---|
1464 | 860 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
---|
1465 | 861 | bool ib_pool_ready; |
---|
1466 | | - struct amdgpu_sa_manager ring_tmp_bo; |
---|
| 862 | + struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX]; |
---|
| 863 | + struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; |
---|
1467 | 864 | |
---|
1468 | 865 | /* interrupts */ |
---|
1469 | 866 | struct amdgpu_irq irq; |
---|
.. | .. |
---|
1472 | 869 | struct amd_powerplay powerplay; |
---|
1473 | 870 | bool pp_force_state_enabled; |
---|
1474 | 871 | |
---|
| 872 | + /* smu */ |
---|
| 873 | + struct smu_context smu; |
---|
| 874 | + |
---|
1475 | 875 | /* dpm */ |
---|
1476 | 876 | struct amdgpu_pm pm; |
---|
1477 | 877 | u32 cg_flags; |
---|
1478 | 878 | u32 pg_flags; |
---|
1479 | 879 | |
---|
1480 | | - /* amdgpu smumgr */ |
---|
1481 | | - struct amdgpu_smumgr smu; |
---|
| 880 | + /* nbio */ |
---|
| 881 | + struct amdgpu_nbio nbio; |
---|
| 882 | + |
---|
| 883 | + /* mmhub */ |
---|
| 884 | + struct amdgpu_mmhub mmhub; |
---|
| 885 | + |
---|
| 886 | + /* gfxhub */ |
---|
| 887 | + struct amdgpu_gfxhub gfxhub; |
---|
1482 | 888 | |
---|
1483 | 889 | /* gfx */ |
---|
1484 | 890 | struct amdgpu_gfx gfx; |
---|
.. | .. |
---|
1495 | 901 | /* vcn */ |
---|
1496 | 902 | struct amdgpu_vcn vcn; |
---|
1497 | 903 | |
---|
| 904 | + /* jpeg */ |
---|
| 905 | + struct amdgpu_jpeg jpeg; |
---|
| 906 | + |
---|
1498 | 907 | /* firmwares */ |
---|
1499 | 908 | struct amdgpu_firmware firmware; |
---|
1500 | 909 | |
---|
.. | .. |
---|
1504 | 913 | /* GDS */ |
---|
1505 | 914 | struct amdgpu_gds gds; |
---|
1506 | 915 | |
---|
| 916 | + /* KFD */ |
---|
| 917 | + struct amdgpu_kfd_dev kfd; |
---|
| 918 | + |
---|
| 919 | + /* UMC */ |
---|
| 920 | + struct amdgpu_umc umc; |
---|
| 921 | + |
---|
1507 | 922 | /* display related functionality */ |
---|
1508 | 923 | struct amdgpu_display_manager dm; |
---|
| 924 | + |
---|
| 925 | + /* mes */ |
---|
| 926 | + bool enable_mes; |
---|
| 927 | + struct amdgpu_mes mes; |
---|
| 928 | + |
---|
| 929 | + /* df */ |
---|
| 930 | + struct amdgpu_df df; |
---|
1509 | 931 | |
---|
1510 | 932 | struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; |
---|
1511 | 933 | int num_ip_blocks; |
---|
.. | .. |
---|
1517 | 939 | atomic64_t visible_pin_size; |
---|
1518 | 940 | atomic64_t gart_pin_size; |
---|
1519 | 941 | |
---|
1520 | | - /* amdkfd interface */ |
---|
1521 | | - struct kfd_dev *kfd; |
---|
1522 | | - |
---|
1523 | 942 | /* soc15 register offset based on ip, instance and segment */ |
---|
1524 | | - uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; |
---|
1525 | | - |
---|
1526 | | - const struct amdgpu_nbio_funcs *nbio_funcs; |
---|
1527 | | - const struct amdgpu_df_funcs *df_funcs; |
---|
| 943 | + uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; |
---|
1528 | 944 | |
---|
1529 | 945 | /* delayed work_func for deferring clockgating during resume */ |
---|
1530 | | - struct delayed_work late_init_work; |
---|
| 946 | + struct delayed_work delayed_init_work; |
---|
1531 | 947 | |
---|
1532 | 948 | struct amdgpu_virt virt; |
---|
1533 | | - /* firmware VRAM reservation */ |
---|
1534 | | - struct amdgpu_fw_vram_usage fw_vram_usage; |
---|
1535 | 949 | |
---|
1536 | 950 | /* link all shadow bo */ |
---|
1537 | 951 | struct list_head shadow_list; |
---|
1538 | 952 | struct mutex shadow_list_lock; |
---|
1539 | | - /* keep an lru list of rings by HW IP */ |
---|
1540 | | - struct list_head ring_lru_list; |
---|
1541 | | - spinlock_t ring_lru_list_lock; |
---|
1542 | 953 | |
---|
1543 | 954 | /* record hw reset is performed */ |
---|
1544 | 955 | bool has_hw_reset; |
---|
1545 | 956 | u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; |
---|
1546 | 957 | |
---|
1547 | | - /* record last mm index being written through WREG32*/ |
---|
1548 | | - unsigned long last_mm_index; |
---|
1549 | | - bool in_gpu_reset; |
---|
1550 | | - struct mutex lock_reset; |
---|
| 958 | + /* s3/s4 mask */ |
---|
| 959 | + bool in_suspend; |
---|
| 960 | + bool in_hibernate; |
---|
| 961 | + |
---|
| 962 | + atomic_t in_gpu_reset; |
---|
| 963 | + enum pp_mp1_state mp1_state; |
---|
| 964 | + struct rw_semaphore reset_sem; |
---|
| 965 | + struct amdgpu_doorbell_index doorbell_index; |
---|
| 966 | + |
---|
| 967 | + struct mutex notifier_lock; |
---|
| 968 | + |
---|
| 969 | + int asic_reset_res; |
---|
| 970 | + struct work_struct xgmi_reset_work; |
---|
| 971 | + |
---|
| 972 | + long gfx_timeout; |
---|
| 973 | + long sdma_timeout; |
---|
| 974 | + long video_timeout; |
---|
| 975 | + long compute_timeout; |
---|
| 976 | + |
---|
| 977 | + uint64_t unique_id; |
---|
| 978 | + uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; |
---|
| 979 | + |
---|
| 980 | + /* enable runtime pm on the device */ |
---|
| 981 | + bool runpm; |
---|
| 982 | + bool in_runpm; |
---|
| 983 | + |
---|
| 984 | + bool pm_sysfs_en; |
---|
| 985 | + bool ucode_sysfs_en; |
---|
| 986 | + |
---|
| 987 | + /* Chip product information */ |
---|
| 988 | + char product_number[16]; |
---|
| 989 | + char product_name[32]; |
---|
| 990 | + char serial[20]; |
---|
| 991 | + |
---|
| 992 | + struct amdgpu_autodump autodump; |
---|
| 993 | + |
---|
| 994 | + atomic_t throttling_logging_enabled; |
---|
| 995 | + struct ratelimit_state throttling_logging_rs; |
---|
| 996 | + uint32_t ras_features; |
---|
| 997 | + |
---|
| 998 | + bool in_pci_err_recovery; |
---|
| 999 | + struct pci_saved_state *pci_state; |
---|
1551 | 1000 | }; |
---|
| 1001 | + |
---|
| 1002 | +static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) |
---|
| 1003 | +{ |
---|
| 1004 | + return container_of(ddev, struct amdgpu_device, ddev); |
---|
| 1005 | +} |
---|
| 1006 | + |
---|
| 1007 | +static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) |
---|
| 1008 | +{ |
---|
| 1009 | + return &adev->ddev; |
---|
| 1010 | +} |
---|
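The two inline helpers added above convert between the DRM core handle and the amdgpu private structure; drm_to_adev() relies on the drm_device now being embedded in amdgpu_device as the "ddev" member, and adev_to_drm() just returns that member. A minimal illustrative sketch of using them together (the function name is made up for the example):

/* Illustrative only: shows that the two conversions above round-trip,
 * since drm_to_adev() is container_of() on the embedded ddev member.
 */
static void example_dev_conversion(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);	/* DRM handle -> driver private */
	struct drm_device *same = adev_to_drm(adev);	/* driver private -> DRM handle */

	WARN_ON(same != dev);	/* container_of() round-trips exactly */
}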
1552 | 1011 | |
---|
1553 | 1012 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) |
---|
1554 | 1013 | { |
---|
.. | .. |
---|
1556 | 1015 | } |
---|
1557 | 1016 | |
---|
1558 | 1017 | int amdgpu_device_init(struct amdgpu_device *adev, |
---|
1559 | | - struct drm_device *ddev, |
---|
1560 | | - struct pci_dev *pdev, |
---|
1561 | 1018 | uint32_t flags); |
---|
1562 | 1019 | void amdgpu_device_fini(struct amdgpu_device *adev); |
---|
1563 | 1020 | int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); |
---|
1564 | 1021 | |
---|
1565 | | -uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, |
---|
| 1022 | +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, |
---|
| 1023 | + uint32_t *buf, size_t size, bool write); |
---|
| 1024 | +uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, |
---|
| 1025 | + uint32_t reg, uint32_t acc_flags); |
---|
| 1026 | +void amdgpu_device_wreg(struct amdgpu_device *adev, |
---|
| 1027 | + uint32_t reg, uint32_t v, |
---|
1566 | 1028 | uint32_t acc_flags); |
---|
1567 | | -void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, |
---|
1568 | | - uint32_t acc_flags); |
---|
| 1029 | +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, |
---|
| 1030 | + uint32_t reg, uint32_t v); |
---|
1569 | 1031 | void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); |
---|
1570 | 1032 | uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); |
---|
1571 | 1033 | |
---|
1572 | 1034 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); |
---|
1573 | 1035 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); |
---|
1574 | 1036 | |
---|
1575 | | -u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); |
---|
1576 | | -void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); |
---|
1577 | | -u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); |
---|
1578 | | -void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); |
---|
| 1037 | +u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, |
---|
| 1038 | + u32 pcie_index, u32 pcie_data, |
---|
| 1039 | + u32 reg_addr); |
---|
| 1040 | +u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, |
---|
| 1041 | + u32 pcie_index, u32 pcie_data, |
---|
| 1042 | + u32 reg_addr); |
---|
| 1043 | +void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, |
---|
| 1044 | + u32 pcie_index, u32 pcie_data, |
---|
| 1045 | + u32 reg_addr, u32 reg_data); |
---|
| 1046 | +void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, |
---|
| 1047 | + u32 pcie_index, u32 pcie_data, |
---|
| 1048 | + u32 reg_addr, u64 reg_data); |
---|
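The amdgpu_device_indirect_* accessors declared above read and write registers through a PCIE index/data window instead of the plain MMIO aperture. A hedged sketch of the usual index/data pattern, assuming the rmmio aperture and a PCIE index spinlock from the device structure; the driver's real implementation lives in amdgpu_device.c and may differ in detail:

/* Sketch of an index/data indirect read matching the prototype above.
 * Offsets, the lock name and the posting read are assumptions for
 * illustration, not a copy of the driver's implementation.
 */
static u32 example_indirect_rreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr)
{
	void __iomem *index_off = adev->rmmio + pcie_index * 4;
	void __iomem *data_off  = adev->rmmio + pcie_data * 4;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	writel(reg_addr, index_off);	/* select the indirect register */
	readl(index_off);		/* post the index write */
	val = readl(data_off);		/* fetch the selected register */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return val;
}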
1579 | 1049 | |
---|
1580 | 1050 | bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); |
---|
1581 | 1051 | bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); |
---|
.. | .. |
---|
1585 | 1055 | /* |
---|
1586 | 1056 | * Registers read & write functions. |
---|
1587 | 1057 | */ |
---|
1588 | | - |
---|
1589 | | -#define AMDGPU_REGS_IDX (1<<0) |
---|
1590 | 1058 | #define AMDGPU_REGS_NO_KIQ (1<<1) |
---|
1591 | 1059 | |
---|
1592 | | -#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) |
---|
1593 | | -#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) |
---|
| 1060 | +#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) |
---|
| 1061 | +#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) |
---|
| 1062 | + |
---|
| 1063 | +#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg)) |
---|
| 1064 | +#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v)) |
---|
1594 | 1065 | |
---|
1595 | 1066 | #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) |
---|
1596 | 1067 | #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) |
---|
1597 | 1068 | |
---|
1598 | | -#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0) |
---|
1599 | | -#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX) |
---|
1600 | | -#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0)) |
---|
1601 | | -#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0) |
---|
1602 | | -#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX) |
---|
| 1069 | +#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0) |
---|
| 1070 | +#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0)) |
---|
| 1071 | +#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) |
---|
1603 | 1072 | #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
---|
1604 | 1073 | #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
---|
1605 | 1074 | #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) |
---|
1606 | 1075 | #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) |
---|
1607 | 1076 | #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) |
---|
1608 | 1077 | #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) |
---|
| 1078 | +#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) |
---|
| 1079 | +#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) |
---|
1609 | 1080 | #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) |
---|
1610 | 1081 | #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) |
---|
1611 | 1082 | #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) |
---|
.. | .. |
---|
1634 | 1105 | tmp_ |= ((val) & ~(mask)); \ |
---|
1635 | 1106 | WREG32_PLL(reg, tmp_); \ |
---|
1636 | 1107 | } while (0) |
---|
1637 | | -#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) |
---|
| 1108 | + |
---|
| 1109 | +#define WREG32_SMC_P(_Reg, _Val, _Mask) \ |
---|
| 1110 | + do { \ |
---|
| 1111 | + u32 tmp = RREG32_SMC(_Reg); \ |
---|
| 1112 | + tmp &= (_Mask); \ |
---|
| 1113 | + tmp |= ((_Val) & ~(_Mask)); \ |
---|
| 1114 | + WREG32_SMC(_Reg, tmp); \ |
---|
| 1115 | + } while (0) |
---|
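Like the other *_P helpers above it, WREG32_SMC_P is a read-modify-write macro whose mask names the bits to keep from the current register value; bits cleared in the mask are taken from the new value. A small worked example, with a purely hypothetical register offset and field values:

/* Keep-mask convention of WREG32_SMC_P, illustrated with made-up values. */
static void example_smc_rmw(struct amdgpu_device *adev)
{
	/* 0x0000FFFF is the KEEP mask: the low 16 bits of the current SMC
	 * register are preserved, the upper bits are replaced by 0x00010000.
	 * If the register currently reads 0x0000ABCD, 0x0001ABCD is written.
	 */
	WREG32_SMC_P(0x1234, 0x00010000, 0x0000FFFF);
}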
| 1116 | + |
---|
| 1117 | +#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false)) |
---|
1638 | 1118 | #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) |
---|
1639 | 1119 | #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) |
---|
1640 | | - |
---|
1641 | | -#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) |
---|
1642 | | -#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) |
---|
1643 | | -#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) |
---|
1644 | | -#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) |
---|
1645 | 1120 | |
---|
1646 | 1121 | #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT |
---|
1647 | 1122 | #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK |
---|
.. | .. |
---|
1666 | 1141 | #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) |
---|
1667 | 1142 | #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) |
---|
1668 | 1143 | |
---|
1669 | | -static inline struct amdgpu_sdma_instance * |
---|
1670 | | -amdgpu_get_sdma_instance(struct amdgpu_ring *ring) |
---|
1671 | | -{ |
---|
1672 | | - struct amdgpu_device *adev = ring->adev; |
---|
1673 | | - int i; |
---|
1674 | | - |
---|
1675 | | - for (i = 0; i < adev->sdma.num_instances; i++) |
---|
1676 | | - if (&adev->sdma.instance[i].ring == ring) |
---|
1677 | | - break; |
---|
1678 | | - |
---|
1679 | | - if (i < AMDGPU_MAX_SDMA_INSTANCES) |
---|
1680 | | - return &adev->sdma.instance[i]; |
---|
1681 | | - else |
---|
1682 | | - return NULL; |
---|
1683 | | -} |
---|
1684 | | - |
---|
1685 | 1144 | /* |
---|
1686 | 1145 | * ASICs macro. |
---|
1687 | 1146 | */ |
---|
1688 | 1147 | #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) |
---|
1689 | 1148 | #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) |
---|
| 1149 | +#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) |
---|
1690 | 1150 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
---|
1691 | 1151 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) |
---|
1692 | 1152 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) |
---|
.. | .. |
---|
1700 | 1160 | #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) |
---|
1701 | 1161 | #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) |
---|
1702 | 1162 | #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) |
---|
1703 | | -#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) |
---|
1704 | | -#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) |
---|
1705 | | -#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) |
---|
1706 | | -#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) |
---|
1707 | | -#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) |
---|
1708 | | -#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) |
---|
1709 | | -#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) |
---|
1710 | | -#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) |
---|
1711 | | -#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) |
---|
1712 | | -#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) |
---|
1713 | | -#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib))) |
---|
1714 | | -#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) |
---|
1715 | | -#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) |
---|
1716 | | -#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
---|
1717 | | -#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) |
---|
1718 | | -#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) |
---|
1719 | | -#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) |
---|
1720 | | -#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) |
---|
1721 | | -#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
---|
1722 | | -#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
---|
1723 | | -#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
---|
1724 | | -#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) |
---|
1725 | | -#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) |
---|
1726 | | -#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) |
---|
1727 | | -#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) |
---|
1728 | | -#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) |
---|
1729 | | -#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) |
---|
1730 | | -#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) |
---|
1731 | | -#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) |
---|
1732 | | -#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
---|
1733 | | -#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
---|
1734 | | -#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) |
---|
1735 | | -#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
---|
1736 | | -#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev)) |
---|
1737 | | -#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
---|
1738 | | -#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
---|
1739 | | -#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) |
---|
1740 | | -#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) |
---|
1741 | | -#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) |
---|
1742 | | -#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) |
---|
1743 | | -#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) |
---|
1744 | | -#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) |
---|
1745 | | -#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) |
---|
1746 | | -#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) |
---|
1747 | | -#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) |
---|
1748 | | -#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) |
---|
1749 | | -#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) |
---|
1750 | | -#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
---|
1751 | | -#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
---|
1752 | | -#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) |
---|
1753 | | -#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) |
---|
1754 | | -#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
---|
1755 | | -#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) |
---|
1756 | | -#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q)) |
---|
| 1163 | +#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) |
---|
| 1164 | +#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) |
---|
| 1165 | +#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) |
---|
| 1166 | +#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) |
---|
| 1167 | +#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev)) |
---|
| 1168 | +#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev)) |
---|
| 1169 | + |
---|
| 1170 | +#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); |
---|
1757 | 1171 | |
---|
1758 | 1172 | /* Common functions */ |
---|
| 1173 | +bool amdgpu_device_has_job_running(struct amdgpu_device *adev); |
---|
| 1174 | +bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); |
---|
1759 | 1175 | int amdgpu_device_gpu_recover(struct amdgpu_device *adev, |
---|
1760 | | - struct amdgpu_job* job, bool force); |
---|
| 1176 | + struct amdgpu_job* job); |
---|
1761 | 1177 | void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); |
---|
1762 | 1178 | bool amdgpu_device_need_post(struct amdgpu_device *adev); |
---|
1763 | | -void amdgpu_display_update_priority(struct amdgpu_device *adev); |
---|
1764 | 1179 | |
---|
1765 | 1180 | void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, |
---|
1766 | 1181 | u64 num_vis_bytes); |
---|
1767 | | -void amdgpu_device_vram_location(struct amdgpu_device *adev, |
---|
1768 | | - struct amdgpu_gmc *mc, u64 base); |
---|
1769 | | -void amdgpu_device_gart_location(struct amdgpu_device *adev, |
---|
1770 | | - struct amdgpu_gmc *mc); |
---|
1771 | 1182 | int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); |
---|
1772 | 1183 | void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, |
---|
1773 | 1184 | const u32 *registers, |
---|
1774 | 1185 | const u32 array_size); |
---|
1775 | 1186 | |
---|
1776 | | -bool amdgpu_device_is_px(struct drm_device *dev); |
---|
| 1187 | +bool amdgpu_device_supports_boco(struct drm_device *dev); |
---|
| 1188 | +bool amdgpu_device_supports_baco(struct drm_device *dev); |
---|
| 1189 | +bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, |
---|
| 1190 | + struct amdgpu_device *peer_adev); |
---|
| 1191 | +int amdgpu_device_baco_enter(struct drm_device *dev); |
---|
| 1192 | +int amdgpu_device_baco_exit(struct drm_device *dev); |
---|
| 1193 | + |
---|
1777 | 1194 | /* atpx handler */ |
---|
1778 | 1195 | #if defined(CONFIG_VGA_SWITCHEROO) |
---|
1779 | 1196 | void amdgpu_register_atpx_handler(void); |
---|
.. | .. |
---|
1803 | 1220 | extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; |
---|
1804 | 1221 | extern const int amdgpu_max_kms_ioctl; |
---|
1805 | 1222 | |
---|
1806 | | -int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); |
---|
| 1223 | +int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); |
---|
1807 | 1224 | void amdgpu_driver_unload_kms(struct drm_device *dev); |
---|
1808 | 1225 | void amdgpu_driver_lastclose_kms(struct drm_device *dev); |
---|
1809 | 1226 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); |
---|
1810 | 1227 | void amdgpu_driver_postclose_kms(struct drm_device *dev, |
---|
1811 | 1228 | struct drm_file *file_priv); |
---|
1812 | 1229 | int amdgpu_device_ip_suspend(struct amdgpu_device *adev); |
---|
1813 | | -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); |
---|
1814 | | -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); |
---|
1815 | | -u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); |
---|
1816 | | -int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
---|
1817 | | -void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
---|
| 1230 | +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); |
---|
| 1231 | +int amdgpu_device_resume(struct drm_device *dev, bool fbcon); |
---|
| 1232 | +u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); |
---|
| 1233 | +int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); |
---|
| 1234 | +void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); |
---|
1818 | 1235 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
---|
1819 | 1236 | unsigned long arg); |
---|
1820 | 1237 | |
---|
.. | .. |
---|
1845 | 1262 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, |
---|
1846 | 1263 | u8 perf_req, bool advertise); |
---|
1847 | 1264 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); |
---|
| 1265 | + |
---|
| 1266 | +void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, |
---|
| 1267 | + struct amdgpu_dm_backlight_caps *caps); |
---|
1848 | 1268 | #else |
---|
1849 | 1269 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } |
---|
1850 | 1270 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } |
---|
.. | .. |
---|
1860 | 1280 | static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; } |
---|
1861 | 1281 | #endif |
---|
1862 | 1282 | |
---|
| 1283 | + |
---|
| 1284 | +void amdgpu_register_gpu_instance(struct amdgpu_device *adev); |
---|
| 1285 | +void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); |
---|
| 1286 | + |
---|
| 1287 | +pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, |
---|
| 1288 | + pci_channel_state_t state); |
---|
| 1289 | +pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev); |
---|
| 1290 | +pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev); |
---|
| 1291 | +void amdgpu_pci_resume(struct pci_dev *pdev); |
---|
| 1292 | + |
---|
| 1293 | +bool amdgpu_device_cache_pci_state(struct pci_dev *pdev); |
---|
| 1294 | +bool amdgpu_device_load_pci_state(struct pci_dev *pdev); |
---|
| 1295 | + |
---|
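The four amdgpu_pci_* prototypes above match the callbacks of the PCI core's struct pci_error_handlers, which is how the driver participates in AER recovery. A sketch of how they would be wired up; the instance name is illustrative and the real table is defined in the driver's .c files, not in this header:

/* Illustrative wiring of the AER callbacks declared above. */
static const struct pci_error_handlers example_amdgpu_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,	/* a PCI error was reported */
	.mmio_enabled	= amdgpu_pci_mmio_enabled,	/* MMIO re-enabled after the error */
	.slot_reset	= amdgpu_pci_slot_reset,	/* slot/link has been reset */
	.resume		= amdgpu_pci_resume,		/* normal operation resumes */
};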
1863 | 1296 | #include "amdgpu_object.h" |
---|
| 1297 | + |
---|
| 1298 | +/* used by df_v3_6.c and amdgpu_pmu.c */ |
---|
| 1299 | +#define AMDGPU_PMU_ATTR(_name, _object) \ |
---|
| 1300 | +static ssize_t \ |
---|
| 1301 | +_name##_show(struct device *dev, \ |
---|
| 1302 | + struct device_attribute *attr, \ |
---|
| 1303 | + char *page) \ |
---|
| 1304 | +{ \ |
---|
| 1305 | + BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1); \ |
---|
| 1306 | + return sprintf(page, _object "\n"); \ |
---|
| 1307 | +} \ |
---|
| 1308 | + \ |
---|
| 1309 | +static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name) |
---|
| 1310 | + |
---|
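As the comment above notes, AMDGPU_PMU_ATTR stamps out a read-only sysfs attribute whose show() callback prints a fixed string. A hedged example of an invocation and roughly what the preprocessor produces; the attribute name and string are made up:

/* Hypothetical invocation, not taken from the driver: */
AMDGPU_PMU_ATTR(pmu_name, "amdgpu_example_pmu");

/* which expands to approximately:
 *
 * static ssize_t pmu_name_show(struct device *dev,
 *				struct device_attribute *attr, char *page)
 * {
 *	BUILD_BUG_ON(sizeof("amdgpu_example_pmu") >= PAGE_SIZE - 1);
 *	return sprintf(page, "amdgpu_example_pmu" "\n");
 * }
 *
 * static struct device_attribute pmu_attr_pmu_name = __ATTR_RO(pmu_name);
 */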
| 1311 | +static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) |
---|
| 1312 | +{ |
---|
| 1313 | + return adev->gmc.tmz_enabled; |
---|
| 1314 | +} |
---|
| 1315 | + |
---|
| 1316 | +static inline int amdgpu_in_reset(struct amdgpu_device *adev) |
---|
| 1317 | +{ |
---|
| 1318 | + return atomic_read(&adev->in_gpu_reset); |
---|
| 1319 | +} |
---|
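amdgpu_in_reset() reads the atomic in_gpu_reset flag from the device structure above; code that must not race with a reset in progress typically also takes the read side of reset_sem. A minimal sketch of that pattern, assuming a caller that simply backs off while a reset is running; the function and the -EBUSY policy are illustrative, not code from this header:

/* Illustrative guard against a concurrent GPU reset. */
static int example_hw_access(struct amdgpu_device *adev)
{
	/* Cheap, lockless check: a reset is already in flight. */
	if (amdgpu_in_reset(adev))
		return -EBUSY;

	/* Hold off a reset for the duration of the hardware access. */
	if (!down_read_trylock(&adev->reset_sem))
		return -EBUSY;

	/* ... touch registers / rings here ... */

	up_read(&adev->reset_sem);
	return 0;
}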
1864 | 1320 | #endif |
---|