 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */

 #ifndef __A6XX_GPU_H__
 #define __A6XX_GPU_H__
..

         struct msm_ringbuffer *cur_ring;

+        /**
+         * cur_ctx_seqno:
+         *
+         * The ctx->seqno value of the context with current pgtables
+         * installed. Tracked by seqno rather than pointer value to
+         * avoid dangling pointers, and cases where a ctx can be freed
+         * and a new one created with the same address.
+         */
+        int cur_ctx_seqno;
+
         struct a6xx_gmu gmu;
+
+        struct drm_gem_object *shadow_bo;
+        uint64_t shadow_iova;
+        uint32_t *shadow;
+
+        bool has_whereami;
 };
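The kerneldoc above points at an ABA-style hazard: a freed context and a newly allocated one can share the same address, so a pointer comparison could wrongly conclude that the right pagetables are already installed. A minimal, userspace-compilable sketch of the seqno comparison pattern follows; the names gpu_sketch, ctx_sketch and needs_pgtable_switch are illustrative only (not driver code), and it assumes ctx->seqno is handed out monotonically and never reused.

/* Hypothetical sketch, not the driver code: a seqno compare avoids the
 * pointer-reuse (ABA) problem described in the comment above. */
#include <stdbool.h>
#include <stdio.h>

struct ctx_sketch {
        int seqno;              /* assigned once per context, never reused */
};

struct gpu_sketch {
        int cur_ctx_seqno;      /* seqno of the ctx whose pgtables are installed */
};

/* True when the incoming submit belongs to a different context than the
 * one whose pagetables are currently installed. */
static bool needs_pgtable_switch(const struct gpu_sketch *gpu,
                                 const struct ctx_sketch *ctx)
{
        return ctx->seqno != gpu->cur_ctx_seqno;
}

int main(void)
{
        struct gpu_sketch gpu = { .cur_ctx_seqno = 0 };
        struct ctx_sketch ctx = { .seqno = 1 };

        if (needs_pgtable_switch(&gpu, &ctx)) {
                /* ...a real driver would emit the pagetable switch here... */
                gpu.cur_ctx_seqno = ctx.seqno;
        }
        printf("current ctx seqno: %d\n", gpu.cur_ctx_seqno);
        return 0;
}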

 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
..
  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  * registers starting at _reg.
  */
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
         ((1 << 31) | \
         (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
..

 #define A6XX_PROTECT_RDONLY(_reg, _len) \
         ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

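As a worked example of the packing these two macros perform (per the comment, A6XX_PROTECT_NORDWR blocks reads and writes while A6XX_PROTECT_RDONLY leaves reads allowed): the register offset sits in the low 18 bits, the length above it, and bit 31 marks the fully blocked variant. The sketch below just evaluates the macros with an arbitrary offset and length; 0x100 and 4 are made-up values, not real a6xx protect ranges.

/* Standalone sketch: evaluate the two macros with made-up values to show
 * the bit packing.  0x100/4 are arbitrary, not real a6xx protect entries. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define A6XX_PROTECT_NORDWR(_reg, _len) \
        ((1 << 31) | \
        (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

#define A6XX_PROTECT_RDONLY(_reg, _len) \
        ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

int main(void)
{
        uint32_t nordwr = A6XX_PROTECT_NORDWR(0x100, 4); /* 0x80100100 */
        uint32_t rdonly = A6XX_PROTECT_RDONLY(0x100, 4); /* 0x00100100 */

        printf("NORDWR: 0x%08" PRIx32 " (bit 31 set: reads blocked as well)\n", nordwr);
        printf("RDONLY: 0x%08" PRIx32 " (bit 31 clear: reads still allowed)\n", rdonly);
        return 0;
}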
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+        if (adreno_is_a630(gpu))
+                return false;
+
+        return true;
+}
+
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+                ((_ring)->id * sizeof(uint32_t)))
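Taken together, the new shadow_bo/shadow_iova/shadow fields and this macro appear to give every ringbuffer its own 32-bit slot in one shared shadow buffer, with has_whereami presumably indicating that the CP firmware can write its read pointer there; that reading is an inference from this header alone, not something the diff states. A minimal sketch of the address arithmetic, using trimmed stand-in structs and an arbitrary base IOVA:

/* Sketch only: ring_sketch/a6xx_gpu_sketch are stand-ins carrying just the
 * fields the macro touches; 0x1000 and 4 rings are arbitrary examples. */
#include <stdint.h>
#include <stdio.h>

struct ring_sketch {
        int id;
};

struct a6xx_gpu_sketch {
        uint64_t shadow_iova;   /* GPU-visible base of the shadow buffer */
};

#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
                ((_ring)->id * sizeof(uint32_t)))

int main(void)
{
        struct a6xx_gpu_sketch gpu = { .shadow_iova = 0x1000 };

        for (int id = 0; id < 4; id++) {
                struct ring_sketch ring = { .id = id };
                printf("ring %d shadow slot: 0x%llx\n", id,
                       (unsigned long long)shadowptr(&gpu, &ring));
        }
        return 0;
}

With a base of 0x1000 the slots land at 0x1000, 0x1004, 0x1008 and 0x100c, i.e. sizeof(uint32_t) apart per ring id.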

 int a6xx_gmu_resume(struct a6xx_gpu *gpu);
 int a6xx_gmu_stop(struct a6xx_gpu *gpu);

-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);

-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);

 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);

-int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);

+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+                struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
 #endif /* __A6XX_GPU_H__ */
---|