.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. |
---|
3 | 4 | * Copyright (C) 2013 Red Hat |
---|
4 | 5 | * Author: Rob Clark <robdclark@gmail.com> |
---|
5 | | - * |
---|
6 | | - * This program is free software; you can redistribute it and/or modify it |
---|
7 | | - * under the terms of the GNU General Public License version 2 as published by |
---|
8 | | - * the Free Software Foundation. |
---|
9 | | - * |
---|
10 | | - * This program is distributed in the hope that it will be useful, but WITHOUT |
---|
11 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
12 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
13 | | - * more details. |
---|
14 | | - * |
---|
15 | | - * You should have received a copy of the GNU General Public License along with |
---|
16 | | - * this program. If not, see <http://www.gnu.org/licenses/>. |
---|
17 | 6 | */ |
---|
18 | 7 | |
---|
19 | 8 | #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ |
---|
20 | 9 | |
---|
21 | | -#include <drm/drm_crtc.h> |
---|
22 | 10 | #include <linux/debugfs.h> |
---|
23 | | -#include <linux/of_irq.h> |
---|
24 | 11 | #include <linux/dma-buf.h> |
---|
| 12 | +#include <linux/of_irq.h> |
---|
| 13 | +#include <linux/pm_opp.h> |
---|
| 14 | + |
---|
| 15 | +#include <drm/drm_crtc.h> |
---|
| 16 | +#include <drm/drm_file.h> |
---|
25 | 17 | |
---|
26 | 18 | #include "msm_drv.h" |
---|
27 | 19 | #include "msm_mmu.h" |
---|
.. | .. |
---|
39 | 31 | #define CREATE_TRACE_POINTS |
---|
40 | 32 | #include "dpu_trace.h" |
---|
41 | 33 | |
---|
42 | | -static const char * const iommu_ports[] = { |
---|
43 | | - "mdp_0", |
---|
44 | | -}; |
---|
45 | | - |
---|
46 | 34 | /* |
---|
47 | 35 | * To enable overall DRM driver logging |
---|
48 | 36 | * # echo 0x2 > /sys/module/drm/parameters/debug |
---|
.. | .. |
---|
55 | 43 | #define DPU_DEBUGFS_DIR "msm_dpu" |
---|
56 | 44 | #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" |
---|
57 | 45 | |
---|
| 46 | +#define MIN_IB_BW 400000000ULL /* Min ib vote 400 MB/s */ |
---|
| 47 | + |
---|
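For reference, icc_set_bw() takes its bandwidth arguments in kBps, and the interconnect header's Bps_to_icc() helper simply divides by 1000, so Bps_to_icc(MIN_IB_BW) = 400000000 / 1000 = 400000 kBps, which is the 400 MB/s floor the comment refers to.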
58 | 48 | static int dpu_kms_hw_init(struct msm_kms *kms); |
---|
59 | | -static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); |
---|
60 | | - |
---|
61 | | -static unsigned long dpu_iomap_size(struct platform_device *pdev, |
---|
62 | | - const char *name) |
---|
63 | | -{ |
---|
64 | | - struct resource *res; |
---|
65 | | - |
---|
66 | | - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); |
---|
67 | | - if (!res) { |
---|
68 | | - DRM_ERROR("failed to get memory resource: %s\n", name); |
---|
69 | | - return 0; |
---|
70 | | - } |
---|
71 | | - |
---|
72 | | - return resource_size(res); |
---|
73 | | -} |
---|
| 49 | +static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); |
---|
74 | 50 | |
---|
75 | 51 | #ifdef CONFIG_DEBUG_FS |
---|
76 | 52 | static int _dpu_danger_signal_status(struct seq_file *s, |
---|
77 | 53 | bool danger_status) |
---|
78 | 54 | { |
---|
79 | 55 | struct dpu_kms *kms = (struct dpu_kms *)s->private; |
---|
80 | | - struct msm_drm_private *priv; |
---|
81 | 56 | struct dpu_danger_safe_status status; |
---|
82 | 57 | int i; |
---|
83 | 58 | |
---|
84 | | - if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) { |
---|
| 59 | + if (!kms->hw_mdp) { |
---|
85 | 60 | DPU_ERROR("invalid arg(s)\n"); |
---|
86 | 61 | return 0; |
---|
87 | 62 | } |
---|
88 | 63 | |
---|
89 | | - priv = kms->dev->dev_private; |
---|
90 | 64 | memset(&status, 0, sizeof(struct dpu_danger_safe_status)); |
---|
91 | 65 | |
---|
92 | 66 | pm_runtime_get_sync(&kms->pdev->dev); |
---|
.. | .. |
---|
113 | 87 | return 0; |
---|
114 | 88 | } |
---|
115 | 89 | |
---|
116 | | -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ |
---|
117 | | -static int __prefix ## _open(struct inode *inode, struct file *file) \ |
---|
118 | | -{ \ |
---|
119 | | - return single_open(file, __prefix ## _show, inode->i_private); \ |
---|
120 | | -} \ |
---|
121 | | -static const struct file_operations __prefix ## _fops = { \ |
---|
122 | | - .owner = THIS_MODULE, \ |
---|
123 | | - .open = __prefix ## _open, \ |
---|
124 | | - .release = single_release, \ |
---|
125 | | - .read = seq_read, \ |
---|
126 | | - .llseek = seq_lseek, \ |
---|
127 | | -} |
---|
128 | | - |
---|
129 | 90 | static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v) |
---|
130 | 91 | { |
---|
131 | 92 | return _dpu_danger_signal_status(s, true); |
---|
132 | 93 | } |
---|
133 | | -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats); |
---|
| 94 | +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats); |
---|
134 | 95 | |
---|
135 | 96 | static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v) |
---|
136 | 97 | { |
---|
137 | 98 | return _dpu_danger_signal_status(s, false); |
---|
138 | 99 | } |
---|
139 | | -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats); |
---|
| 100 | +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats); |
---|
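The hand-rolled DEFINE_DPU_DEBUGFS_SEQ_FOPS() macro removed above is replaced by the generic DEFINE_SHOW_ATTRIBUTE() from &lt;linux/seq_file.h&gt;. A minimal sketch of that pattern, using an illustrative name (foo_stats is not part of this driver):

```c
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* seq_file "show" callback; the data pointer handed to
 * debugfs_create_file() shows up here as s->private. */
static int foo_stats_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "example output\n");
	return 0;
}
/* Generates foo_stats_fops with open/read/llseek/release filled in. */
DEFINE_SHOW_ATTRIBUTE(foo_stats);

/* Usage:
 * debugfs_create_file("foo_stats", 0400, parent, data, &foo_stats_fops);
 */
```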
140 | 101 | |
---|
141 | | -static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms) |
---|
142 | | -{ |
---|
143 | | - debugfs_remove_recursive(dpu_kms->debugfs_danger); |
---|
144 | | - dpu_kms->debugfs_danger = NULL; |
---|
145 | | -} |
---|
146 | | - |
---|
147 | | -static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, |
---|
| 102 | +static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, |
---|
148 | 103 | struct dentry *parent) |
---|
149 | 104 | { |
---|
150 | | - dpu_kms->debugfs_danger = debugfs_create_dir("danger", |
---|
151 | | - parent); |
---|
152 | | - if (!dpu_kms->debugfs_danger) { |
---|
153 | | - DPU_ERROR("failed to create danger debugfs\n"); |
---|
154 | | - return -EINVAL; |
---|
155 | | - } |
---|
| 105 | + struct dentry *entry = debugfs_create_dir("danger", parent); |
---|
156 | 106 | |
---|
157 | | - debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger, |
---|
| 107 | + debugfs_create_file("danger_status", 0600, entry, |
---|
158 | 108 | dpu_kms, &dpu_debugfs_danger_stats_fops); |
---|
159 | | - debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger, |
---|
| 109 | + debugfs_create_file("safe_status", 0600, entry, |
---|
160 | 110 | dpu_kms, &dpu_debugfs_safe_stats_fops); |
---|
161 | | - |
---|
162 | | - return 0; |
---|
163 | 111 | } |
---|
164 | 112 | |
---|
165 | 113 | static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) |
---|
166 | 114 | { |
---|
167 | | - struct dpu_debugfs_regset32 *regset; |
---|
168 | | - struct dpu_kms *dpu_kms; |
---|
169 | | - struct drm_device *dev; |
---|
170 | | - struct msm_drm_private *priv; |
---|
| 115 | + struct dpu_debugfs_regset32 *regset = s->private; |
---|
| 116 | + struct dpu_kms *dpu_kms = regset->dpu_kms; |
---|
171 | 117 | void __iomem *base; |
---|
172 | 118 | uint32_t i, addr; |
---|
173 | 119 | |
---|
174 | | - if (!s || !s->private) |
---|
175 | | - return 0; |
---|
176 | | - |
---|
177 | | - regset = s->private; |
---|
178 | | - |
---|
179 | | - dpu_kms = regset->dpu_kms; |
---|
180 | | - if (!dpu_kms || !dpu_kms->mmio) |
---|
181 | | - return 0; |
---|
182 | | - |
---|
183 | | - dev = dpu_kms->dev; |
---|
184 | | - if (!dev) |
---|
185 | | - return 0; |
---|
186 | | - |
---|
187 | | - priv = dev->dev_private; |
---|
188 | | - if (!priv) |
---|
| 120 | + if (!dpu_kms->mmio) |
---|
189 | 121 | return 0; |
---|
190 | 122 | |
---|
191 | 123 | base = dpu_kms->mmio + regset->offset; |
---|
.. | .. |
---|
235 | 167 | } |
---|
236 | 168 | } |
---|
237 | 169 | |
---|
238 | | -void *dpu_debugfs_create_regset32(const char *name, umode_t mode, |
---|
| 170 | +void dpu_debugfs_create_regset32(const char *name, umode_t mode, |
---|
239 | 171 | void *parent, struct dpu_debugfs_regset32 *regset) |
---|
240 | 172 | { |
---|
241 | 173 | if (!name || !regset || !regset->dpu_kms || !regset->blk_len) |
---|
242 | | - return NULL; |
---|
| 174 | + return; |
---|
243 | 175 | |
---|
244 | 176 | /* make sure offset is a multiple of 4 */ |
---|
245 | 177 | regset->offset = round_down(regset->offset, 4); |
---|
246 | 178 | |
---|
247 | | - return debugfs_create_file(name, mode, parent, |
---|
248 | | - regset, &dpu_fops_regset32); |
---|
| 179 | + debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32); |
---|
249 | 180 | } |
---|
250 | 181 | |
---|
251 | | -static int _dpu_debugfs_init(struct dpu_kms *dpu_kms) |
---|
| 182 | +static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) |
---|
252 | 183 | { |
---|
253 | | - void *p; |
---|
254 | | - int rc; |
---|
| 184 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 185 | + void *p = dpu_hw_util_get_log_mask_ptr(); |
---|
| 186 | + struct dentry *entry; |
---|
| 187 | + struct drm_device *dev; |
---|
| 188 | + struct msm_drm_private *priv; |
---|
255 | 189 | |
---|
256 | | - p = dpu_hw_util_get_log_mask_ptr(); |
---|
257 | | - |
---|
258 | | - if (!dpu_kms || !p) |
---|
| 190 | + if (!p) |
---|
259 | 191 | return -EINVAL; |
---|
260 | 192 | |
---|
261 | | - dpu_kms->debugfs_root = debugfs_create_dir("debug", |
---|
262 | | - dpu_kms->dev->primary->debugfs_root); |
---|
263 | | - if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) { |
---|
264 | | - DRM_ERROR("debugfs create_dir failed %ld\n", |
---|
265 | | - PTR_ERR(dpu_kms->debugfs_root)); |
---|
266 | | - return PTR_ERR(dpu_kms->debugfs_root); |
---|
267 | | - } |
---|
| 193 | + dev = dpu_kms->dev; |
---|
| 194 | + priv = dev->dev_private; |
---|
268 | 195 | |
---|
269 | | - rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root); |
---|
270 | | - if (rc) { |
---|
271 | | - DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc); |
---|
272 | | - return rc; |
---|
273 | | - } |
---|
| 196 | + entry = debugfs_create_dir("debug", minor->debugfs_root); |
---|
274 | 197 | |
---|
275 | | - /* allow root to be NULL */ |
---|
276 | | - debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p); |
---|
| 198 | + debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); |
---|
277 | 199 | |
---|
278 | | - (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root); |
---|
279 | | - (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root); |
---|
280 | | - (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root); |
---|
| 200 | + dpu_debugfs_danger_init(dpu_kms, entry); |
---|
| 201 | + dpu_debugfs_vbif_init(dpu_kms, entry); |
---|
| 202 | + dpu_debugfs_core_irq_init(dpu_kms, entry); |
---|
281 | 203 | |
---|
282 | | - rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root); |
---|
283 | | - if (rc) { |
---|
284 | | - DPU_ERROR("failed to init perf %d\n", rc); |
---|
285 | | - return rc; |
---|
286 | | - } |
---|
| 204 | + if (priv->dp) |
---|
| 205 | + msm_dp_debugfs_init(priv->dp, minor); |
---|
287 | 206 | |
---|
| 207 | + return dpu_core_perf_debugfs_init(dpu_kms, entry); |
---|
| 208 | +} |
---|
| 209 | +#endif |
---|
| 210 | + |
---|
| 211 | +/* Global/shared object state funcs */ |
---|
| 212 | + |
---|
| 213 | +/* |
---|
| 214 | + * This is a helper that returns the private state currently in operation. |
---|
| 215 | + * Note that this would return the "old_state" if called in the atomic check |
---|
| 216 | + * path, and the "new_state" after the atomic swap has been done. |
---|
| 217 | + */ |
---|
| 218 | +struct dpu_global_state * |
---|
| 219 | +dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) |
---|
| 220 | +{ |
---|
| 221 | + return to_dpu_global_state(dpu_kms->global_state.state); |
---|
| 222 | +} |
---|
| 223 | + |
---|
| 224 | +/* |
---|
| 225 | + * This acquires the modeset lock set aside for global state, creates |
---|
| 226 | + * a new duplicated private object state. |
---|
| 227 | + */ |
---|
| 228 | +struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) |
---|
| 229 | +{ |
---|
| 230 | + struct msm_drm_private *priv = s->dev->dev_private; |
---|
| 231 | + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); |
---|
| 232 | + struct drm_private_state *priv_state; |
---|
| 233 | + int ret; |
---|
| 234 | + |
---|
| 235 | + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); |
---|
| 236 | + if (ret) |
---|
| 237 | + return ERR_PTR(ret); |
---|
| 238 | + |
---|
| 239 | + priv_state = drm_atomic_get_private_obj_state(s, |
---|
| 240 | + &dpu_kms->global_state); |
---|
| 241 | + if (IS_ERR(priv_state)) |
---|
| 242 | + return ERR_CAST(priv_state); |
---|
| 243 | + |
---|
| 244 | + return to_dpu_global_state(priv_state); |
---|
| 245 | +} |
---|
| 246 | + |
---|
| 247 | +static struct drm_private_state * |
---|
| 248 | +dpu_kms_global_duplicate_state(struct drm_private_obj *obj) |
---|
| 249 | +{ |
---|
| 250 | + struct dpu_global_state *state; |
---|
| 251 | + |
---|
| 252 | + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); |
---|
| 253 | + if (!state) |
---|
| 254 | + return NULL; |
---|
| 255 | + |
---|
| 256 | + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); |
---|
| 257 | + |
---|
| 258 | + return &state->base; |
---|
| 259 | +} |
---|
| 260 | + |
---|
| 261 | +static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, |
---|
| 262 | + struct drm_private_state *state) |
---|
| 263 | +{ |
---|
| 264 | + struct dpu_global_state *dpu_state = to_dpu_global_state(state); |
---|
| 265 | + |
---|
| 266 | + kfree(dpu_state); |
---|
| 267 | +} |
---|
| 268 | + |
---|
| 269 | +static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { |
---|
| 270 | + .atomic_duplicate_state = dpu_kms_global_duplicate_state, |
---|
| 271 | + .atomic_destroy_state = dpu_kms_global_destroy_state, |
---|
| 272 | +}; |
---|
| 273 | + |
---|
| 274 | +static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) |
---|
| 275 | +{ |
---|
| 276 | + struct dpu_global_state *state; |
---|
| 277 | + |
---|
| 278 | + drm_modeset_lock_init(&dpu_kms->global_state_lock); |
---|
| 279 | + |
---|
| 280 | + state = kzalloc(sizeof(*state), GFP_KERNEL); |
---|
| 281 | + if (!state) |
---|
| 282 | + return -ENOMEM; |
---|
| 283 | + |
---|
| 284 | + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, |
---|
| 285 | + &state->base, |
---|
| 286 | + &dpu_kms_global_state_funcs); |
---|
288 | 287 | return 0; |
---|
289 | 288 | } |
---|
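As the comments above note, dpu_kms_get_global_state() follows the standard DRM private-object pattern: it takes the dedicated modeset lock and returns a duplicated state that an atomic check/commit path may modify, while dpu_kms_get_existing_global_state() only peeks at whatever state is currently in effect. A sketch of how a check path would typically consume it (example_atomic_check and the reservation step are illustrative, not functions from this driver):

```c
/* Illustrative caller only: obtain the duplicated global state during
 * atomic check and modify it; on -EDEADLK the atomic core backs off
 * and retries the whole check once the contended lock is available. */
static int example_atomic_check(struct dpu_kms *dpu_kms,
				struct drm_atomic_state *state)
{
	struct dpu_global_state *global_state;

	global_state = dpu_kms_get_global_state(state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	/* ... update global_state, e.g. hardware-block reservations ... */

	return 0;
}
```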
290 | 289 | |
---|
291 | | -static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms) |
---|
| 290 | +static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) |
---|
292 | 291 | { |
---|
293 | | - /* don't need to NULL check debugfs_root */ |
---|
294 | | - if (dpu_kms) { |
---|
295 | | - dpu_debugfs_vbif_destroy(dpu_kms); |
---|
296 | | - dpu_debugfs_danger_destroy(dpu_kms); |
---|
297 | | - dpu_debugfs_core_irq_destroy(dpu_kms); |
---|
298 | | - debugfs_remove_recursive(dpu_kms->debugfs_root); |
---|
| 292 | + struct icc_path *path0; |
---|
| 293 | + struct icc_path *path1; |
---|
| 294 | + struct drm_device *dev = dpu_kms->dev; |
---|
| 295 | + |
---|
| 296 | + path0 = of_icc_get(dev->dev, "mdp0-mem"); |
---|
| 297 | + path1 = of_icc_get(dev->dev, "mdp1-mem"); |
---|
| 298 | + |
---|
| 299 | + if (IS_ERR_OR_NULL(path0)) |
---|
| 300 | + return PTR_ERR_OR_ZERO(path0); |
---|
| 301 | + |
---|
| 302 | + dpu_kms->path[0] = path0; |
---|
| 303 | + dpu_kms->num_paths = 1; |
---|
| 304 | + |
---|
| 305 | + if (!IS_ERR_OR_NULL(path1)) { |
---|
| 306 | + dpu_kms->path[1] = path1; |
---|
| 307 | + dpu_kms->num_paths++; |
---|
299 | 308 | } |
---|
| 309 | + return 0; |
---|
300 | 310 | } |
---|
301 | | -#else |
---|
302 | | -static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms) |
---|
303 | | -{ |
---|
304 | | -} |
---|
305 | | -#endif |
---|
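dpu_kms_parse_data_bus_icc_path() above only looks up the "mdp0-mem"/"mdp1-mem" interconnect paths; the actual bandwidth votes are cast later from the runtime-PM and core-perf paths. A minimal sketch of the interconnect API involved, assuming a device with a single path (example_icc_vote is illustrative, not this driver's code):

```c
#include <linux/interconnect.h>

/* Illustrative only: request a path from DT and vote avg/peak bandwidth.
 * of_icc_get() returns NULL when CONFIG_INTERCONNECT is disabled, and the
 * icc_set_bw()/icc_put() calls tolerate a NULL path in that case. */
static int example_icc_vote(struct device *dev)
{
	struct icc_path *path;

	path = of_icc_get(dev, "mdp0-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* avg = 0, peak = the 400 MB/s floor defined above (units: kBps) */
	icc_set_bw(path, 0, Bps_to_icc(MIN_IB_BW));

	icc_put(path);
	return 0;
}
```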
306 | 311 | |
---|
307 | 312 | static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) |
---|
308 | 313 | { |
---|
.. | .. |
---|
314 | 319 | dpu_crtc_vblank(crtc, false); |
---|
315 | 320 | } |
---|
316 | 321 | |
---|
| 322 | +static void dpu_kms_enable_commit(struct msm_kms *kms) |
---|
| 323 | +{ |
---|
| 324 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 325 | + pm_runtime_get_sync(&dpu_kms->pdev->dev); |
---|
| 326 | +} |
---|
| 327 | + |
---|
| 328 | +static void dpu_kms_disable_commit(struct msm_kms *kms) |
---|
| 329 | +{ |
---|
| 330 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 331 | + pm_runtime_put_sync(&dpu_kms->pdev->dev); |
---|
| 332 | +} |
---|
| 333 | + |
---|
| 334 | +static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) |
---|
| 335 | +{ |
---|
| 336 | + struct drm_encoder *encoder; |
---|
| 337 | + |
---|
| 338 | + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { |
---|
| 339 | + ktime_t vsync_time; |
---|
| 340 | + |
---|
| 341 | + if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0) |
---|
| 342 | + return vsync_time; |
---|
| 343 | + } |
---|
| 344 | + |
---|
| 345 | + return ktime_get(); |
---|
| 346 | +} |
---|
| 347 | + |
---|
317 | 348 | static void dpu_kms_prepare_commit(struct msm_kms *kms, |
---|
318 | 349 | struct drm_atomic_state *state) |
---|
319 | 350 | { |
---|
320 | | - struct dpu_kms *dpu_kms; |
---|
321 | | - struct msm_drm_private *priv; |
---|
322 | | - struct drm_device *dev; |
---|
| 351 | + struct drm_crtc *crtc; |
---|
| 352 | + struct drm_crtc_state *crtc_state; |
---|
323 | 353 | struct drm_encoder *encoder; |
---|
| 354 | + int i; |
---|
324 | 355 | |
---|
325 | 356 | if (!kms) |
---|
326 | 357 | return; |
---|
327 | | - dpu_kms = to_dpu_kms(kms); |
---|
328 | | - dev = dpu_kms->dev; |
---|
329 | 358 | |
---|
330 | | - if (!dev || !dev->dev_private) |
---|
331 | | - return; |
---|
332 | | - priv = dev->dev_private; |
---|
333 | | - pm_runtime_get_sync(&dpu_kms->pdev->dev); |
---|
334 | | - |
---|
335 | | - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) |
---|
336 | | - if (encoder->crtc != NULL) |
---|
| 359 | + /* Call prepare_commit for all affected encoders */ |
---|
| 360 | + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
---|
| 361 | + drm_for_each_encoder_mask(encoder, crtc->dev, |
---|
| 362 | + crtc_state->encoder_mask) { |
---|
337 | 363 | dpu_encoder_prepare_commit(encoder); |
---|
| 364 | + } |
---|
| 365 | + } |
---|
| 366 | +} |
---|
| 367 | + |
---|
| 368 | +static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask) |
---|
| 369 | +{ |
---|
| 370 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 371 | + struct drm_crtc *crtc; |
---|
| 372 | + |
---|
| 373 | + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) { |
---|
| 374 | + if (!crtc->state->active) |
---|
| 375 | + continue; |
---|
| 376 | + |
---|
| 377 | + trace_dpu_kms_commit(DRMID(crtc)); |
---|
| 378 | + dpu_crtc_commit_kickoff(crtc); |
---|
| 379 | + } |
---|
338 | 380 | } |
---|
339 | 381 | |
---|
340 | 382 | /* |
---|
.. | .. |
---|
344 | 386 | void dpu_kms_encoder_enable(struct drm_encoder *encoder) |
---|
345 | 387 | { |
---|
346 | 388 | const struct drm_encoder_helper_funcs *funcs = encoder->helper_private; |
---|
347 | | - struct drm_crtc *crtc = encoder->crtc; |
---|
| 389 | + struct drm_device *dev = encoder->dev; |
---|
| 390 | + struct drm_crtc *crtc; |
---|
348 | 391 | |
---|
349 | 392 | /* Forward this enable call to the commit hook */ |
---|
350 | 393 | if (funcs && funcs->commit) |
---|
351 | 394 | funcs->commit(encoder); |
---|
352 | 395 | |
---|
353 | | - if (crtc && crtc->state->active) { |
---|
354 | | - trace_dpu_kms_enc_enable(DRMID(crtc)); |
---|
355 | | - dpu_crtc_commit_kickoff(crtc); |
---|
356 | | - } |
---|
357 | | -} |
---|
358 | | - |
---|
359 | | -static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state) |
---|
360 | | -{ |
---|
361 | | - struct drm_crtc *crtc; |
---|
362 | | - struct drm_crtc_state *crtc_state; |
---|
363 | | - int i; |
---|
364 | | - |
---|
365 | | - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
---|
366 | | - /* If modeset is required, kickoff is run in encoder_enable */ |
---|
367 | | - if (drm_atomic_crtc_needs_modeset(crtc_state)) |
---|
| 396 | + drm_for_each_crtc(crtc, dev) { |
---|
| 397 | + if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) |
---|
368 | 398 | continue; |
---|
369 | 399 | |
---|
370 | | - if (crtc->state->active) { |
---|
371 | | - trace_dpu_kms_commit(DRMID(crtc)); |
---|
372 | | - dpu_crtc_commit_kickoff(crtc); |
---|
373 | | - } |
---|
| 400 | + trace_dpu_kms_enc_enable(DRMID(crtc)); |
---|
374 | 401 | } |
---|
375 | 402 | } |
---|
376 | 403 | |
---|
377 | | -static void dpu_kms_complete_commit(struct msm_kms *kms, |
---|
378 | | - struct drm_atomic_state *old_state) |
---|
| 404 | +static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask) |
---|
379 | 405 | { |
---|
380 | | - struct dpu_kms *dpu_kms; |
---|
381 | | - struct msm_drm_private *priv; |
---|
| 406 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
382 | 407 | struct drm_crtc *crtc; |
---|
383 | | - struct drm_crtc_state *old_crtc_state; |
---|
384 | | - int i; |
---|
385 | | - |
---|
386 | | - if (!kms || !old_state) |
---|
387 | | - return; |
---|
388 | | - dpu_kms = to_dpu_kms(kms); |
---|
389 | | - |
---|
390 | | - if (!dpu_kms->dev || !dpu_kms->dev->dev_private) |
---|
391 | | - return; |
---|
392 | | - priv = dpu_kms->dev->dev_private; |
---|
393 | 408 | |
---|
394 | 409 | DPU_ATRACE_BEGIN("kms_complete_commit"); |
---|
395 | 410 | |
---|
396 | | - for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) |
---|
397 | | - dpu_crtc_complete_commit(crtc, old_crtc_state); |
---|
398 | | - |
---|
399 | | - pm_runtime_put_sync(&dpu_kms->pdev->dev); |
---|
| 411 | + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) |
---|
| 412 | + dpu_crtc_complete_commit(crtc); |
---|
400 | 413 | |
---|
401 | 414 | DPU_ATRACE_END("kms_complete_commit"); |
---|
402 | 415 | } |
---|
.. | .. |
---|
442 | 455 | } |
---|
443 | 456 | } |
---|
444 | 457 | |
---|
| 458 | +static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask) |
---|
| 459 | +{ |
---|
| 460 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 461 | + struct drm_crtc *crtc; |
---|
| 462 | + |
---|
| 463 | + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) |
---|
| 464 | + dpu_kms_wait_for_commit_done(kms, crtc); |
---|
| 465 | +} |
---|
| 466 | + |
---|
445 | 467 | static int _dpu_kms_initialize_dsi(struct drm_device *dev, |
---|
446 | 468 | struct msm_drm_private *priv, |
---|
447 | 469 | struct dpu_kms *dpu_kms) |
---|
.. | .. |
---|
476 | 498 | return rc; |
---|
477 | 499 | } |
---|
478 | 500 | |
---|
| 501 | +static int _dpu_kms_initialize_displayport(struct drm_device *dev, |
---|
| 502 | + struct msm_drm_private *priv, |
---|
| 503 | + struct dpu_kms *dpu_kms) |
---|
| 504 | +{ |
---|
| 505 | + struct drm_encoder *encoder = NULL; |
---|
| 506 | + int rc = 0; |
---|
| 507 | + |
---|
| 508 | + if (!priv->dp) |
---|
| 509 | + return rc; |
---|
| 510 | + |
---|
| 511 | + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); |
---|
| 512 | + if (IS_ERR(encoder)) { |
---|
| 513 | + DPU_ERROR("encoder init failed for dsi display\n"); |
---|
| 514 | + return PTR_ERR(encoder); |
---|
| 515 | + } |
---|
| 516 | + |
---|
| 517 | + rc = msm_dp_modeset_init(priv->dp, dev, encoder); |
---|
| 518 | + if (rc) { |
---|
| 519 | + DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); |
---|
| 520 | + drm_encoder_cleanup(encoder); |
---|
| 521 | + return rc; |
---|
| 522 | + } |
---|
| 523 | + |
---|
| 524 | + priv->encoders[priv->num_encoders++] = encoder; |
---|
| 525 | + return rc; |
---|
| 526 | +} |
---|
| 527 | + |
---|
479 | 528 | /** |
---|
480 | 529 | * _dpu_kms_setup_displays - create encoders, bridges and connectors |
---|
481 | 530 | * for underlying displays |
---|
.. | .. |
---|
488 | 537 | struct msm_drm_private *priv, |
---|
489 | 538 | struct dpu_kms *dpu_kms) |
---|
490 | 539 | { |
---|
491 | | - /** |
---|
492 | | - * Extend this function to initialize other |
---|
493 | | - * types of displays |
---|
494 | | - */ |
---|
| 540 | + int rc = 0; |
---|
495 | 541 | |
---|
496 | | - return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); |
---|
| 542 | + rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms); |
---|
| 543 | + if (rc) { |
---|
| 544 | + DPU_ERROR("initialize_dsi failed, rc = %d\n", rc); |
---|
| 545 | + return rc; |
---|
| 546 | + } |
---|
| 547 | + |
---|
| 548 | + rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms); |
---|
| 549 | + if (rc) { |
---|
| 550 | + DPU_ERROR("initialize_DP failed, rc = %d\n", rc); |
---|
| 551 | + return rc; |
---|
| 552 | + } |
---|
| 553 | + |
---|
| 554 | + return rc; |
---|
497 | 555 | } |
---|
498 | 556 | |
---|
499 | 557 | static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) |
---|
.. | .. |
---|
501 | 559 | struct msm_drm_private *priv; |
---|
502 | 560 | int i; |
---|
503 | 561 | |
---|
504 | | - if (!dpu_kms) { |
---|
505 | | - DPU_ERROR("invalid dpu_kms\n"); |
---|
506 | | - return; |
---|
507 | | - } else if (!dpu_kms->dev) { |
---|
508 | | - DPU_ERROR("invalid dev\n"); |
---|
509 | | - return; |
---|
510 | | - } else if (!dpu_kms->dev->dev_private) { |
---|
511 | | - DPU_ERROR("invalid dev_private\n"); |
---|
512 | | - return; |
---|
513 | | - } |
---|
514 | 562 | priv = dpu_kms->dev->dev_private; |
---|
515 | 563 | |
---|
516 | 564 | for (i = 0; i < priv->num_crtcs; i++) |
---|
.. | .. |
---|
534 | 582 | { |
---|
535 | 583 | struct drm_device *dev; |
---|
536 | 584 | struct drm_plane *primary_planes[MAX_PLANES], *plane; |
---|
| 585 | + struct drm_plane *cursor_planes[MAX_PLANES] = { NULL }; |
---|
537 | 586 | struct drm_crtc *crtc; |
---|
538 | 587 | |
---|
539 | 588 | struct msm_drm_private *priv; |
---|
540 | 589 | struct dpu_mdss_cfg *catalog; |
---|
541 | 590 | |
---|
542 | | - int primary_planes_idx = 0, i, ret; |
---|
| 591 | + int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; |
---|
543 | 592 | int max_crtc_count; |
---|
544 | | - |
---|
545 | | - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { |
---|
546 | | - DPU_ERROR("invalid dpu_kms\n"); |
---|
547 | | - return -EINVAL; |
---|
548 | | - } |
---|
549 | | - |
---|
550 | 593 | dev = dpu_kms->dev; |
---|
551 | 594 | priv = dev->dev_private; |
---|
552 | 595 | catalog = dpu_kms->catalog; |
---|
.. | .. |
---|
561 | 604 | |
---|
562 | 605 | max_crtc_count = min(catalog->mixer_count, priv->num_encoders); |
---|
563 | 606 | |
---|
564 | | - /* Create the planes */ |
---|
| 607 | + /* Create the planes, keeping track of one primary/cursor per crtc */ |
---|
565 | 608 | for (i = 0; i < catalog->sspp_count; i++) { |
---|
566 | | - bool primary = true; |
---|
| 609 | + enum drm_plane_type type; |
---|
567 | 610 | |
---|
568 | | - if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR) |
---|
569 | | - || primary_planes_idx >= max_crtc_count) |
---|
570 | | - primary = false; |
---|
| 611 | + if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)) |
---|
| 612 | + && cursor_planes_idx < max_crtc_count) |
---|
| 613 | + type = DRM_PLANE_TYPE_CURSOR; |
---|
| 614 | + else if (primary_planes_idx < max_crtc_count) |
---|
| 615 | + type = DRM_PLANE_TYPE_PRIMARY; |
---|
| 616 | + else |
---|
| 617 | + type = DRM_PLANE_TYPE_OVERLAY; |
---|
571 | 618 | |
---|
572 | | - plane = dpu_plane_init(dev, catalog->sspp[i].id, primary, |
---|
573 | | - (1UL << max_crtc_count) - 1, 0); |
---|
| 619 | + DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n", |
---|
| 620 | + type, catalog->sspp[i].features, |
---|
| 621 | + catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)); |
---|
| 622 | + |
---|
| 623 | + plane = dpu_plane_init(dev, catalog->sspp[i].id, type, |
---|
| 624 | + (1UL << max_crtc_count) - 1, 0); |
---|
574 | 625 | if (IS_ERR(plane)) { |
---|
575 | 626 | DPU_ERROR("dpu_plane_init failed\n"); |
---|
576 | 627 | ret = PTR_ERR(plane); |
---|
.. | .. |
---|
578 | 629 | } |
---|
579 | 630 | priv->planes[priv->num_planes++] = plane; |
---|
580 | 631 | |
---|
581 | | - if (primary) |
---|
| 632 | + if (type == DRM_PLANE_TYPE_CURSOR) |
---|
| 633 | + cursor_planes[cursor_planes_idx++] = plane; |
---|
| 634 | + else if (type == DRM_PLANE_TYPE_PRIMARY) |
---|
582 | 635 | primary_planes[primary_planes_idx++] = plane; |
---|
583 | 636 | } |
---|
584 | 637 | |
---|
.. | .. |
---|
586 | 639 | |
---|
587 | 640 | /* Create one CRTC per encoder */ |
---|
588 | 641 | for (i = 0; i < max_crtc_count; i++) { |
---|
589 | | - crtc = dpu_crtc_init(dev, primary_planes[i]); |
---|
| 642 | + crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]); |
---|
590 | 643 | if (IS_ERR(crtc)) { |
---|
591 | 644 | ret = PTR_ERR(crtc); |
---|
592 | 645 | goto fail; |
---|
.. | .. |
---|
604 | 657 | return ret; |
---|
605 | 658 | } |
---|
606 | 659 | |
---|
607 | | -#ifdef CONFIG_DEBUG_FS |
---|
608 | | -static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) |
---|
609 | | -{ |
---|
610 | | - struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
611 | | - struct drm_device *dev; |
---|
612 | | - int rc; |
---|
613 | | - |
---|
614 | | - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { |
---|
615 | | - DPU_ERROR("invalid dpu_kms\n"); |
---|
616 | | - return -EINVAL; |
---|
617 | | - } |
---|
618 | | - |
---|
619 | | - dev = dpu_kms->dev; |
---|
620 | | - |
---|
621 | | - rc = _dpu_debugfs_init(dpu_kms); |
---|
622 | | - if (rc) |
---|
623 | | - DPU_ERROR("dpu_debugfs init failed: %d\n", rc); |
---|
624 | | - |
---|
625 | | - return rc; |
---|
626 | | -} |
---|
627 | | -#endif |
---|
628 | | - |
---|
629 | 660 | static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, |
---|
630 | 661 | struct drm_encoder *encoder) |
---|
631 | 662 | { |
---|
.. | .. |
---|
634 | 665 | |
---|
635 | 666 | static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) |
---|
636 | 667 | { |
---|
637 | | - struct drm_device *dev; |
---|
638 | 668 | int i; |
---|
639 | | - |
---|
640 | | - dev = dpu_kms->dev; |
---|
641 | | - if (!dev) |
---|
642 | | - return; |
---|
643 | 669 | |
---|
644 | 670 | if (dpu_kms->hw_intr) |
---|
645 | 671 | dpu_hw_intr_destroy(dpu_kms->hw_intr); |
---|
646 | 672 | dpu_kms->hw_intr = NULL; |
---|
647 | 673 | |
---|
648 | | - if (dpu_kms->power_event) |
---|
649 | | - dpu_power_handle_unregister_event( |
---|
650 | | - &dpu_kms->phandle, dpu_kms->power_event); |
---|
651 | | - |
---|
652 | 674 | /* safe to call these more than once during shutdown */ |
---|
653 | | - _dpu_debugfs_destroy(dpu_kms); |
---|
654 | 675 | _dpu_kms_mmu_destroy(dpu_kms); |
---|
655 | 676 | |
---|
656 | 677 | if (dpu_kms->catalog) { |
---|
657 | | - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { |
---|
658 | | - u32 vbif_idx = dpu_kms->catalog->vbif[i].id; |
---|
659 | | - |
---|
660 | | - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) |
---|
661 | | - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]); |
---|
| 678 | + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { |
---|
| 679 | + if (dpu_kms->hw_vbif[i]) { |
---|
| 680 | + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); |
---|
| 681 | + dpu_kms->hw_vbif[i] = NULL; |
---|
| 682 | + } |
---|
662 | 683 | } |
---|
663 | 684 | } |
---|
664 | 685 | |
---|
.. | .. |
---|
670 | 691 | dpu_hw_catalog_deinit(dpu_kms->catalog); |
---|
671 | 692 | dpu_kms->catalog = NULL; |
---|
672 | 693 | |
---|
673 | | - if (dpu_kms->core_client) |
---|
674 | | - dpu_power_client_destroy(&dpu_kms->phandle, |
---|
675 | | - dpu_kms->core_client); |
---|
676 | | - dpu_kms->core_client = NULL; |
---|
677 | | - |
---|
678 | 694 | if (dpu_kms->vbif[VBIF_NRT]) |
---|
679 | 695 | devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]); |
---|
680 | 696 | dpu_kms->vbif[VBIF_NRT] = NULL; |
---|
.. | .. |
---|
682 | 698 | if (dpu_kms->vbif[VBIF_RT]) |
---|
683 | 699 | devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); |
---|
684 | 700 | dpu_kms->vbif[VBIF_RT] = NULL; |
---|
| 701 | + |
---|
| 702 | + if (dpu_kms->hw_mdp) |
---|
| 703 | + dpu_hw_mdp_destroy(dpu_kms->hw_mdp); |
---|
| 704 | + dpu_kms->hw_mdp = NULL; |
---|
685 | 705 | |
---|
686 | 706 | if (dpu_kms->mmio) |
---|
687 | 707 | devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); |
---|
.. | .. |
---|
699 | 719 | |
---|
700 | 720 | dpu_kms = to_dpu_kms(kms); |
---|
701 | 721 | |
---|
702 | | - dpu_dbg_destroy(); |
---|
703 | 722 | _dpu_kms_hw_destroy(dpu_kms); |
---|
704 | | -} |
---|
705 | | - |
---|
706 | | -static int dpu_kms_pm_suspend(struct device *dev) |
---|
707 | | -{ |
---|
708 | | - struct drm_device *ddev; |
---|
709 | | - struct drm_modeset_acquire_ctx ctx; |
---|
710 | | - struct drm_atomic_state *state; |
---|
711 | | - struct dpu_kms *dpu_kms; |
---|
712 | | - int ret = 0, num_crtcs = 0; |
---|
713 | | - |
---|
714 | | - if (!dev) |
---|
715 | | - return -EINVAL; |
---|
716 | | - |
---|
717 | | - ddev = dev_get_drvdata(dev); |
---|
718 | | - if (!ddev || !ddev_to_msm_kms(ddev)) |
---|
719 | | - return -EINVAL; |
---|
720 | | - |
---|
721 | | - dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev)); |
---|
722 | | - |
---|
723 | | - /* disable hot-plug polling */ |
---|
724 | | - drm_kms_helper_poll_disable(ddev); |
---|
725 | | - |
---|
726 | | - /* acquire modeset lock(s) */ |
---|
727 | | - drm_modeset_acquire_init(&ctx, 0); |
---|
728 | | - |
---|
729 | | -retry: |
---|
730 | | - DPU_ATRACE_BEGIN("kms_pm_suspend"); |
---|
731 | | - |
---|
732 | | - ret = drm_modeset_lock_all_ctx(ddev, &ctx); |
---|
733 | | - if (ret) |
---|
734 | | - goto unlock; |
---|
735 | | - |
---|
736 | | - /* save current state for resume */ |
---|
737 | | - if (dpu_kms->suspend_state) |
---|
738 | | - drm_atomic_state_put(dpu_kms->suspend_state); |
---|
739 | | - dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx); |
---|
740 | | - if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) { |
---|
741 | | - DRM_ERROR("failed to back up suspend state\n"); |
---|
742 | | - dpu_kms->suspend_state = NULL; |
---|
743 | | - goto unlock; |
---|
744 | | - } |
---|
745 | | - |
---|
746 | | - /* create atomic state to disable all CRTCs */ |
---|
747 | | - state = drm_atomic_state_alloc(ddev); |
---|
748 | | - if (IS_ERR_OR_NULL(state)) { |
---|
749 | | - DRM_ERROR("failed to allocate crtc disable state\n"); |
---|
750 | | - goto unlock; |
---|
751 | | - } |
---|
752 | | - |
---|
753 | | - state->acquire_ctx = &ctx; |
---|
754 | | - |
---|
755 | | - /* check for nothing to do */ |
---|
756 | | - if (num_crtcs == 0) { |
---|
757 | | - DRM_DEBUG("all crtcs are already in the off state\n"); |
---|
758 | | - drm_atomic_state_put(state); |
---|
759 | | - goto suspended; |
---|
760 | | - } |
---|
761 | | - |
---|
762 | | - /* commit the "disable all" state */ |
---|
763 | | - ret = drm_atomic_commit(state); |
---|
764 | | - if (ret < 0) { |
---|
765 | | - DRM_ERROR("failed to disable crtcs, %d\n", ret); |
---|
766 | | - drm_atomic_state_put(state); |
---|
767 | | - goto unlock; |
---|
768 | | - } |
---|
769 | | - |
---|
770 | | -suspended: |
---|
771 | | - dpu_kms->suspend_block = true; |
---|
772 | | - |
---|
773 | | -unlock: |
---|
774 | | - if (ret == -EDEADLK) { |
---|
775 | | - drm_modeset_backoff(&ctx); |
---|
776 | | - goto retry; |
---|
777 | | - } |
---|
778 | | - drm_modeset_drop_locks(&ctx); |
---|
779 | | - drm_modeset_acquire_fini(&ctx); |
---|
780 | | - |
---|
781 | | - DPU_ATRACE_END("kms_pm_suspend"); |
---|
782 | | - return 0; |
---|
783 | | -} |
---|
784 | | - |
---|
785 | | -static int dpu_kms_pm_resume(struct device *dev) |
---|
786 | | -{ |
---|
787 | | - struct drm_device *ddev; |
---|
788 | | - struct dpu_kms *dpu_kms; |
---|
789 | | - int ret; |
---|
790 | | - |
---|
791 | | - if (!dev) |
---|
792 | | - return -EINVAL; |
---|
793 | | - |
---|
794 | | - ddev = dev_get_drvdata(dev); |
---|
795 | | - if (!ddev || !ddev_to_msm_kms(ddev)) |
---|
796 | | - return -EINVAL; |
---|
797 | | - |
---|
798 | | - dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev)); |
---|
799 | | - |
---|
800 | | - DPU_ATRACE_BEGIN("kms_pm_resume"); |
---|
801 | | - |
---|
802 | | - drm_mode_config_reset(ddev); |
---|
803 | | - |
---|
804 | | - drm_modeset_lock_all(ddev); |
---|
805 | | - |
---|
806 | | - dpu_kms->suspend_block = false; |
---|
807 | | - |
---|
808 | | - if (dpu_kms->suspend_state) { |
---|
809 | | - dpu_kms->suspend_state->acquire_ctx = |
---|
810 | | - ddev->mode_config.acquire_ctx; |
---|
811 | | - ret = drm_atomic_commit(dpu_kms->suspend_state); |
---|
812 | | - if (ret < 0) { |
---|
813 | | - DRM_ERROR("failed to restore state, %d\n", ret); |
---|
814 | | - drm_atomic_state_put(dpu_kms->suspend_state); |
---|
815 | | - } |
---|
816 | | - dpu_kms->suspend_state = NULL; |
---|
817 | | - } |
---|
818 | | - drm_modeset_unlock_all(ddev); |
---|
819 | | - |
---|
820 | | - /* enable hot-plug polling */ |
---|
821 | | - drm_kms_helper_poll_enable(ddev); |
---|
822 | | - |
---|
823 | | - DPU_ATRACE_END("kms_pm_resume"); |
---|
824 | | - return 0; |
---|
825 | 723 | } |
---|
826 | 724 | |
---|
827 | 725 | static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, |
---|
.. | .. |
---|
838 | 736 | info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE : |
---|
839 | 737 | MSM_DISPLAY_CAP_VID_MODE; |
---|
840 | 738 | |
---|
841 | | - /* TODO: No support for DSI swap */ |
---|
842 | | - for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { |
---|
843 | | - if (priv->dsi[i]) { |
---|
844 | | - info.h_tile_instance[info.num_of_h_tiles] = i; |
---|
845 | | - info.num_of_h_tiles++; |
---|
| 739 | + switch (info.intf_type) { |
---|
| 740 | + case DRM_MODE_ENCODER_DSI: |
---|
| 741 | + /* TODO: No support for DSI swap */ |
---|
| 742 | + for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { |
---|
| 743 | + if (priv->dsi[i]) { |
---|
| 744 | + info.h_tile_instance[info.num_of_h_tiles] = i; |
---|
| 745 | + info.num_of_h_tiles++; |
---|
| 746 | + } |
---|
846 | 747 | } |
---|
847 | | - } |
---|
| 748 | + break; |
---|
| 749 | + case DRM_MODE_ENCODER_TMDS: |
---|
| 750 | + info.num_of_h_tiles = 1; |
---|
| 751 | + break; |
---|
| 752 | + } |
---|
848 | 753 | |
---|
849 | 754 | rc = dpu_encoder_setup(encoder->dev, encoder, &info); |
---|
850 | 755 | if (rc) |
---|
851 | 756 | DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", |
---|
852 | 757 | encoder->base.id, rc); |
---|
| 758 | +} |
---|
| 759 | + |
---|
| 760 | +static irqreturn_t dpu_irq(struct msm_kms *kms) |
---|
| 761 | +{ |
---|
| 762 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 763 | + |
---|
| 764 | + return dpu_core_irq(dpu_kms); |
---|
| 765 | +} |
---|
| 766 | + |
---|
| 767 | +static void dpu_irq_preinstall(struct msm_kms *kms) |
---|
| 768 | +{ |
---|
| 769 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 770 | + |
---|
| 771 | + dpu_core_irq_preinstall(dpu_kms); |
---|
| 772 | +} |
---|
| 773 | + |
---|
| 774 | +static int dpu_irq_postinstall(struct msm_kms *kms) |
---|
| 775 | +{ |
---|
| 776 | + struct msm_drm_private *priv; |
---|
| 777 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 778 | + |
---|
| 779 | + if (!dpu_kms || !dpu_kms->dev) |
---|
| 780 | + return -EINVAL; |
---|
| 781 | + |
---|
| 782 | + priv = dpu_kms->dev->dev_private; |
---|
| 783 | + if (!priv) |
---|
| 784 | + return -EINVAL; |
---|
| 785 | + |
---|
| 786 | + msm_dp_irq_postinstall(priv->dp); |
---|
| 787 | + |
---|
| 788 | + return 0; |
---|
| 789 | +} |
---|
| 790 | + |
---|
| 791 | +static void dpu_irq_uninstall(struct msm_kms *kms) |
---|
| 792 | +{ |
---|
| 793 | + struct dpu_kms *dpu_kms = to_dpu_kms(kms); |
---|
| 794 | + |
---|
| 795 | + dpu_core_irq_uninstall(dpu_kms); |
---|
853 | 796 | } |
---|
854 | 797 | |
---|
855 | 798 | static const struct msm_kms_funcs kms_funcs = { |
---|
.. | .. |
---|
858 | 801 | .irq_postinstall = dpu_irq_postinstall, |
---|
859 | 802 | .irq_uninstall = dpu_irq_uninstall, |
---|
860 | 803 | .irq = dpu_irq, |
---|
| 804 | + .enable_commit = dpu_kms_enable_commit, |
---|
| 805 | + .disable_commit = dpu_kms_disable_commit, |
---|
| 806 | + .vsync_time = dpu_kms_vsync_time, |
---|
861 | 807 | .prepare_commit = dpu_kms_prepare_commit, |
---|
862 | | - .commit = dpu_kms_commit, |
---|
| 808 | + .flush_commit = dpu_kms_flush_commit, |
---|
| 809 | + .wait_flush = dpu_kms_wait_flush, |
---|
863 | 810 | .complete_commit = dpu_kms_complete_commit, |
---|
864 | | - .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done, |
---|
865 | 811 | .enable_vblank = dpu_kms_enable_vblank, |
---|
866 | 812 | .disable_vblank = dpu_kms_disable_vblank, |
---|
867 | 813 | .check_modified_format = dpu_format_check_modified_format, |
---|
868 | 814 | .get_format = dpu_get_msm_format, |
---|
869 | 815 | .round_pixclk = dpu_kms_round_pixclk, |
---|
870 | | - .pm_suspend = dpu_kms_pm_suspend, |
---|
871 | | - .pm_resume = dpu_kms_pm_resume, |
---|
872 | 816 | .destroy = dpu_kms_destroy, |
---|
873 | 817 | .set_encoder_mode = _dpu_kms_set_encoder_mode, |
---|
874 | 818 | #ifdef CONFIG_DEBUG_FS |
---|
.. | .. |
---|
876 | 820 | #endif |
---|
877 | 821 | }; |
---|
878 | 822 | |
---|
879 | | -/* the caller api needs to turn on clock before calling it */ |
---|
880 | | -static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms) |
---|
881 | | -{ |
---|
882 | | - dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); |
---|
883 | | -} |
---|
884 | | - |
---|
885 | | -static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) |
---|
| 823 | +static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) |
---|
886 | 824 | { |
---|
887 | 825 | struct msm_mmu *mmu; |
---|
888 | 826 | |
---|
| 827 | + if (!dpu_kms->base.aspace) |
---|
| 828 | + return; |
---|
| 829 | + |
---|
889 | 830 | mmu = dpu_kms->base.aspace->mmu; |
---|
890 | 831 | |
---|
891 | | - mmu->funcs->detach(mmu, (const char **)iommu_ports, |
---|
892 | | - ARRAY_SIZE(iommu_ports)); |
---|
| 832 | + mmu->funcs->detach(mmu); |
---|
893 | 833 | msm_gem_address_space_put(dpu_kms->base.aspace); |
---|
894 | 834 | |
---|
895 | | - return 0; |
---|
| 835 | + dpu_kms->base.aspace = NULL; |
---|
896 | 836 | } |
---|
897 | 837 | |
---|
898 | 838 | static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) |
---|
899 | 839 | { |
---|
900 | 840 | struct iommu_domain *domain; |
---|
901 | 841 | struct msm_gem_address_space *aspace; |
---|
902 | | - int ret; |
---|
| 842 | + struct msm_mmu *mmu; |
---|
903 | 843 | |
---|
904 | 844 | domain = iommu_domain_alloc(&platform_bus_type); |
---|
905 | 845 | if (!domain) |
---|
906 | 846 | return 0; |
---|
907 | 847 | |
---|
908 | | - aspace = msm_gem_address_space_create(dpu_kms->dev->dev, |
---|
909 | | - domain, "dpu1"); |
---|
| 848 | + mmu = msm_iommu_new(dpu_kms->dev->dev, domain); |
---|
| 849 | + if (IS_ERR(mmu)) { |
---|
| 850 | + iommu_domain_free(domain); |
---|
| 851 | + return PTR_ERR(mmu); |
---|
| 852 | + } |
---|
| 853 | + aspace = msm_gem_address_space_create(mmu, "dpu1", |
---|
| 854 | + 0x1000, 0x100000000 - 0x1000); |
---|
| 855 | + |
---|
910 | 856 | if (IS_ERR(aspace)) { |
---|
911 | | - ret = PTR_ERR(aspace); |
---|
912 | | - goto fail; |
---|
| 857 | + mmu->funcs->destroy(mmu); |
---|
| 858 | + return PTR_ERR(aspace); |
---|
913 | 859 | } |
---|
914 | 860 | |
---|
915 | 861 | dpu_kms->base.aspace = aspace; |
---|
916 | | - |
---|
917 | | - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, |
---|
918 | | - ARRAY_SIZE(iommu_ports)); |
---|
919 | | - if (ret) { |
---|
920 | | - DPU_ERROR("failed to attach iommu %d\n", ret); |
---|
921 | | - msm_gem_address_space_put(aspace); |
---|
922 | | - goto fail; |
---|
923 | | - } |
---|
924 | | - |
---|
925 | 862 | return 0; |
---|
926 | | -fail: |
---|
927 | | - _dpu_kms_mmu_destroy(dpu_kms); |
---|
928 | | - |
---|
929 | | - return ret; |
---|
930 | 863 | } |
---|
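Assuming the third and fourth arguments of msm_gem_address_space_create() are the start and size of the iova range (which is what the values suggest), the display address space created here spans 0x1000 + (0x100000000 - 0x1000) = 0x100000000, i.e. [4 KiB, 4 GiB): the whole 32-bit space with the first page excluded, presumably so that a buffer can never be assigned iova 0.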
931 | 864 | |
---|
932 | 865 | static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms, |
---|
.. | .. |
---|
954 | 887 | return clk_get_rate(clk->clk); |
---|
955 | 888 | } |
---|
956 | 889 | |
---|
957 | | -static void dpu_kms_handle_power_event(u32 event_type, void *usr) |
---|
958 | | -{ |
---|
959 | | - struct dpu_kms *dpu_kms = usr; |
---|
960 | | - |
---|
961 | | - if (!dpu_kms) |
---|
962 | | - return; |
---|
963 | | - |
---|
964 | | - if (event_type == DPU_POWER_EVENT_POST_ENABLE) |
---|
965 | | - dpu_vbif_init_memtypes(dpu_kms); |
---|
966 | | -} |
---|
967 | | - |
---|
968 | 890 | static int dpu_kms_hw_init(struct msm_kms *kms) |
---|
969 | 891 | { |
---|
970 | 892 | struct dpu_kms *dpu_kms; |
---|
971 | 893 | struct drm_device *dev; |
---|
972 | | - struct msm_drm_private *priv; |
---|
973 | 894 | int i, rc = -EINVAL; |
---|
974 | 895 | |
---|
975 | 896 | if (!kms) { |
---|
976 | 897 | DPU_ERROR("invalid kms\n"); |
---|
977 | | - goto end; |
---|
| 898 | + return rc; |
---|
978 | 899 | } |
---|
979 | 900 | |
---|
980 | 901 | dpu_kms = to_dpu_kms(kms); |
---|
981 | 902 | dev = dpu_kms->dev; |
---|
982 | | - if (!dev) { |
---|
983 | | - DPU_ERROR("invalid device\n"); |
---|
984 | | - goto end; |
---|
985 | | - } |
---|
986 | 903 | |
---|
987 | | - rc = dpu_dbg_init(&dpu_kms->pdev->dev); |
---|
988 | | - if (rc) { |
---|
989 | | - DRM_ERROR("failed to init dpu dbg: %d\n", rc); |
---|
990 | | - goto end; |
---|
991 | | - } |
---|
| 904 | + rc = dpu_kms_global_obj_init(dpu_kms); |
---|
| 905 | + if (rc) |
---|
| 906 | + return rc; |
---|
992 | 907 | |
---|
993 | | - priv = dev->dev_private; |
---|
994 | | - if (!priv) { |
---|
995 | | - DPU_ERROR("invalid private data\n"); |
---|
996 | | - goto dbg_destroy; |
---|
997 | | - } |
---|
| 908 | + atomic_set(&dpu_kms->bandwidth_ref, 0); |
---|
998 | 909 | |
---|
999 | 910 | dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp"); |
---|
1000 | 911 | if (IS_ERR(dpu_kms->mmio)) { |
---|
.. | .. |
---|
1004 | 915 | goto error; |
---|
1005 | 916 | } |
---|
1006 | 917 | DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio); |
---|
1007 | | - dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp"); |
---|
1008 | 918 | |
---|
1009 | 919 | dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif"); |
---|
1010 | 920 | if (IS_ERR(dpu_kms->vbif[VBIF_RT])) { |
---|
.. | .. |
---|
1013 | 923 | dpu_kms->vbif[VBIF_RT] = NULL; |
---|
1014 | 924 | goto error; |
---|
1015 | 925 | } |
---|
1016 | | - dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif"); |
---|
1017 | | - dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt"); |
---|
| 926 | + dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt"); |
---|
1018 | 927 | if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) { |
---|
1019 | 928 | dpu_kms->vbif[VBIF_NRT] = NULL; |
---|
1020 | 929 | DPU_DEBUG("VBIF NRT is not defined"); |
---|
1021 | | - } else { |
---|
1022 | | - dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev, |
---|
1023 | | - "vbif_nrt"); |
---|
1024 | 930 | } |
---|
1025 | 931 | |
---|
1026 | | - dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma"); |
---|
| 932 | + dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma"); |
---|
1027 | 933 | if (IS_ERR(dpu_kms->reg_dma)) { |
---|
1028 | 934 | dpu_kms->reg_dma = NULL; |
---|
1029 | 935 | DPU_DEBUG("REG_DMA is not defined"); |
---|
1030 | | - } else { |
---|
1031 | | - dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma"); |
---|
1032 | 936 | } |
---|
1033 | 937 | |
---|
1034 | | - dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle, |
---|
1035 | | - "core"); |
---|
1036 | | - if (IS_ERR_OR_NULL(dpu_kms->core_client)) { |
---|
1037 | | - rc = PTR_ERR(dpu_kms->core_client); |
---|
1038 | | - if (!dpu_kms->core_client) |
---|
1039 | | - rc = -EINVAL; |
---|
1040 | | - DPU_ERROR("dpu power client create failed: %d\n", rc); |
---|
1041 | | - dpu_kms->core_client = NULL; |
---|
| 938 | + dpu_kms_parse_data_bus_icc_path(dpu_kms); |
---|
| 939 | + |
---|
| 940 | + rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev); |
---|
| 941 | + if (rc < 0) |
---|
1042 | 942 | goto error; |
---|
1043 | | - } |
---|
1044 | 943 | |
---|
1045 | | - pm_runtime_get_sync(&dpu_kms->pdev->dev); |
---|
1046 | | - |
---|
1047 | | - _dpu_kms_core_hw_rev_init(dpu_kms); |
---|
| 944 | + dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); |
---|
1048 | 945 | |
---|
1049 | 946 | pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev); |
---|
1050 | 947 | |
---|
.. | .. |
---|
1058 | 955 | goto power_error; |
---|
1059 | 956 | } |
---|
1060 | 957 | |
---|
1061 | | - dpu_dbg_init_dbg_buses(dpu_kms->core_rev); |
---|
1062 | | - |
---|
1063 | 958 | /* |
---|
1064 | 959 | * Now we need to read the HW catalog and initialize resources such as |
---|
1065 | 960 | * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc |
---|
.. | .. |
---|
1070 | 965 | goto power_error; |
---|
1071 | 966 | } |
---|
1072 | 967 | |
---|
1073 | | - rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, |
---|
1074 | | - dpu_kms->dev); |
---|
| 968 | + rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio); |
---|
1075 | 969 | if (rc) { |
---|
1076 | 970 | DPU_ERROR("rm init failed: %d\n", rc); |
---|
1077 | 971 | goto power_error; |
---|
.. | .. |
---|
1079 | 973 | |
---|
1080 | 974 | dpu_kms->rm_init = true; |
---|
1081 | 975 | |
---|
1082 | | - dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm); |
---|
1083 | | - if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) { |
---|
| 976 | + dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio, |
---|
| 977 | + dpu_kms->catalog); |
---|
| 978 | + if (IS_ERR(dpu_kms->hw_mdp)) { |
---|
1084 | 979 | rc = PTR_ERR(dpu_kms->hw_mdp); |
---|
1085 | | - if (!dpu_kms->hw_mdp) |
---|
1086 | | - rc = -EINVAL; |
---|
1087 | 980 | DPU_ERROR("failed to get hw_mdp: %d\n", rc); |
---|
1088 | 981 | dpu_kms->hw_mdp = NULL; |
---|
1089 | 982 | goto power_error; |
---|
.. | .. |
---|
1092 | 985 | for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { |
---|
1093 | 986 | u32 vbif_idx = dpu_kms->catalog->vbif[i].id; |
---|
1094 | 987 | |
---|
1095 | | - dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx, |
---|
| 988 | + dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx, |
---|
1096 | 989 | dpu_kms->vbif[vbif_idx], dpu_kms->catalog); |
---|
1097 | 990 | if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) { |
---|
1098 | 991 | rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]); |
---|
.. | .. |
---|
1105 | 998 | } |
---|
1106 | 999 | |
---|
1107 | 1000 | rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog, |
---|
1108 | | - &dpu_kms->phandle, |
---|
1109 | 1001 | _dpu_kms_get_clk(dpu_kms, "core")); |
---|
1110 | 1002 | if (rc) { |
---|
1111 | 1003 | DPU_ERROR("failed to init perf %d\n", rc); |
---|
.. | .. |
---|
1118 | 1010 | DPU_ERROR("hw_intr init failed: %d\n", rc); |
---|
1119 | 1011 | dpu_kms->hw_intr = NULL; |
---|
1120 | 1012 | goto hw_intr_init_err; |
---|
1121 | | - } |
---|
1122 | | - |
---|
1123 | | - /* |
---|
1124 | | - * _dpu_kms_drm_obj_init should create the DRM related objects |
---|
1125 | | - * i.e. CRTCs, planes, encoders, connectors and so forth |
---|
1126 | | - */ |
---|
1127 | | - rc = _dpu_kms_drm_obj_init(dpu_kms); |
---|
1128 | | - if (rc) { |
---|
1129 | | - DPU_ERROR("modeset init failed: %d\n", rc); |
---|
1130 | | - goto drm_obj_init_err; |
---|
1131 | 1013 | } |
---|
1132 | 1014 | |
---|
1133 | 1015 | dev->mode_config.min_width = 0; |
---|
.. | .. |
---|
1147 | 1029 | dev->mode_config.allow_fb_modifiers = true; |
---|
1148 | 1030 | |
---|
1149 | 1031 | /* |
---|
1150 | | - * Handle (re)initializations during power enable |
---|
| 1032 | + * _dpu_kms_drm_obj_init should create the DRM related objects |
---|
| 1033 | + * i.e. CRTCs, planes, encoders, connectors and so forth |
---|
1151 | 1034 | */ |
---|
1152 | | - dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms); |
---|
1153 | | - dpu_kms->power_event = dpu_power_handle_register_event( |
---|
1154 | | - &dpu_kms->phandle, |
---|
1155 | | - DPU_POWER_EVENT_POST_ENABLE, |
---|
1156 | | - dpu_kms_handle_power_event, dpu_kms, "kms"); |
---|
| 1035 | + rc = _dpu_kms_drm_obj_init(dpu_kms); |
---|
| 1036 | + if (rc) { |
---|
| 1037 | + DPU_ERROR("modeset init failed: %d\n", rc); |
---|
| 1038 | + goto drm_obj_init_err; |
---|
| 1039 | + } |
---|
| 1040 | + |
---|
| 1041 | + dpu_vbif_init_memtypes(dpu_kms); |
---|
1157 | 1042 | |
---|
1158 | 1043 | pm_runtime_put_sync(&dpu_kms->pdev->dev); |
---|
1159 | 1044 | |
---|
.. | .. |
---|
1167 | 1052 | pm_runtime_put_sync(&dpu_kms->pdev->dev); |
---|
1168 | 1053 | error: |
---|
1169 | 1054 | _dpu_kms_hw_destroy(dpu_kms); |
---|
1170 | | -dbg_destroy: |
---|
1171 | | - dpu_dbg_destroy(); |
---|
1172 | | -end: |
---|
| 1055 | + |
---|
1173 | 1056 | return rc; |
---|
1174 | 1057 | } |
---|
1175 | 1058 | |
---|
.. | .. |
---|
1179 | 1062 | struct dpu_kms *dpu_kms; |
---|
1180 | 1063 | int irq; |
---|
1181 | 1064 | |
---|
1182 | | - if (!dev || !dev->dev_private) { |
---|
| 1065 | + if (!dev) { |
---|
1183 | 1066 | DPU_ERROR("drm device node invalid\n"); |
---|
1184 | 1067 | return ERR_PTR(-EINVAL); |
---|
1185 | 1068 | } |
---|
.. | .. |
---|
1210 | 1093 | if (!dpu_kms) |
---|
1211 | 1094 | return -ENOMEM; |
---|
1212 | 1095 | |
---|
| 1096 | + dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core"); |
---|
| 1097 | + if (IS_ERR(dpu_kms->opp_table)) |
---|
| 1098 | + return PTR_ERR(dpu_kms->opp_table); |
---|
| 1099 | + /* OPP table is optional */ |
---|
| 1100 | + ret = dev_pm_opp_of_add_table(dev); |
---|
| 1101 | + if (!ret) { |
---|
| 1102 | + dpu_kms->has_opp_table = true; |
---|
| 1103 | + } else if (ret != -ENODEV) { |
---|
| 1104 | + dev_err(dev, "invalid OPP table in device tree\n"); |
---|
| 1105 | + dev_pm_opp_put_clkname(dpu_kms->opp_table); |
---|
| 1106 | + return ret; |
---|
| 1107 | + } |
---|
| 1108 | + |
---|
1213 | 1109 | mp = &dpu_kms->mp; |
---|
1214 | 1110 | ret = msm_dss_parse_clock(pdev, mp); |
---|
1215 | 1111 | if (ret) { |
---|
1216 | 1112 | DPU_ERROR("failed to parse clocks, ret=%d\n", ret); |
---|
1217 | | - return ret; |
---|
| 1113 | + goto err; |
---|
1218 | 1114 | } |
---|
1219 | | - |
---|
1220 | | - dpu_power_resource_init(pdev, &dpu_kms->phandle); |
---|
1221 | 1115 | |
---|
1222 | 1116 | platform_set_drvdata(pdev, dpu_kms); |
---|
1223 | 1117 | |
---|
.. | .. |
---|
1230 | 1124 | |
---|
1231 | 1125 | priv->kms = &dpu_kms->base; |
---|
1232 | 1126 | return ret; |
---|
| 1127 | +err: |
---|
| 1128 | + if (dpu_kms->has_opp_table) |
---|
| 1129 | + dev_pm_opp_of_remove_table(dev); |
---|
| 1130 | + dev_pm_opp_put_clkname(dpu_kms->opp_table); |
---|
| 1131 | + return ret; |
---|
1233 | 1132 | } |
---|
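dpu_bind() now attaches the "core" clock to the OPP framework and treats a missing table in DT as optional (-ENODEV), so later dev_pm_opp_set_rate() calls can scale that clock from the table. A condensed sketch of the same pattern under those assumptions (example_opp_setup is illustrative, not this driver's code):

```c
#include <linux/pm_opp.h>

static int example_opp_setup(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	/* Tell the OPP core which clock the table's frequencies drive. */
	opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* The table itself is optional: -ENODEV just means none in DT. */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_pm_opp_put_clkname(opp_table);
		return ret;
	}

	/* Later (when a table exists), e.g. from a clock update path: */
	dev_pm_opp_set_rate(dev, 300000000);	/* scale the "core" clock */

	return 0;
}
```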
1234 | 1133 | |
---|
1235 | 1134 | static void dpu_unbind(struct device *dev, struct device *master, void *data) |
---|
.. | .. |
---|
1238 | 1137 | struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); |
---|
1239 | 1138 | struct dss_module_power *mp = &dpu_kms->mp; |
---|
1240 | 1139 | |
---|
1241 | | - dpu_power_resource_deinit(pdev, &dpu_kms->phandle); |
---|
1242 | 1140 | msm_dss_put_clk(mp->clk_config, mp->num_clk); |
---|
1243 | 1141 | devm_kfree(&pdev->dev, mp->clk_config); |
---|
1244 | 1142 | mp->num_clk = 0; |
---|
1245 | 1143 | |
---|
1246 | 1144 | if (dpu_kms->rpm_enabled) |
---|
1247 | 1145 | pm_runtime_disable(&pdev->dev); |
---|
| 1146 | + |
---|
| 1147 | + if (dpu_kms->has_opp_table) |
---|
| 1148 | + dev_pm_opp_of_remove_table(dev); |
---|
| 1149 | + dev_pm_opp_put_clkname(dpu_kms->opp_table); |
---|
1248 | 1150 | } |
---|
1249 | 1151 | |
---|
1250 | 1152 | static const struct component_ops dpu_ops = { |
---|
.. | .. |
---|
1265 | 1167 | |
---|
1266 | 1168 | static int __maybe_unused dpu_runtime_suspend(struct device *dev) |
---|
1267 | 1169 | { |
---|
1268 | | - int rc = -1; |
---|
| 1170 | + int i, rc = -1; |
---|
1269 | 1171 | struct platform_device *pdev = to_platform_device(dev); |
---|
1270 | 1172 | struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); |
---|
1271 | | - struct drm_device *ddev; |
---|
1272 | 1173 | struct dss_module_power *mp = &dpu_kms->mp; |
---|
1273 | 1174 | |
---|
1274 | | - ddev = dpu_kms->dev; |
---|
1275 | | - if (!ddev) { |
---|
1276 | | - DPU_ERROR("invalid drm_device\n"); |
---|
1277 | | - goto exit; |
---|
1278 | | - } |
---|
1279 | | - |
---|
1280 | | - rc = dpu_power_resource_enable(&dpu_kms->phandle, |
---|
1281 | | - dpu_kms->core_client, false); |
---|
1282 | | - if (rc) |
---|
1283 | | - DPU_ERROR("resource disable failed: %d\n", rc); |
---|
1284 | | - |
---|
| 1175 | + /* Drop the performance state vote */ |
---|
| 1176 | + dev_pm_opp_set_rate(dev, 0); |
---|
1285 | 1177 | rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); |
---|
1286 | 1178 | if (rc) |
---|
1287 | 1179 | DPU_ERROR("clock disable failed rc:%d\n", rc); |
---|
1288 | 1180 | |
---|
1289 | | -exit: |
---|
| 1181 | + for (i = 0; i < dpu_kms->num_paths; i++) |
---|
| 1182 | + icc_set_bw(dpu_kms->path[i], 0, 0); |
---|
| 1183 | + |
---|
1290 | 1184 | return rc; |
---|
1291 | 1185 | } |
---|
1292 | 1186 | |
---|
.. | .. |
---|
1295 | 1189 | int rc = -1; |
---|
1296 | 1190 | struct platform_device *pdev = to_platform_device(dev); |
---|
1297 | 1191 | struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); |
---|
| 1192 | + struct drm_encoder *encoder; |
---|
1298 | 1193 | struct drm_device *ddev; |
---|
1299 | 1194 | struct dss_module_power *mp = &dpu_kms->mp; |
---|
| 1195 | + int i; |
---|
1300 | 1196 | |
---|
1301 | 1197 | ddev = dpu_kms->dev; |
---|
1302 | | - if (!ddev) { |
---|
1303 | | - DPU_ERROR("invalid drm_device\n"); |
---|
1304 | | - goto exit; |
---|
1305 | | - } |
---|
| 1198 | + |
---|
| 1199 | + WARN_ON(!(dpu_kms->num_paths)); |
---|
| 1200 | + /* Min vote of BW is required before turning on AXI clk */ |
---|
| 1201 | + for (i = 0; i < dpu_kms->num_paths; i++) |
---|
| 1202 | + icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW)); |
---|
1306 | 1203 | |
---|
1307 | 1204 | rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); |
---|
1308 | 1205 | if (rc) { |
---|
1309 | 1206 | DPU_ERROR("clock enable failed rc:%d\n", rc); |
---|
1310 | | - goto exit; |
---|
| 1207 | + return rc; |
---|
1311 | 1208 | } |
---|
1312 | 1209 | |
---|
1313 | | - rc = dpu_power_resource_enable(&dpu_kms->phandle, |
---|
1314 | | - dpu_kms->core_client, true); |
---|
1315 | | - if (rc) |
---|
1316 | | - DPU_ERROR("resource enable failed: %d\n", rc); |
---|
| 1210 | + dpu_vbif_init_memtypes(dpu_kms); |
---|
1317 | 1211 | |
---|
1318 | | -exit: |
---|
| 1212 | + drm_for_each_encoder(encoder, ddev) |
---|
| 1213 | + dpu_encoder_virt_runtime_resume(encoder); |
---|
| 1214 | + |
---|
1319 | 1215 | return rc; |
---|
1320 | 1216 | } |
---|
1321 | 1217 | |
---|
1322 | 1218 | static const struct dev_pm_ops dpu_pm_ops = { |
---|
1323 | 1219 | SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL) |
---|
| 1220 | + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
---|
| 1221 | + pm_runtime_force_resume) |
---|
1324 | 1222 | }; |
---|
1325 | 1223 | |
---|
1326 | 1224 | static const struct of_device_id dpu_dt_match[] = { |
---|
1327 | 1225 | { .compatible = "qcom,sdm845-dpu", }, |
---|
| 1226 | + { .compatible = "qcom,sc7180-dpu", }, |
---|
1328 | 1227 | {} |
---|
1329 | 1228 | }; |
---|
1330 | 1229 | MODULE_DEVICE_TABLE(of, dpu_dt_match); |
---|