From 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Mon, 13 May 2024 10:30:14 +0000 Subject: [PATCH] modify sin led gpio --- kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 939 ++++++++++++++++++++++++++-------------------------------- 1 files changed, 419 insertions(+), 520 deletions(-) diff --git a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 52474dc..b7841f7 100644 --- a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -1,27 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ -#include <drm/drm_crtc.h> #include <linux/debugfs.h> -#include <linux/of_irq.h> #include <linux/dma-buf.h> +#include <linux/of_irq.h> +#include <linux/pm_opp.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_file.h> #include "msm_drv.h" #include "msm_mmu.h" @@ -39,10 +31,6 @@ #define CREATE_TRACE_POINTS #include "dpu_trace.h" -static const char * const iommu_ports[] = { - "mdp_0", -}; - /* * To enable overall DRM driver logging * # echo 0x2 > /sys/module/drm/parameters/debug @@ -55,38 +43,24 @@ #define DPU_DEBUGFS_DIR "msm_dpu" #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" +#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */ + static int dpu_kms_hw_init(struct msm_kms *kms); -static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); - -static unsigned long dpu_iomap_size(struct platform_device *pdev, - const char *name) -{ - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - if (!res) { - DRM_ERROR("failed to get memory resource: %s\n", name); - return 0; - } - - return resource_size(res); -} +static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); #ifdef CONFIG_DEBUG_FS static int _dpu_danger_signal_status(struct seq_file *s, bool danger_status) { struct dpu_kms *kms = (struct dpu_kms *)s->private; - struct msm_drm_private *priv; struct dpu_danger_safe_status status; int i; - if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) { + if (!kms->hw_mdp) { DPU_ERROR("invalid arg(s)\n"); return 0; } - priv = kms->dev->dev_private; memset(&status, 0, sizeof(struct dpu_danger_safe_status)); pm_runtime_get_sync(&kms->pdev->dev); @@ -113,79 +87,37 @@ return 0; } -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} - static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v) { 
return _dpu_danger_signal_status(s, true); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats); static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v) { return _dpu_danger_signal_status(s, false); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats); -static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms) -{ - debugfs_remove_recursive(dpu_kms->debugfs_danger); - dpu_kms->debugfs_danger = NULL; -} - -static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, +static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, struct dentry *parent) { - dpu_kms->debugfs_danger = debugfs_create_dir("danger", - parent); - if (!dpu_kms->debugfs_danger) { - DPU_ERROR("failed to create danger debugfs\n"); - return -EINVAL; - } + struct dentry *entry = debugfs_create_dir("danger", parent); - debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger, + debugfs_create_file("danger_status", 0600, entry, dpu_kms, &dpu_debugfs_danger_stats_fops); - debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger, + debugfs_create_file("safe_status", 0600, entry, dpu_kms, &dpu_debugfs_safe_stats_fops); - - return 0; } static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) { - struct dpu_debugfs_regset32 *regset; - struct dpu_kms *dpu_kms; - struct drm_device *dev; - struct msm_drm_private *priv; + struct dpu_debugfs_regset32 *regset = s->private; + struct dpu_kms *dpu_kms = regset->dpu_kms; void __iomem *base; uint32_t i, addr; - if (!s || !s->private) - return 0; - - regset = s->private; - - dpu_kms = regset->dpu_kms; - if (!dpu_kms || !dpu_kms->mmio) - return 0; - - dev = dpu_kms->dev; - if (!dev) - return 0; - - priv = dev->dev_private; - if (!priv) + if (!dpu_kms->mmio) return 0; base = dpu_kms->mmio + regset->offset; @@ -235,74 +167,147 @@ } } -void *dpu_debugfs_create_regset32(const char *name, umode_t mode, +void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, struct dpu_debugfs_regset32 *regset) { if (!name || !regset || !regset->dpu_kms || !regset->blk_len) - return NULL; + return; /* make sure offset is a multiple of 4 */ regset->offset = round_down(regset->offset, 4); - return debugfs_create_file(name, mode, parent, - regset, &dpu_fops_regset32); + debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32); } -static int _dpu_debugfs_init(struct dpu_kms *dpu_kms) +static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) { - void *p; - int rc; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + void *p = dpu_hw_util_get_log_mask_ptr(); + struct dentry *entry; + struct drm_device *dev; + struct msm_drm_private *priv; - p = dpu_hw_util_get_log_mask_ptr(); - - if (!dpu_kms || !p) + if (!p) return -EINVAL; - dpu_kms->debugfs_root = debugfs_create_dir("debug", - dpu_kms->dev->primary->debugfs_root); - if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) { - DRM_ERROR("debugfs create_dir failed %ld\n", - PTR_ERR(dpu_kms->debugfs_root)); - return PTR_ERR(dpu_kms->debugfs_root); - } + dev = dpu_kms->dev; + priv = dev->dev_private; - rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root); - if (rc) { - DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc); - return rc; - } + entry = debugfs_create_dir("debug", minor->debugfs_root); - /* allow root to be NULL */ - debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p); + debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); - (void) 
dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root); - (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root); - (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root); + dpu_debugfs_danger_init(dpu_kms, entry); + dpu_debugfs_vbif_init(dpu_kms, entry); + dpu_debugfs_core_irq_init(dpu_kms, entry); - rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root); - if (rc) { - DPU_ERROR("failed to init perf %d\n", rc); - return rc; - } + if (priv->dp) + msm_dp_debugfs_init(priv->dp, minor); + return dpu_core_perf_debugfs_init(dpu_kms, entry); +} +#endif + +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct dpu_global_state * +dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) +{ + return to_dpu_global_state(dpu_kms->global_state.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. + */ +struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_private_state *priv_state; + int ret; + + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + priv_state = drm_atomic_get_private_obj_state(s, + &dpu_kms->global_state); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_dpu_global_state(priv_state); +} + +static struct drm_private_state * +dpu_kms_global_duplicate_state(struct drm_private_obj *obj) +{ + struct dpu_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dpu_global_state *dpu_state = to_dpu_global_state(state); + + kfree(dpu_state); +} + +static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { + .atomic_duplicate_state = dpu_kms_global_duplicate_state, + .atomic_destroy_state = dpu_kms_global_destroy_state, +}; + +static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) +{ + struct dpu_global_state *state; + + drm_modeset_lock_init(&dpu_kms->global_state_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, + &state->base, + &dpu_kms_global_state_funcs); return 0; } -static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms) +static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) { - /* don't need to NULL check debugfs_root */ - if (dpu_kms) { - dpu_debugfs_vbif_destroy(dpu_kms); - dpu_debugfs_danger_destroy(dpu_kms); - dpu_debugfs_core_irq_destroy(dpu_kms); - debugfs_remove_recursive(dpu_kms->debugfs_root); + struct icc_path *path0; + struct icc_path *path1; + struct drm_device *dev = dpu_kms->dev; + + path0 = of_icc_get(dev->dev, "mdp0-mem"); + path1 = of_icc_get(dev->dev, "mdp1-mem"); + + if (IS_ERR_OR_NULL(path0)) + return PTR_ERR_OR_ZERO(path0); + + dpu_kms->path[0] = path0; + dpu_kms->num_paths = 1; + + if (!IS_ERR_OR_NULL(path1)) { + dpu_kms->path[1] = path1; + dpu_kms->num_paths++; } + return 0; } -#else -static void 
_dpu_debugfs_destroy(struct dpu_kms *dpu_kms) -{ -} -#endif static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { @@ -314,27 +319,64 @@ dpu_crtc_vblank(crtc, false); } +static void dpu_kms_enable_commit(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + pm_runtime_get_sync(&dpu_kms->pdev->dev); +} + +static void dpu_kms_disable_commit(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} + +static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { + ktime_t vsync_time; + + if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0) + return vsync_time; + } + + return ktime_get(); +} + static void dpu_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - struct dpu_kms *dpu_kms; - struct msm_drm_private *priv; - struct drm_device *dev; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; struct drm_encoder *encoder; + int i; if (!kms) return; - dpu_kms = to_dpu_kms(kms); - dev = dpu_kms->dev; - if (!dev || !dev->dev_private) - return; - priv = dev->dev_private; - pm_runtime_get_sync(&dpu_kms->pdev->dev); - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - if (encoder->crtc != NULL) + /* Call prepare_commit for all affected encoders */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc_state->encoder_mask) { dpu_encoder_prepare_commit(encoder); + } + } +} + +static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + struct drm_crtc *crtc; + + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + + trace_dpu_kms_commit(DRMID(crtc)); + dpu_crtc_commit_kickoff(crtc); + } } /* @@ -344,59 +386,30 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder) { const struct drm_encoder_helper_funcs *funcs = encoder->helper_private; - struct drm_crtc *crtc = encoder->crtc; + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc; /* Forward this enable call to the commit hook */ if (funcs && funcs->commit) funcs->commit(encoder); - if (crtc && crtc->state->active) { - trace_dpu_kms_enc_enable(DRMID(crtc)); - dpu_crtc_commit_kickoff(crtc); - } -} - -static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state) -{ - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - int i; - - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - /* If modeset is required, kickoff is run in encoder_enable */ - if (drm_atomic_crtc_needs_modeset(crtc_state)) + drm_for_each_crtc(crtc, dev) { + if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) continue; - if (crtc->state->active) { - trace_dpu_kms_commit(DRMID(crtc)); - dpu_crtc_commit_kickoff(crtc); - } + trace_dpu_kms_enc_enable(DRMID(crtc)); } } -static void dpu_kms_complete_commit(struct msm_kms *kms, - struct drm_atomic_state *old_state) +static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask) { - struct dpu_kms *dpu_kms; - struct msm_drm_private *priv; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state; - int i; - - if (!kms || !old_state) - return; - dpu_kms = to_dpu_kms(kms); - - if (!dpu_kms->dev || !dpu_kms->dev->dev_private) - return; - priv = dpu_kms->dev->dev_private; 
DPU_ATRACE_BEGIN("kms_complete_commit"); - for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) - dpu_crtc_complete_commit(crtc, old_crtc_state); - - pm_runtime_put_sync(&dpu_kms->pdev->dev); + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) + dpu_crtc_complete_commit(crtc); DPU_ATRACE_END("kms_complete_commit"); } @@ -442,6 +455,15 @@ } } +static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + struct drm_crtc *crtc; + + for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) + dpu_kms_wait_for_commit_done(kms, crtc); +} + static int _dpu_kms_initialize_dsi(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) @@ -476,6 +498,33 @@ return rc; } +static int _dpu_kms_initialize_displayport(struct drm_device *dev, + struct msm_drm_private *priv, + struct dpu_kms *dpu_kms) +{ + struct drm_encoder *encoder = NULL; + int rc = 0; + + if (!priv->dp) + return rc; + + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(encoder)) { + DPU_ERROR("encoder init failed for dsi display\n"); + return PTR_ERR(encoder); + } + + rc = msm_dp_modeset_init(priv->dp, dev, encoder); + if (rc) { + DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); + drm_encoder_cleanup(encoder); + return rc; + } + + priv->encoders[priv->num_encoders++] = encoder; + return rc; +} + /** * _dpu_kms_setup_displays - create encoders, bridges and connectors * for underlying displays @@ -488,12 +537,21 @@ struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { - /** - * Extend this function to initialize other - * types of displays - */ + int rc = 0; - return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_dsi failed, rc = %d\n", rc); + return rc; + } + + rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_DP failed, rc = %d\n", rc); + return rc; + } + + return rc; } static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) @@ -501,16 +559,6 @@ struct msm_drm_private *priv; int i; - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } else if (!dpu_kms->dev) { - DPU_ERROR("invalid dev\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid dev_private\n"); - return; - } priv = dpu_kms->dev->dev_private; for (i = 0; i < priv->num_crtcs; i++) @@ -534,19 +582,14 @@ { struct drm_device *dev; struct drm_plane *primary_planes[MAX_PLANES], *plane; + struct drm_plane *cursor_planes[MAX_PLANES] = { NULL }; struct drm_crtc *crtc; struct msm_drm_private *priv; struct dpu_mdss_cfg *catalog; - int primary_planes_idx = 0, i, ret; + int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; int max_crtc_count; - - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { - DPU_ERROR("invalid dpu_kms\n"); - return -EINVAL; - } - dev = dpu_kms->dev; priv = dev->dev_private; catalog = dpu_kms->catalog; @@ -561,16 +604,24 @@ max_crtc_count = min(catalog->mixer_count, priv->num_encoders); - /* Create the planes */ + /* Create the planes, keeping track of one primary/cursor per crtc */ for (i = 0; i < catalog->sspp_count; i++) { - bool primary = true; + enum drm_plane_type type; - if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR) - || primary_planes_idx >= max_crtc_count) - primary = false; + if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)) + && cursor_planes_idx < max_crtc_count) + type = DRM_PLANE_TYPE_CURSOR; + else if (primary_planes_idx < max_crtc_count) + type = 
DRM_PLANE_TYPE_PRIMARY; + else + type = DRM_PLANE_TYPE_OVERLAY; - plane = dpu_plane_init(dev, catalog->sspp[i].id, primary, - (1UL << max_crtc_count) - 1, 0); + DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n", + type, catalog->sspp[i].features, + catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)); + + plane = dpu_plane_init(dev, catalog->sspp[i].id, type, + (1UL << max_crtc_count) - 1, 0); if (IS_ERR(plane)) { DPU_ERROR("dpu_plane_init failed\n"); ret = PTR_ERR(plane); @@ -578,7 +629,9 @@ } priv->planes[priv->num_planes++] = plane; - if (primary) + if (type == DRM_PLANE_TYPE_CURSOR) + cursor_planes[cursor_planes_idx++] = plane; + else if (type == DRM_PLANE_TYPE_PRIMARY) primary_planes[primary_planes_idx++] = plane; } @@ -586,7 +639,7 @@ /* Create one CRTC per encoder */ for (i = 0; i < max_crtc_count; i++) { - crtc = dpu_crtc_init(dev, primary_planes[i]); + crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); goto fail; @@ -604,28 +657,6 @@ return ret; } -#ifdef CONFIG_DEBUG_FS -static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) -{ - struct dpu_kms *dpu_kms = to_dpu_kms(kms); - struct drm_device *dev; - int rc; - - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { - DPU_ERROR("invalid dpu_kms\n"); - return -EINVAL; - } - - dev = dpu_kms->dev; - - rc = _dpu_debugfs_init(dpu_kms); - if (rc) - DPU_ERROR("dpu_debugfs init failed: %d\n", rc); - - return rc; -} -#endif - static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -634,31 +665,21 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) { - struct drm_device *dev; int i; - - dev = dpu_kms->dev; - if (!dev) - return; if (dpu_kms->hw_intr) dpu_hw_intr_destroy(dpu_kms->hw_intr); dpu_kms->hw_intr = NULL; - if (dpu_kms->power_event) - dpu_power_handle_unregister_event( - &dpu_kms->phandle, dpu_kms->power_event); - /* safe to call these more than once during shutdown */ - _dpu_debugfs_destroy(dpu_kms); _dpu_kms_mmu_destroy(dpu_kms); if (dpu_kms->catalog) { - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]); + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + if (dpu_kms->hw_vbif[i]) { + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); + dpu_kms->hw_vbif[i] = NULL; + } } } @@ -670,11 +691,6 @@ dpu_hw_catalog_deinit(dpu_kms->catalog); dpu_kms->catalog = NULL; - if (dpu_kms->core_client) - dpu_power_client_destroy(&dpu_kms->phandle, - dpu_kms->core_client); - dpu_kms->core_client = NULL; - if (dpu_kms->vbif[VBIF_NRT]) devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]); dpu_kms->vbif[VBIF_NRT] = NULL; @@ -682,6 +698,10 @@ if (dpu_kms->vbif[VBIF_RT]) devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); dpu_kms->vbif[VBIF_RT] = NULL; + + if (dpu_kms->hw_mdp) + dpu_hw_mdp_destroy(dpu_kms->hw_mdp); + dpu_kms->hw_mdp = NULL; if (dpu_kms->mmio) devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); @@ -699,129 +719,7 @@ dpu_kms = to_dpu_kms(kms); - dpu_dbg_destroy(); _dpu_kms_hw_destroy(dpu_kms); -} - -static int dpu_kms_pm_suspend(struct device *dev) -{ - struct drm_device *ddev; - struct drm_modeset_acquire_ctx ctx; - struct drm_atomic_state *state; - struct dpu_kms *dpu_kms; - int ret = 0, num_crtcs = 0; - - if (!dev) - return -EINVAL; - - ddev = dev_get_drvdata(dev); - if (!ddev || !ddev_to_msm_kms(ddev)) - return -EINVAL; - - 
dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev)); - - /* disable hot-plug polling */ - drm_kms_helper_poll_disable(ddev); - - /* acquire modeset lock(s) */ - drm_modeset_acquire_init(&ctx, 0); - -retry: - DPU_ATRACE_BEGIN("kms_pm_suspend"); - - ret = drm_modeset_lock_all_ctx(ddev, &ctx); - if (ret) - goto unlock; - - /* save current state for resume */ - if (dpu_kms->suspend_state) - drm_atomic_state_put(dpu_kms->suspend_state); - dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx); - if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) { - DRM_ERROR("failed to back up suspend state\n"); - dpu_kms->suspend_state = NULL; - goto unlock; - } - - /* create atomic state to disable all CRTCs */ - state = drm_atomic_state_alloc(ddev); - if (IS_ERR_OR_NULL(state)) { - DRM_ERROR("failed to allocate crtc disable state\n"); - goto unlock; - } - - state->acquire_ctx = &ctx; - - /* check for nothing to do */ - if (num_crtcs == 0) { - DRM_DEBUG("all crtcs are already in the off state\n"); - drm_atomic_state_put(state); - goto suspended; - } - - /* commit the "disable all" state */ - ret = drm_atomic_commit(state); - if (ret < 0) { - DRM_ERROR("failed to disable crtcs, %d\n", ret); - drm_atomic_state_put(state); - goto unlock; - } - -suspended: - dpu_kms->suspend_block = true; - -unlock: - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - goto retry; - } - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - DPU_ATRACE_END("kms_pm_suspend"); - return 0; -} - -static int dpu_kms_pm_resume(struct device *dev) -{ - struct drm_device *ddev; - struct dpu_kms *dpu_kms; - int ret; - - if (!dev) - return -EINVAL; - - ddev = dev_get_drvdata(dev); - if (!ddev || !ddev_to_msm_kms(ddev)) - return -EINVAL; - - dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev)); - - DPU_ATRACE_BEGIN("kms_pm_resume"); - - drm_mode_config_reset(ddev); - - drm_modeset_lock_all(ddev); - - dpu_kms->suspend_block = false; - - if (dpu_kms->suspend_state) { - dpu_kms->suspend_state->acquire_ctx = - ddev->mode_config.acquire_ctx; - ret = drm_atomic_commit(dpu_kms->suspend_state); - if (ret < 0) { - DRM_ERROR("failed to restore state, %d\n", ret); - drm_atomic_state_put(dpu_kms->suspend_state); - } - dpu_kms->suspend_state = NULL; - } - drm_modeset_unlock_all(ddev); - - /* enable hot-plug polling */ - drm_kms_helper_poll_enable(ddev); - - DPU_ATRACE_END("kms_pm_resume"); - return 0; } static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, @@ -838,18 +736,63 @@ info.capabilities = cmd_mode ? 
MSM_DISPLAY_CAP_CMD_MODE : MSM_DISPLAY_CAP_VID_MODE; - /* TODO: No support for DSI swap */ - for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { - if (priv->dsi[i]) { - info.h_tile_instance[info.num_of_h_tiles] = i; - info.num_of_h_tiles++; + switch (info.intf_type) { + case DRM_MODE_ENCODER_DSI: + /* TODO: No support for DSI swap */ + for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { + if (priv->dsi[i]) { + info.h_tile_instance[info.num_of_h_tiles] = i; + info.num_of_h_tiles++; + } } - } + break; + case DRM_MODE_ENCODER_TMDS: + info.num_of_h_tiles = 1; + break; + }; rc = dpu_encoder_setup(encoder->dev, encoder, &info); if (rc) DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", encoder->base.id, rc); +} + +static irqreturn_t dpu_irq(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + return dpu_core_irq(dpu_kms); +} + +static void dpu_irq_preinstall(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + dpu_core_irq_preinstall(dpu_kms); +} + +static int dpu_irq_postinstall(struct msm_kms *kms) +{ + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + if (!dpu_kms || !dpu_kms->dev) + return -EINVAL; + + priv = dpu_kms->dev->dev_private; + if (!priv) + return -EINVAL; + + msm_dp_irq_postinstall(priv->dp); + + return 0; +} + +static void dpu_irq_uninstall(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + dpu_core_irq_uninstall(dpu_kms); } static const struct msm_kms_funcs kms_funcs = { @@ -858,17 +801,18 @@ .irq_postinstall = dpu_irq_postinstall, .irq_uninstall = dpu_irq_uninstall, .irq = dpu_irq, + .enable_commit = dpu_kms_enable_commit, + .disable_commit = dpu_kms_disable_commit, + .vsync_time = dpu_kms_vsync_time, .prepare_commit = dpu_kms_prepare_commit, - .commit = dpu_kms_commit, + .flush_commit = dpu_kms_flush_commit, + .wait_flush = dpu_kms_wait_flush, .complete_commit = dpu_kms_complete_commit, - .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done, .enable_vblank = dpu_kms_enable_vblank, .disable_vblank = dpu_kms_disable_vblank, .check_modified_format = dpu_format_check_modified_format, .get_format = dpu_get_msm_format, .round_pixclk = dpu_kms_round_pixclk, - .pm_suspend = dpu_kms_pm_suspend, - .pm_resume = dpu_kms_pm_resume, .destroy = dpu_kms_destroy, .set_encoder_mode = _dpu_kms_set_encoder_mode, #ifdef CONFIG_DEBUG_FS @@ -876,57 +820,46 @@ #endif }; -/* the caller api needs to turn on clock before calling it */ -static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms) -{ - dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); -} - -static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) +static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) { struct msm_mmu *mmu; + if (!dpu_kms->base.aspace) + return; + mmu = dpu_kms->base.aspace->mmu; - mmu->funcs->detach(mmu, (const char **)iommu_ports, - ARRAY_SIZE(iommu_ports)); + mmu->funcs->detach(mmu); msm_gem_address_space_put(dpu_kms->base.aspace); - return 0; + dpu_kms->base.aspace = NULL; } static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) { struct iommu_domain *domain; struct msm_gem_address_space *aspace; - int ret; + struct msm_mmu *mmu; domain = iommu_domain_alloc(&platform_bus_type); if (!domain) return 0; - aspace = msm_gem_address_space_create(dpu_kms->dev->dev, - domain, "dpu1"); + mmu = msm_iommu_new(dpu_kms->dev->dev, domain); + if (IS_ERR(mmu)) { + iommu_domain_free(domain); + return PTR_ERR(mmu); + } + aspace = msm_gem_address_space_create(mmu, "dpu1", + 0x1000, 0x100000000 - 0x1000); + if (IS_ERR(aspace)) { - ret = 
PTR_ERR(aspace); - goto fail; + mmu->funcs->destroy(mmu); + return PTR_ERR(aspace); } dpu_kms->base.aspace = aspace; - - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) { - DPU_ERROR("failed to attach iommu %d\n", ret); - msm_gem_address_space_put(aspace); - goto fail; - } - return 0; -fail: - _dpu_kms_mmu_destroy(dpu_kms); - - return ret; } static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms, @@ -954,47 +887,25 @@ return clk_get_rate(clk->clk); } -static void dpu_kms_handle_power_event(u32 event_type, void *usr) -{ - struct dpu_kms *dpu_kms = usr; - - if (!dpu_kms) - return; - - if (event_type == DPU_POWER_EVENT_POST_ENABLE) - dpu_vbif_init_memtypes(dpu_kms); -} - static int dpu_kms_hw_init(struct msm_kms *kms) { struct dpu_kms *dpu_kms; struct drm_device *dev; - struct msm_drm_private *priv; int i, rc = -EINVAL; if (!kms) { DPU_ERROR("invalid kms\n"); - goto end; + return rc; } dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - if (!dev) { - DPU_ERROR("invalid device\n"); - goto end; - } - rc = dpu_dbg_init(&dpu_kms->pdev->dev); - if (rc) { - DRM_ERROR("failed to init dpu dbg: %d\n", rc); - goto end; - } + rc = dpu_kms_global_obj_init(dpu_kms); + if (rc) + return rc; - priv = dev->dev_private; - if (!priv) { - DPU_ERROR("invalid private data\n"); - goto dbg_destroy; - } + atomic_set(&dpu_kms->bandwidth_ref, 0); dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp"); if (IS_ERR(dpu_kms->mmio)) { @@ -1004,7 +915,6 @@ goto error; } DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio); - dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp"); dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif"); if (IS_ERR(dpu_kms->vbif[VBIF_RT])) { @@ -1013,38 +923,25 @@ dpu_kms->vbif[VBIF_RT] = NULL; goto error; } - dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif"); - dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt"); + dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt"); if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) { dpu_kms->vbif[VBIF_NRT] = NULL; DPU_DEBUG("VBIF NRT is not defined"); - } else { - dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev, - "vbif_nrt"); } - dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma"); + dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma"); if (IS_ERR(dpu_kms->reg_dma)) { dpu_kms->reg_dma = NULL; DPU_DEBUG("REG_DMA is not defined"); - } else { - dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma"); } - dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle, - "core"); - if (IS_ERR_OR_NULL(dpu_kms->core_client)) { - rc = PTR_ERR(dpu_kms->core_client); - if (!dpu_kms->core_client) - rc = -EINVAL; - DPU_ERROR("dpu power client create failed: %d\n", rc); - dpu_kms->core_client = NULL; + dpu_kms_parse_data_bus_icc_path(dpu_kms); + + rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev); + if (rc < 0) goto error; - } - pm_runtime_get_sync(&dpu_kms->pdev->dev); - - _dpu_kms_core_hw_rev_init(dpu_kms); + dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev); @@ -1058,8 +955,6 @@ goto power_error; } - dpu_dbg_init_dbg_buses(dpu_kms->core_rev); - /* * Now we need to read the HW catalog and initialize resources such as * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc @@ -1070,8 +965,7 @@ goto power_error; } - rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, - dpu_kms->dev); + rc = 
dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio); if (rc) { DPU_ERROR("rm init failed: %d\n", rc); goto power_error; @@ -1079,11 +973,10 @@ dpu_kms->rm_init = true; - dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm); - if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) { + dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio, + dpu_kms->catalog); + if (IS_ERR(dpu_kms->hw_mdp)) { rc = PTR_ERR(dpu_kms->hw_mdp); - if (!dpu_kms->hw_mdp) - rc = -EINVAL; DPU_ERROR("failed to get hw_mdp: %d\n", rc); dpu_kms->hw_mdp = NULL; goto power_error; @@ -1092,7 +985,7 @@ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx, + dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx, dpu_kms->vbif[vbif_idx], dpu_kms->catalog); if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) { rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]); @@ -1105,7 +998,6 @@ } rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog, - &dpu_kms->phandle, _dpu_kms_get_clk(dpu_kms, "core")); if (rc) { DPU_ERROR("failed to init perf %d\n", rc); @@ -1118,16 +1010,6 @@ DPU_ERROR("hw_intr init failed: %d\n", rc); dpu_kms->hw_intr = NULL; goto hw_intr_init_err; - } - - /* - * _dpu_kms_drm_obj_init should create the DRM related objects - * i.e. CRTCs, planes, encoders, connectors and so forth - */ - rc = _dpu_kms_drm_obj_init(dpu_kms); - if (rc) { - DPU_ERROR("modeset init failed: %d\n", rc); - goto drm_obj_init_err; } dev->mode_config.min_width = 0; @@ -1147,13 +1029,16 @@ dev->mode_config.allow_fb_modifiers = true; /* - * Handle (re)initializations during power enable + * _dpu_kms_drm_obj_init should create the DRM related objects + * i.e. CRTCs, planes, encoders, connectors and so forth */ - dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms); - dpu_kms->power_event = dpu_power_handle_register_event( - &dpu_kms->phandle, - DPU_POWER_EVENT_POST_ENABLE, - dpu_kms_handle_power_event, dpu_kms, "kms"); + rc = _dpu_kms_drm_obj_init(dpu_kms); + if (rc) { + DPU_ERROR("modeset init failed: %d\n", rc); + goto drm_obj_init_err; + } + + dpu_vbif_init_memtypes(dpu_kms); pm_runtime_put_sync(&dpu_kms->pdev->dev); @@ -1167,9 +1052,7 @@ pm_runtime_put_sync(&dpu_kms->pdev->dev); error: _dpu_kms_hw_destroy(dpu_kms); -dbg_destroy: - dpu_dbg_destroy(); -end: + return rc; } @@ -1179,7 +1062,7 @@ struct dpu_kms *dpu_kms; int irq; - if (!dev || !dev->dev_private) { + if (!dev) { DPU_ERROR("drm device node invalid\n"); return ERR_PTR(-EINVAL); } @@ -1210,14 +1093,25 @@ if (!dpu_kms) return -ENOMEM; + dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core"); + if (IS_ERR(dpu_kms->opp_table)) + return PTR_ERR(dpu_kms->opp_table); + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(dev); + if (!ret) { + dpu_kms->has_opp_table = true; + } else if (ret != -ENODEV) { + dev_err(dev, "invalid OPP table in device tree\n"); + dev_pm_opp_put_clkname(dpu_kms->opp_table); + return ret; + } + mp = &dpu_kms->mp; ret = msm_dss_parse_clock(pdev, mp); if (ret) { DPU_ERROR("failed to parse clocks, ret=%d\n", ret); - return ret; + goto err; } - - dpu_power_resource_init(pdev, &dpu_kms->phandle); platform_set_drvdata(pdev, dpu_kms); @@ -1230,6 +1124,11 @@ priv->kms = &dpu_kms->base; return ret; +err: + if (dpu_kms->has_opp_table) + dev_pm_opp_of_remove_table(dev); + dev_pm_opp_put_clkname(dpu_kms->opp_table); + return ret; } static void dpu_unbind(struct device *dev, struct device *master, void *data) @@ -1238,13 +1137,16 @@ struct dpu_kms *dpu_kms = 
platform_get_drvdata(pdev); struct dss_module_power *mp = &dpu_kms->mp; - dpu_power_resource_deinit(pdev, &dpu_kms->phandle); msm_dss_put_clk(mp->clk_config, mp->num_clk); devm_kfree(&pdev->dev, mp->clk_config); mp->num_clk = 0; if (dpu_kms->rpm_enabled) pm_runtime_disable(&pdev->dev); + + if (dpu_kms->has_opp_table) + dev_pm_opp_of_remove_table(dev); + dev_pm_opp_put_clkname(dpu_kms->opp_table); } static const struct component_ops dpu_ops = { @@ -1265,28 +1167,20 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) { - int rc = -1; + int i, rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); - struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; - ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - goto exit; - } - - rc = dpu_power_resource_enable(&dpu_kms->phandle, - dpu_kms->core_client, false); - if (rc) - DPU_ERROR("resource disable failed: %d\n", rc); - + /* Drop the performance state vote */ + dev_pm_opp_set_rate(dev, 0); rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); -exit: + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, 0); + return rc; } @@ -1295,36 +1189,41 @@ int rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); + struct drm_encoder *encoder; struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; + int i; ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - goto exit; - } + + WARN_ON(!(dpu_kms->num_paths)); + /* Min vote of BW is required before turning on AXI clk */ + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW)); rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { DPU_ERROR("clock enable failed rc:%d\n", rc); - goto exit; + return rc; } - rc = dpu_power_resource_enable(&dpu_kms->phandle, - dpu_kms->core_client, true); - if (rc) - DPU_ERROR("resource enable failed: %d\n", rc); + dpu_vbif_init_memtypes(dpu_kms); -exit: + drm_for_each_encoder(encoder, ddev) + dpu_encoder_virt_runtime_resume(encoder); + return rc; } static const struct dev_pm_ops dpu_pm_ops = { SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,sdm845-dpu", }, + { .compatible = "qcom,sc7180-dpu", }, {} }; MODULE_DEVICE_TABLE(of, dpu_dt_match); -- Gitblit v1.6.2
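
Note (appended after the mail signature, so it is ignored by git-am): a minimal sketch of the interconnect (ICC) bandwidth-voting pattern this patch introduces around MIN_IB_BW - acquiring the "mdp0-mem"/"mdp1-mem" paths, placing a minimum instantaneous-bandwidth vote before the AXI clock is enabled on runtime resume, and dropping the vote on runtime suspend. The icc_* and of_icc_get() calls and the DT path names match the patch; the example_* struct and function names below are hypothetical and only serve to make the snippet self-contained.

/*
 * Illustrative sketch only - not the driver's actual implementation.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

#define EXAMPLE_MIN_IB_BW 400000000ULL	/* 400 MB/s minimum IB vote, as in MIN_IB_BW */

struct example_mdss {
	struct device *dev;
	struct icc_path *path[2];
	int num_paths;
};

/* Acquire the MDP->memory interconnect paths described in the device tree. */
static int example_parse_icc_paths(struct example_mdss *mdss)
{
	struct icc_path *path0 = of_icc_get(mdss->dev, "mdp0-mem");
	struct icc_path *path1 = of_icc_get(mdss->dev, "mdp1-mem");

	/* The first path is mandatory, the second one is optional. */
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	mdss->path[0] = path0;
	mdss->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		mdss->path[1] = path1;
		mdss->num_paths++;
	}
	return 0;
}

/* Runtime resume: place a minimum IB vote before turning on the AXI clock. */
static void example_icc_resume(struct example_mdss *mdss)
{
	int i;

	for (i = 0; i < mdss->num_paths; i++)
		icc_set_bw(mdss->path[i], 0, Bps_to_icc(EXAMPLE_MIN_IB_BW));
}

/* Runtime suspend: drop the vote so the memory bus can scale back down. */
static void example_icc_suspend(struct example_mdss *mdss)
{
	int i;

	for (i = 0; i < mdss->num_paths; i++)
		icc_set_bw(mdss->path[i], 0, 0);
}

The ordering mirrors the patch's dpu_runtime_resume()/dpu_runtime_suspend(): the vote must be in place before msm_dss_enable_clk() enables the AXI clock, and is only cleared after the clocks are disabled, otherwise the display bus could be accessed with no bandwidth guaranteed.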