| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (C) 2015 Broadcom |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 5 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 6 | | - * published by the Free Software Foundation. |
|---|
| 7 | 4 | */ |
|---|
| 8 | 5 | |
|---|
| 9 | 6 | /** |
|---|
| .. | .. |
|---|
| 14 | 11 | * crtc, HDMI encoder). |
|---|
| 15 | 12 | */ |
|---|
| 16 | 13 | |
|---|
| 17 | | -#include <drm/drm_crtc.h> |
|---|
| 14 | +#include <linux/clk.h> |
|---|
| 15 | + |
|---|
| 18 | 16 | #include <drm/drm_atomic.h> |
|---|
| 19 | 17 | #include <drm/drm_atomic_helper.h> |
|---|
| 20 | | -#include <drm/drm_crtc_helper.h> |
|---|
| 21 | | -#include <drm/drm_plane_helper.h> |
|---|
| 22 | | -#include <drm/drm_fb_helper.h> |
|---|
| 23 | | -#include <drm/drm_fb_cma_helper.h> |
|---|
| 18 | +#include <drm/drm_crtc.h> |
|---|
| 24 | 19 | #include <drm/drm_gem_framebuffer_helper.h> |
|---|
| 20 | +#include <drm/drm_plane_helper.h> |
|---|
| 21 | +#include <drm/drm_probe_helper.h> |
|---|
| 22 | +#include <drm/drm_vblank.h> |
|---|
| 23 | + |
|---|
| 25 | 24 | #include "vc4_drv.h" |
|---|
| 26 | 25 | #include "vc4_regs.h" |
|---|
| 26 | + |
|---|
| 27 | +#define HVS_NUM_CHANNELS 3 |
|---|
| 27 | 28 | |
|---|
| 28 | 29 | struct vc4_ctm_state { |
|---|
| 29 | 30 | struct drm_private_state base; |
|---|
| .. | .. |
|---|
| 36 | 37 | return container_of(priv, struct vc4_ctm_state, base); |
|---|
| 37 | 38 | } |
|---|
| 38 | 39 | |
|---|
| 40 | +struct vc4_hvs_state { |
|---|
| 41 | + struct drm_private_state base; |
|---|
| 42 | + unsigned int unassigned_channels; |
|---|
| 43 | +}; |
|---|
| 44 | + |
|---|
| 45 | +static struct vc4_hvs_state * |
|---|
| 46 | +to_vc4_hvs_state(struct drm_private_state *priv) |
|---|
| 47 | +{ |
|---|
| 48 | + return container_of(priv, struct vc4_hvs_state, base); |
|---|
| 49 | +} |
|---|
| 50 | + |
|---|
| 51 | +struct vc4_load_tracker_state { |
|---|
| 52 | + struct drm_private_state base; |
|---|
| 53 | + u64 hvs_load; |
|---|
| 54 | + u64 membus_load; |
|---|
| 55 | +}; |
|---|
| 56 | + |
|---|
| 57 | +static struct vc4_load_tracker_state * |
|---|
| 58 | +to_vc4_load_tracker_state(struct drm_private_state *priv) |
|---|
| 59 | +{ |
|---|
| 60 | + return container_of(priv, struct vc4_load_tracker_state, base); |
|---|
| 61 | +} |
|---|
| 62 | + |
|---|
| 39 | 63 | static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, |
|---|
| 40 | 64 | struct drm_private_obj *manager) |
|---|
| 41 | 65 | { |
|---|
| 42 | 66 | struct drm_device *dev = state->dev; |
|---|
| 43 | | - struct vc4_dev *vc4 = dev->dev_private; |
|---|
| 67 | + struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 44 | 68 | struct drm_private_state *priv_state; |
|---|
| 45 | 69 | int ret; |
|---|
| 46 | 70 | |
|---|
| .. | .. |
|---|
| 81 | 105 | .atomic_duplicate_state = vc4_ctm_duplicate_state, |
|---|
| 82 | 106 | .atomic_destroy_state = vc4_ctm_destroy_state, |
|---|
| 83 | 107 | }; |
|---|
| 108 | + |
|---|
| 109 | +static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused) |
|---|
| 110 | +{ |
|---|
| 111 | + struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 112 | + |
|---|
| 113 | + drm_atomic_private_obj_fini(&vc4->ctm_manager); |
|---|
| 114 | +} |
|---|
| 115 | + |
|---|
| 116 | +static int vc4_ctm_obj_init(struct vc4_dev *vc4) |
|---|
| 117 | +{ |
|---|
| 118 | + struct vc4_ctm_state *ctm_state; |
|---|
| 119 | + |
|---|
| 120 | + drm_modeset_lock_init(&vc4->ctm_state_lock); |
|---|
| 121 | + |
|---|
| 122 | + ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); |
|---|
| 123 | + if (!ctm_state) |
|---|
| 124 | + return -ENOMEM; |
|---|
| 125 | + |
|---|
| 126 | + drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base, |
|---|
| 127 | + &vc4_ctm_state_funcs); |
|---|
| 128 | + |
|---|
| 129 | + return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL); |
|---|
| 130 | +} |
|---|
| 84 | 131 | |
|---|
| 85 | 132 | /* Converts a DRM S31.32 value to the HW S0.9 format. */ |
|---|
| 86 | 133 | static u16 vc4_ctm_s31_32_to_s0_9(u64 in) |
|---|
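The hunk above moves the CTM private object over to DRM-managed (drmm) setup: the initial state is allocated once, registered with drm_atomic_private_obj_init() against the drm_device embedded in vc4, and released through a drmm action rather than an explicit unload path. The same pattern is repeated later in this commit for the load tracker and the HVS channels object. Below is a condensed, non-compilable sketch of that lifecycle for orientation only; `foo`, `foo_state` and `foo_state_funcs` are placeholders, not names from the driver.

```c
/* Sketch only: the drmm-managed private-object pattern this commit applies.
 * "foo" is a placeholder for ctm_manager, load_tracker or hvs_channels.
 */
static void foo_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Runs automatically when the drm_device is torn down. */
	drm_atomic_private_obj_fini(&vc4->foo);
}

static int foo_obj_init(struct vc4_dev *vc4)
{
	struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	/* vc4->base is the drmm-managed drm_device embedded in vc4_dev. */
	drm_atomic_private_obj_init(&vc4->base, &vc4->foo, &state->base,
				    &foo_state_funcs);

	/* Register the teardown; on failure the action runs immediately. */
	return drmm_add_action_or_reset(&vc4->base, foo_obj_fini, NULL);
}
```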
| .. | .. |
|---|
| 135 | 182 | VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); |
|---|
| 136 | 183 | } |
|---|
| 137 | 184 | |
|---|
| 185 | +static struct vc4_hvs_state * |
|---|
| 186 | +vc4_hvs_get_global_state(struct drm_atomic_state *state) |
|---|
| 187 | +{ |
|---|
| 188 | + struct vc4_dev *vc4 = to_vc4_dev(state->dev); |
|---|
| 189 | + struct drm_private_state *priv_state; |
|---|
| 190 | + |
|---|
| 191 | + priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels); |
|---|
| 192 | + if (IS_ERR(priv_state)) |
|---|
| 193 | + return ERR_CAST(priv_state); |
|---|
| 194 | + |
|---|
| 195 | + return to_vc4_hvs_state(priv_state); |
|---|
| 196 | +} |
|---|
| 197 | + |
|---|
| 198 | +static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, |
|---|
| 199 | + struct drm_atomic_state *state) |
|---|
| 200 | +{ |
|---|
| 201 | + struct drm_crtc_state *crtc_state; |
|---|
| 202 | + struct drm_crtc *crtc; |
|---|
| 203 | + unsigned int i; |
|---|
| 204 | + |
|---|
| 205 | + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
|---|
| 206 | + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); |
|---|
| 207 | + u32 dispctrl; |
|---|
| 208 | + u32 dsp3_mux; |
|---|
| 209 | + |
|---|
| 210 | + if (!crtc_state->active) |
|---|
| 211 | + continue; |
|---|
| 212 | + |
|---|
| 213 | + if (vc4_state->assigned_channel != 2) |
|---|
| 214 | + continue; |
|---|
| 215 | + |
|---|
| 216 | + /* |
|---|
| 217 | + * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to |
|---|
| 218 | + * FIFO X'. |
|---|
| 219 | + * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. |
|---|
| 220 | + * |
|---|
| 221 | + * DSP3 is connected to FIFO2 unless the transposer is |
|---|
| 222 | + * enabled. In this case, FIFO 2 is directly accessed by the |
|---|
| 223 | + * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 |
|---|
| 224 | + * route. |
|---|
| 225 | + */ |
|---|
| 226 | + if (vc4_state->feed_txp) |
|---|
| 227 | + dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); |
|---|
| 228 | + else |
|---|
| 229 | + dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); |
|---|
| 230 | + |
|---|
| 231 | + dispctrl = HVS_READ(SCALER_DISPCTRL) & |
|---|
| 232 | + ~SCALER_DISPCTRL_DSP3_MUX_MASK; |
|---|
| 233 | + HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); |
|---|
| 234 | + } |
|---|
| 235 | +} |
|---|
| 236 | + |
|---|
| 237 | +static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, |
|---|
| 238 | + struct drm_atomic_state *state) |
|---|
| 239 | +{ |
|---|
| 240 | + struct drm_crtc_state *crtc_state; |
|---|
| 241 | + struct drm_crtc *crtc; |
|---|
| 242 | + unsigned char mux; |
|---|
| 243 | + unsigned int i; |
|---|
| 244 | + u32 reg; |
|---|
| 245 | + |
|---|
| 246 | + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
|---|
| 247 | + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); |
|---|
| 248 | + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); |
|---|
| 249 | + |
|---|
| 250 | + if (!vc4_state->update_muxing) |
|---|
| 251 | + continue; |
|---|
| 252 | + |
|---|
| 253 | + switch (vc4_crtc->data->hvs_output) { |
|---|
| 254 | + case 2: |
|---|
| 255 | + mux = (vc4_state->assigned_channel == 2) ? 0 : 1; |
|---|
| 256 | + reg = HVS_READ(SCALER_DISPECTRL); |
|---|
| 257 | + HVS_WRITE(SCALER_DISPECTRL, |
|---|
| 258 | + (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | |
|---|
| 259 | + VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX)); |
|---|
| 260 | + break; |
|---|
| 261 | + |
|---|
| 262 | + case 3: |
|---|
| 263 | + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) |
|---|
| 264 | + mux = 3; |
|---|
| 265 | + else |
|---|
| 266 | + mux = vc4_state->assigned_channel; |
|---|
| 267 | + |
|---|
| 268 | + reg = HVS_READ(SCALER_DISPCTRL); |
|---|
| 269 | + HVS_WRITE(SCALER_DISPCTRL, |
|---|
| 270 | + (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | |
|---|
| 271 | + VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX)); |
|---|
| 272 | + break; |
|---|
| 273 | + |
|---|
| 274 | + case 4: |
|---|
| 275 | + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) |
|---|
| 276 | + mux = 3; |
|---|
| 277 | + else |
|---|
| 278 | + mux = vc4_state->assigned_channel; |
|---|
| 279 | + |
|---|
| 280 | + reg = HVS_READ(SCALER_DISPEOLN); |
|---|
| 281 | + HVS_WRITE(SCALER_DISPEOLN, |
|---|
| 282 | + (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | |
|---|
| 283 | + VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX)); |
|---|
| 284 | + |
|---|
| 285 | + break; |
|---|
| 286 | + |
|---|
| 287 | + case 5: |
|---|
| 288 | + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) |
|---|
| 289 | + mux = 3; |
|---|
| 290 | + else |
|---|
| 291 | + mux = vc4_state->assigned_channel; |
|---|
| 292 | + |
|---|
| 293 | + reg = HVS_READ(SCALER_DISPDITHER); |
|---|
| 294 | + HVS_WRITE(SCALER_DISPDITHER, |
|---|
| 295 | + (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | |
|---|
| 296 | + VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX)); |
|---|
| 297 | + break; |
|---|
| 298 | + |
|---|
| 299 | + default: |
|---|
| 300 | + break; |
|---|
| 301 | + } |
|---|
| 302 | + } |
|---|
| 303 | +} |
|---|
| 304 | + |
|---|
| 138 | 305 | static void |
|---|
| 139 | 306 | vc4_atomic_complete_commit(struct drm_atomic_state *state) |
|---|
| 140 | 307 | { |
|---|
| 141 | 308 | struct drm_device *dev = state->dev; |
|---|
| 142 | 309 | struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 310 | + struct vc4_hvs *hvs = vc4->hvs; |
|---|
| 311 | + struct drm_crtc_state *new_crtc_state; |
|---|
| 312 | + struct drm_crtc *crtc; |
|---|
| 313 | + int i; |
|---|
| 314 | + |
|---|
| 315 | + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
|---|
| 316 | + struct vc4_crtc_state *vc4_crtc_state; |
|---|
| 317 | + |
|---|
| 318 | + if (!new_crtc_state->commit) |
|---|
| 319 | + continue; |
|---|
| 320 | + |
|---|
| 321 | + vc4_crtc_state = to_vc4_crtc_state(new_crtc_state); |
|---|
| 322 | + vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); |
|---|
| 323 | + } |
|---|
| 324 | + |
|---|
| 325 | + if (vc4->hvs->hvs5) |
|---|
| 326 | + clk_set_min_rate(hvs->core_clk, 500000000); |
|---|
| 143 | 327 | |
|---|
| 144 | 328 | drm_atomic_helper_wait_for_fences(dev, state, false); |
|---|
| 145 | 329 | |
|---|
| .. | .. |
|---|
| 148 | 332 | drm_atomic_helper_commit_modeset_disables(dev, state); |
|---|
| 149 | 333 | |
|---|
| 150 | 334 | vc4_ctm_commit(vc4, state); |
|---|
| 335 | + |
|---|
| 336 | + if (vc4->hvs->hvs5) |
|---|
| 337 | + vc5_hvs_pv_muxing_commit(vc4, state); |
|---|
| 338 | + else |
|---|
| 339 | + vc4_hvs_pv_muxing_commit(vc4, state); |
|---|
| 151 | 340 | |
|---|
| 152 | 341 | drm_atomic_helper_commit_planes(dev, state, 0); |
|---|
| 153 | 342 | |
|---|
| .. | .. |
|---|
| 162 | 351 | drm_atomic_helper_cleanup_planes(dev, state); |
|---|
| 163 | 352 | |
|---|
| 164 | 353 | drm_atomic_helper_commit_cleanup_done(state); |
|---|
| 354 | + |
|---|
| 355 | + if (vc4->hvs->hvs5) |
|---|
| 356 | + clk_set_min_rate(hvs->core_clk, 0); |
|---|
| 165 | 357 | |
|---|
| 166 | 358 | drm_atomic_state_put(state); |
|---|
| 167 | 359 | |
|---|
| .. | .. |
|---|
| 311 | 503 | mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; |
|---|
| 312 | 504 | } |
|---|
| 313 | 505 | |
|---|
| 314 | | - drm_gem_object_put_unlocked(gem_obj); |
|---|
| 506 | + drm_gem_object_put(gem_obj); |
|---|
| 315 | 507 | |
|---|
| 316 | 508 | mode_cmd = &mode_cmd_local; |
|---|
| 317 | 509 | } |
|---|
| .. | .. |
|---|
| 355 | 547 | |
|---|
| 356 | 548 | /* CTM is being enabled or the matrix changed. */ |
|---|
| 357 | 549 | if (new_crtc_state->ctm) { |
|---|
| 550 | + struct vc4_crtc_state *vc4_crtc_state = |
|---|
| 551 | + to_vc4_crtc_state(new_crtc_state); |
|---|
| 552 | + |
|---|
| 358 | 553 | /* fifo is 1-based since 0 disables CTM. */ |
|---|
| 359 | | - int fifo = to_vc4_crtc(crtc)->channel + 1; |
|---|
| 554 | + int fifo = vc4_crtc_state->assigned_channel + 1; |
|---|
| 360 | 555 | |
|---|
| 361 | 556 | /* Check userland isn't trying to turn on CTM for more |
|---|
| 362 | 557 | * than one CRTC at a time. |
|---|
| .. | .. |
|---|
| 387 | 582 | return 0; |
|---|
| 388 | 583 | } |
|---|
| 389 | 584 | |
|---|
| 585 | +static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) |
|---|
| 586 | +{ |
|---|
| 587 | + struct drm_plane_state *old_plane_state, *new_plane_state; |
|---|
| 588 | + struct vc4_dev *vc4 = to_vc4_dev(state->dev); |
|---|
| 589 | + struct vc4_load_tracker_state *load_state; |
|---|
| 590 | + struct drm_private_state *priv_state; |
|---|
| 591 | + struct drm_plane *plane; |
|---|
| 592 | + int i; |
|---|
| 593 | + |
|---|
| 594 | + if (!vc4->load_tracker_available) |
|---|
| 595 | + return 0; |
|---|
| 596 | + |
|---|
| 597 | + priv_state = drm_atomic_get_private_obj_state(state, |
|---|
| 598 | + &vc4->load_tracker); |
|---|
| 599 | + if (IS_ERR(priv_state)) |
|---|
| 600 | + return PTR_ERR(priv_state); |
|---|
| 601 | + |
|---|
| 602 | + load_state = to_vc4_load_tracker_state(priv_state); |
|---|
| 603 | + for_each_oldnew_plane_in_state(state, plane, old_plane_state, |
|---|
| 604 | + new_plane_state, i) { |
|---|
| 605 | + struct vc4_plane_state *vc4_plane_state; |
|---|
| 606 | + |
|---|
| 607 | + if (old_plane_state->fb && old_plane_state->crtc) { |
|---|
| 608 | + vc4_plane_state = to_vc4_plane_state(old_plane_state); |
|---|
| 609 | + load_state->membus_load -= vc4_plane_state->membus_load; |
|---|
| 610 | + load_state->hvs_load -= vc4_plane_state->hvs_load; |
|---|
| 611 | + } |
|---|
| 612 | + |
|---|
| 613 | + if (new_plane_state->fb && new_plane_state->crtc) { |
|---|
| 614 | + vc4_plane_state = to_vc4_plane_state(new_plane_state); |
|---|
| 615 | + load_state->membus_load += vc4_plane_state->membus_load; |
|---|
| 616 | + load_state->hvs_load += vc4_plane_state->hvs_load; |
|---|
| 617 | + } |
|---|
| 618 | + } |
|---|
| 619 | + |
|---|
| 620 | + /* Don't check the load when the tracker is disabled. */ |
|---|
| 621 | + if (!vc4->load_tracker_enabled) |
|---|
| 622 | + return 0; |
|---|
| 623 | + |
|---|
| 624 | + /* The absolute limit is 2Gbyte/sec, but let's take a margin to let |
|---|
| 625 | + * the system work when other blocks are accessing the memory. |
|---|
| 626 | + */ |
|---|
| 627 | + if (load_state->membus_load > SZ_1G + SZ_512M) |
|---|
| 628 | + return -ENOSPC; |
|---|
| 629 | + |
|---|
| 630 | + /* HVS clock is supposed to run @ 250Mhz, let's take a margin and |
|---|
| 631 | + * consider the maximum number of cycles is 240M. |
|---|
| 632 | + */ |
|---|
| 633 | + if (load_state->hvs_load > 240000000ULL) |
|---|
| 634 | + return -ENOSPC; |
|---|
| 635 | + |
|---|
| 636 | + return 0; |
|---|
| 637 | +} |
|---|
| 638 | + |
|---|
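vc4_load_tracker_atomic_check() above sums the memory-bus and HVS load contributions of every plane touched by the state (subtracting the old plane state's share, adding the new one's) and rejects the commit with -ENOSPC once either budget is exceeded: 1.5 GiB/s (SZ_1G + SZ_512M) as a margin under the 2 GB/s bus limit, and 240M HVS cycles as a margin under the nominal 250 MHz HVS clock. A minimal standalone illustration of those two checks follows; the per-plane figures are made up, not taken from the driver.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_512M		(512u * 1024 * 1024)
#define SZ_1G		(1024u * 1024 * 1024)

/* Same budgets as the atomic check: 1.5 GiB/s of memory-bus bandwidth
 * (margin under the 2 GB/s limit) and 240M HVS cycles per second
 * (margin under the 250 MHz HVS clock).
 */
static bool load_fits(uint64_t membus_load, uint64_t hvs_load)
{
	if (membus_load > (uint64_t)SZ_1G + SZ_512M)
		return false;
	if (hvs_load > 240000000ULL)
		return false;
	return true;
}

int main(void)
{
	/* Hypothetical per-plane figures, only to exercise the check. */
	uint64_t membus = 900000000ULL + 800000000ULL;	/* two busy planes */
	uint64_t hvs = 120000000ULL + 100000000ULL;

	printf("commit %s\n",
	       load_fits(membus, hvs) ? "accepted" : "rejected (-ENOSPC)");
	return 0;
}
```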
| 639 | +static struct drm_private_state * |
|---|
| 640 | +vc4_load_tracker_duplicate_state(struct drm_private_obj *obj) |
|---|
| 641 | +{ |
|---|
| 642 | + struct vc4_load_tracker_state *state; |
|---|
| 643 | + |
|---|
| 644 | + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); |
|---|
| 645 | + if (!state) |
|---|
| 646 | + return NULL; |
|---|
| 647 | + |
|---|
| 648 | + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); |
|---|
| 649 | + |
|---|
| 650 | + return &state->base; |
|---|
| 651 | +} |
|---|
| 652 | + |
|---|
| 653 | +static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj, |
|---|
| 654 | + struct drm_private_state *state) |
|---|
| 655 | +{ |
|---|
| 656 | + struct vc4_load_tracker_state *load_state; |
|---|
| 657 | + |
|---|
| 658 | + load_state = to_vc4_load_tracker_state(state); |
|---|
| 659 | + kfree(load_state); |
|---|
| 660 | +} |
|---|
| 661 | + |
|---|
| 662 | +static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { |
|---|
| 663 | + .atomic_duplicate_state = vc4_load_tracker_duplicate_state, |
|---|
| 664 | + .atomic_destroy_state = vc4_load_tracker_destroy_state, |
|---|
| 665 | +}; |
|---|
| 666 | + |
|---|
| 667 | +static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused) |
|---|
| 668 | +{ |
|---|
| 669 | + struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 670 | + |
|---|
| 671 | + if (!vc4->load_tracker_available) |
|---|
| 672 | + return; |
|---|
| 673 | + |
|---|
| 674 | + drm_atomic_private_obj_fini(&vc4->load_tracker); |
|---|
| 675 | +} |
|---|
| 676 | + |
|---|
| 677 | +static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) |
|---|
| 678 | +{ |
|---|
| 679 | + struct vc4_load_tracker_state *load_state; |
|---|
| 680 | + |
|---|
| 681 | + if (!vc4->load_tracker_available) |
|---|
| 682 | + return 0; |
|---|
| 683 | + |
|---|
| 684 | + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); |
|---|
| 685 | + if (!load_state) |
|---|
| 686 | + return -ENOMEM; |
|---|
| 687 | + |
|---|
| 688 | + drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker, |
|---|
| 689 | + &load_state->base, |
|---|
| 690 | + &vc4_load_tracker_state_funcs); |
|---|
| 691 | + |
|---|
| 692 | + return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL); |
|---|
| 693 | +} |
|---|
| 694 | + |
|---|
| 695 | +static struct drm_private_state * |
|---|
| 696 | +vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) |
|---|
| 697 | +{ |
|---|
| 698 | + struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state); |
|---|
| 699 | + struct vc4_hvs_state *state; |
|---|
| 700 | + |
|---|
| 701 | + state = kzalloc(sizeof(*state), GFP_KERNEL); |
|---|
| 702 | + if (!state) |
|---|
| 703 | + return NULL; |
|---|
| 704 | + |
|---|
| 705 | + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); |
|---|
| 706 | + |
|---|
| 707 | + state->unassigned_channels = old_state->unassigned_channels; |
|---|
| 708 | + |
|---|
| 709 | + return &state->base; |
|---|
| 710 | +} |
|---|
| 711 | + |
|---|
| 712 | +static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj, |
|---|
| 713 | + struct drm_private_state *state) |
|---|
| 714 | +{ |
|---|
| 715 | + struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state); |
|---|
| 716 | + |
|---|
| 717 | + kfree(hvs_state); |
|---|
| 718 | +} |
|---|
| 719 | + |
|---|
| 720 | +static const struct drm_private_state_funcs vc4_hvs_state_funcs = { |
|---|
| 721 | + .atomic_duplicate_state = vc4_hvs_channels_duplicate_state, |
|---|
| 722 | + .atomic_destroy_state = vc4_hvs_channels_destroy_state, |
|---|
| 723 | +}; |
|---|
| 724 | + |
|---|
| 725 | +static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused) |
|---|
| 726 | +{ |
|---|
| 727 | + struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 728 | + |
|---|
| 729 | + drm_atomic_private_obj_fini(&vc4->hvs_channels); |
|---|
| 730 | +} |
|---|
| 731 | + |
|---|
| 732 | +static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4) |
|---|
| 733 | +{ |
|---|
| 734 | + struct vc4_hvs_state *state; |
|---|
| 735 | + |
|---|
| 736 | + state = kzalloc(sizeof(*state), GFP_KERNEL); |
|---|
| 737 | + if (!state) |
|---|
| 738 | + return -ENOMEM; |
|---|
| 739 | + |
|---|
| 740 | + state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0); |
|---|
| 741 | + drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels, |
|---|
| 742 | + &state->base, |
|---|
| 743 | + &vc4_hvs_state_funcs); |
|---|
| 744 | + |
|---|
| 745 | + return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL); |
|---|
| 746 | +} |
|---|
| 747 | + |
|---|
| 748 | +/* |
|---|
| 749 | + * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and |
|---|
| 750 | + * the TXP (and therefore all the CRTCs found on that platform). |
|---|
| 751 | + * |
|---|
| 752 | + * The naive (and our initial) implementation would just iterate over |
|---|
| 753 | + * all the active CRTCs, try to find a suitable FIFO, and then remove it |
|---|
| 754 | + * from the pool of available FIFOs. However, there are a few corner |
|---|
| 755 | + * cases that need to be considered: |
|---|
| 756 | + * |
|---|
| 757 | + * - When running in a dual-display setup (so with two CRTCs involved), |
|---|
| 758 | + * we can update the state of a single CRTC (for example by changing |
|---|
| 759 | + * its mode using xrandr under X11) without affecting the other. In |
|---|
| 760 | + * this case, the other CRTC wouldn't be in the state at all, so we |
|---|
| 761 | + * need to consider all the running CRTCs in the DRM device to assign |
|---|
| 762 | + * a FIFO, not just the one in the state. |
|---|
| 763 | + * |
|---|
| 764 | + * - To fix the above, we can't use drm_atomic_get_crtc_state on all |
|---|
| 765 | + * enabled CRTCs to pull their CRTC state into the global state, since |
|---|
| 766 | + * a page flip would start considering their vblank to complete. Since |
|---|
| 767 | + * we don't have a guarantee that they are actually active, that |
|---|
| 768 | + * vblank might never happen, and shouldn't even be considered if we |
|---|
| 769 | + * want to do a page flip on a single CRTC. That can be tested by |
|---|
| 770 | + * doing a modetest -v first on HDMI1 and then on HDMI0. |
|---|
| 771 | + * |
|---|
| 772 | + * - Since we need the pixelvalve to be disabled and enabled back when |
|---|
| 773 | + * the FIFO is changed, we should keep the FIFO assigned for as long |
|---|
| 774 | + * as the CRTC is enabled, only considering it free again once that |
|---|
| 775 | + * CRTC has been disabled. This can be tested by booting X11 on a |
|---|
| 776 | + * single display, and changing the resolution down and then back up. |
|---|
| 777 | + */ |
|---|
| 778 | +static int vc4_pv_muxing_atomic_check(struct drm_device *dev, |
|---|
| 779 | + struct drm_atomic_state *state) |
|---|
| 780 | +{ |
|---|
| 781 | + struct vc4_hvs_state *hvs_new_state; |
|---|
| 782 | + struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
|---|
| 783 | + struct drm_crtc *crtc; |
|---|
| 784 | + unsigned int i; |
|---|
| 785 | + |
|---|
| 786 | + hvs_new_state = vc4_hvs_get_global_state(state); |
|---|
| 787 | + if (!hvs_new_state) |
|---|
| 788 | + return -EINVAL; |
|---|
| 789 | + |
|---|
| 790 | + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
|---|
| 791 | + struct vc4_crtc_state *old_vc4_crtc_state = |
|---|
| 792 | + to_vc4_crtc_state(old_crtc_state); |
|---|
| 793 | + struct vc4_crtc_state *new_vc4_crtc_state = |
|---|
| 794 | + to_vc4_crtc_state(new_crtc_state); |
|---|
| 795 | + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); |
|---|
| 796 | + unsigned int matching_channels; |
|---|
| 797 | + |
|---|
| 798 | + /* Nothing to do here, let's skip it */ |
|---|
| 799 | + if (old_crtc_state->enable == new_crtc_state->enable) |
|---|
| 800 | + continue; |
|---|
| 801 | + |
|---|
| 802 | + /* Muxing will need to be modified, mark it as such */ |
|---|
| 803 | + new_vc4_crtc_state->update_muxing = true; |
|---|
| 804 | + |
|---|
| 805 | + /* If we're disabling our CRTC, we put back our channel */ |
|---|
| 806 | + if (!new_crtc_state->enable) { |
|---|
| 807 | + hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel); |
|---|
| 808 | + new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; |
|---|
| 809 | + continue; |
|---|
| 810 | + } |
|---|
| 811 | + |
|---|
| 812 | + /* |
|---|
| 813 | + * The problem we have to solve here is that we have |
|---|
| 814 | + * up to 7 encoders, connected to up to 6 CRTCs. |
|---|
| 815 | + * |
|---|
| 816 | + * Those CRTCs, depending on the instance, can be |
|---|
| 817 | + * routed to 1, 2 or 3 HVS FIFOs, and we need to set |
|---|
| 818 | + * the change the muxing between FIFOs and outputs in |
|---|
| 819 | + * the HVS accordingly. |
|---|
| 820 | + * |
|---|
| 821 | + * It would be pretty hard to come up with an |
|---|
| 822 | + * algorithm that would generically solve |
|---|
| 823 | + * this. However, the current routing trees we support |
|---|
| 824 | + * allow us to simplify a bit the problem. |
|---|
| 825 | + * |
|---|
| 826 | + * Indeed, with the current supported layouts, if we |
|---|
| 827 | + * try to assign in the ascending crtc index order the |
|---|
| 828 | + * FIFOs, we can't fall into the situation where an |
|---|
| 829 | + * earlier CRTC that had multiple routes is assigned |
|---|
| 830 | + * one that was the only option for a later CRTC. |
|---|
| 831 | + * |
|---|
| 832 | + * If the layout changes and doesn't give us that in |
|---|
| 833 | + * the future, we will need to have something smarter, |
|---|
| 834 | + * but it works so far. |
|---|
| 835 | + */ |
|---|
| 836 | + matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels; |
|---|
| 837 | + if (matching_channels) { |
|---|
| 838 | + unsigned int channel = ffs(matching_channels) - 1; |
|---|
| 839 | + |
|---|
| 840 | + new_vc4_crtc_state->assigned_channel = channel; |
|---|
| 841 | + hvs_new_state->unassigned_channels &= ~BIT(channel); |
|---|
| 842 | + } else { |
|---|
| 843 | + return -EINVAL; |
|---|
| 844 | + } |
|---|
| 845 | + } |
|---|
| 846 | + |
|---|
| 847 | + return 0; |
|---|
| 848 | +} |
|---|
| 849 | + |
|---|
| 390 | 850 | static int |
|---|
| 391 | 851 | vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) |
|---|
| 392 | 852 | { |
|---|
| 393 | 853 | int ret; |
|---|
| 394 | 854 | |
|---|
| 855 | + ret = vc4_pv_muxing_atomic_check(dev, state); |
|---|
| 856 | + if (ret) |
|---|
| 857 | + return ret; |
|---|
| 858 | + |
|---|
| 395 | 859 | ret = vc4_ctm_atomic_check(dev, state); |
|---|
| 396 | 860 | if (ret < 0) |
|---|
| 397 | 861 | return ret; |
|---|
| 398 | 862 | |
|---|
| 399 | | - return drm_atomic_helper_check(dev, state); |
|---|
| 863 | + ret = drm_atomic_helper_check(dev, state); |
|---|
| 864 | + if (ret) |
|---|
| 865 | + return ret; |
|---|
| 866 | + |
|---|
| 867 | + return vc4_load_tracker_atomic_check(state); |
|---|
| 400 | 868 | } |
|---|
| 401 | 869 | |
|---|
| 402 | 870 | static const struct drm_mode_config_funcs vc4_mode_funcs = { |
|---|
| 403 | | - .output_poll_changed = drm_fb_helper_output_poll_changed, |
|---|
| 404 | 871 | .atomic_check = vc4_atomic_check, |
|---|
| 405 | 872 | .atomic_commit = vc4_atomic_commit, |
|---|
| 406 | 873 | .fb_create = vc4_fb_create, |
|---|
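As the comment blocks above explain, vc4_pv_muxing_atomic_check() hands out HVS FIFOs by walking only the CRTCs whose enable state changed: a CRTC being disabled returns its channel to the unassigned_channels pool, and a CRTC being enabled takes the lowest-numbered channel that is both still free and reachable from that CRTC (hvs_available_channels). A small, runnable sketch of that bitmask bookkeeping in isolation is shown below; the CRTC names and per-CRTC channel masks are illustrative, not the actual BCM2711 routing.

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define HVS_NUM_CHANNELS	3
#define CHANNEL_DISABLED	255

struct fake_crtc {
	const char *name;
	unsigned int available_channels;	/* FIFOs this CRTC can reach */
	unsigned int assigned_channel;
};

/* Pick the first channel that is both unassigned and reachable, mirroring
 * the ffs()-based selection in vc4_pv_muxing_atomic_check().
 */
static int assign_channel(struct fake_crtc *crtc, unsigned int *unassigned)
{
	unsigned int matching = *unassigned & crtc->available_channels;

	if (!matching)
		return -1;	/* the atomic check returns -EINVAL here */

	crtc->assigned_channel = ffs(matching) - 1;
	*unassigned &= ~(1u << crtc->assigned_channel);
	return 0;
}

int main(void)
{
	/* GENMASK(HVS_NUM_CHANNELS - 1, 0): channels 0..2 start out free. */
	unsigned int unassigned = (1u << HVS_NUM_CHANNELS) - 1;

	/* Illustrative routing: one CRTC locked to FIFO 1, two flexible
	 * ones. Walking them in ascending order and always taking the
	 * lowest free channel resolves this layout, as in the driver.
	 */
	struct fake_crtc crtcs[] = {
		{ "crtc0", 0x7, CHANNEL_DISABLED },	/* FIFO 0, 1 or 2 */
		{ "crtc1", 0x2, CHANNEL_DISABLED },	/* FIFO 1 only */
		{ "crtc2", 0x6, CHANNEL_DISABLED },	/* FIFO 1 or 2 */
	};

	for (unsigned int i = 0; i < 3; i++) {
		if (assign_channel(&crtcs[i], &unassigned))
			printf("%s: no free channel\n", crtcs[i].name);
		else
			printf("%s -> channel %u\n", crtcs[i].name,
			       crtcs[i].assigned_channel);
	}
	return 0;
}
```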
| .. | .. |
|---|
| 409 | 876 | int vc4_kms_load(struct drm_device *dev) |
|---|
| 410 | 877 | { |
|---|
| 411 | 878 | struct vc4_dev *vc4 = to_vc4_dev(dev); |
|---|
| 412 | | - struct vc4_ctm_state *ctm_state; |
|---|
| 879 | + bool is_vc5 = of_device_is_compatible(dev->dev->of_node, |
|---|
| 880 | + "brcm,bcm2711-vc5"); |
|---|
| 413 | 881 | int ret; |
|---|
| 882 | + |
|---|
| 883 | + if (!is_vc5) { |
|---|
| 884 | + vc4->load_tracker_available = true; |
|---|
| 885 | + |
|---|
| 886 | + /* Start with the load tracker enabled. Can be |
|---|
| 887 | + * disabled through the debugfs load_tracker file. |
|---|
| 888 | + */ |
|---|
| 889 | + vc4->load_tracker_enabled = true; |
|---|
| 890 | + } |
|---|
| 414 | 891 | |
|---|
| 415 | 892 | sema_init(&vc4->async_modeset, 1); |
|---|
| 416 | 893 | |
|---|
| 417 | 894 | /* Set support for vblank irq fast disable, before drm_vblank_init() */ |
|---|
| 418 | 895 | dev->vblank_disable_immediate = true; |
|---|
| 419 | 896 | |
|---|
| 897 | + dev->irq_enabled = true; |
|---|
| 420 | 898 | ret = drm_vblank_init(dev, dev->mode_config.num_crtc); |
|---|
| 421 | 899 | if (ret < 0) { |
|---|
| 422 | 900 | dev_err(dev->dev, "failed to initialize vblank\n"); |
|---|
| 423 | 901 | return ret; |
|---|
| 424 | 902 | } |
|---|
| 425 | 903 | |
|---|
| 426 | | - dev->mode_config.max_width = 2048; |
|---|
| 427 | | - dev->mode_config.max_height = 2048; |
|---|
| 904 | + if (is_vc5) { |
|---|
| 905 | + dev->mode_config.max_width = 7680; |
|---|
| 906 | + dev->mode_config.max_height = 7680; |
|---|
| 907 | + } else { |
|---|
| 908 | + dev->mode_config.max_width = 2048; |
|---|
| 909 | + dev->mode_config.max_height = 2048; |
|---|
| 910 | + } |
|---|
| 911 | + |
|---|
| 428 | 912 | dev->mode_config.funcs = &vc4_mode_funcs; |
|---|
| 429 | 913 | dev->mode_config.preferred_depth = 24; |
|---|
| 430 | 914 | dev->mode_config.async_page_flip = true; |
|---|
| 431 | 915 | dev->mode_config.allow_fb_modifiers = true; |
|---|
| 432 | 916 | |
|---|
| 433 | | - drm_modeset_lock_init(&vc4->ctm_state_lock); |
|---|
| 917 | + ret = vc4_ctm_obj_init(vc4); |
|---|
| 918 | + if (ret) |
|---|
| 919 | + return ret; |
|---|
| 434 | 920 | |
|---|
| 435 | | - ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); |
|---|
| 436 | | - if (!ctm_state) |
|---|
| 437 | | - return -ENOMEM; |
|---|
| 438 | | - drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base, |
|---|
| 439 | | - &vc4_ctm_state_funcs); |
|---|
| 921 | + ret = vc4_load_tracker_obj_init(vc4); |
|---|
| 922 | + if (ret) |
|---|
| 923 | + return ret; |
|---|
| 924 | + |
|---|
| 925 | + ret = vc4_hvs_channels_obj_init(vc4); |
|---|
| 926 | + if (ret) |
|---|
| 927 | + return ret; |
|---|
| 440 | 928 | |
|---|
| 441 | 929 | drm_mode_config_reset(dev); |
|---|
| 442 | | - |
|---|
| 443 | | - if (dev->mode_config.num_connector) |
|---|
| 444 | | - drm_fb_cma_fbdev_init(dev, 32, 0); |
|---|
| 445 | 930 | |
|---|
| 446 | 931 | drm_kms_helper_poll_init(dev); |
|---|
| 447 | 932 | |
|---|