..
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
..
  */

 #include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
 #include "backend/gpu/mali_kbase_clk_rate_trace_mgr.h"
 #include "mali_kbase_csf_ipa_control.h"

..
  * Status flags from the STATUS register of the IPA Control interface.
  */
 #define STATUS_COMMAND_ACTIVE ((u32)1 << 0)
-#define STATUS_TIMER_ACTIVE ((u32)1 << 1)
-#define STATUS_AUTO_ACTIVE ((u32)1 << 2)
 #define STATUS_PROTECTED_MODE ((u32)1 << 8)
 #define STATUS_RESET ((u32)1 << 9)
 #define STATUS_TIMER_ENABLED ((u32)1 << 31)
..
 /*
  * Commands for the COMMAND register of the IPA Control interface.
  */
-#define COMMAND_NOP ((u32)0)
 #define COMMAND_APPLY ((u32)1)
-#define COMMAND_CLEAR ((u32)2)
 #define COMMAND_SAMPLE ((u32)3)
 #define COMMAND_PROTECTED_ACK ((u32)4)
 #define COMMAND_RESET_ACK ((u32)5)

-/**
- * Default value for the TIMER register of the IPA Control interface,
- * expressed in milliseconds.
- *
- * The chosen value is a trade off between two requirements: the IPA Control
- * interface should sample counters with a resolution in the order of
- * milliseconds, while keeping GPU overhead as limited as possible.
- */
-#define TIMER_DEFAULT_VALUE_MS ((u32)10) /* 10 milliseconds */
-
-/**
+/*
  * Number of timer events per second.
  */
-#define TIMER_EVENTS_PER_SECOND ((u32)1000 / TIMER_DEFAULT_VALUE_MS)
+#define TIMER_EVENTS_PER_SECOND ((u32)1000 / IPA_CONTROL_TIMER_DEFAULT_VALUE_MS)

-/**
+/*
  * Maximum number of loops polling the GPU before we assume the GPU has hung.
  */
-#define IPA_INACTIVE_MAX_LOOPS ((unsigned int)8000000)
+#define IPA_INACTIVE_MAX_LOOPS (8000000U)

-/**
+/*
  * Number of bits used to configure a performance counter in SELECT registers.
  */
 #define IPA_CONTROL_SELECT_BITS_PER_CNT ((u64)8)

-/**
+/*
  * Maximum value of a performance counter.
  */
 #define MAX_PRFCNT_VALUE (((u64)1 << 48) - 1)
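For context on the arithmetic above: the sampling-period constant now comes from the newly included mali_kbase_config_defaults.h (as IPA_CONTROL_TIMER_DEFAULT_VALUE_MS), replacing the local TIMER_DEFAULT_VALUE_MS that the removed comment block documented as 10 ms. Assuming the default stays at 10 ms, the macro works out as in this illustrative snippet, which is not part of the patch:

        /* Assumed value, taken from the removed comment block above */
        #define IPA_CONTROL_TIMER_DEFAULT_VALUE_MS ((u32)10)
        /* TIMER_EVENTS_PER_SECOND == (u32)1000 / 10 == 100 samples per second */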
..

         ret = wait_status(kbdev, STATUS_COMMAND_ACTIVE);

-        if (!ret)
+        if (!ret) {
                 kbase_reg_write(kbdev, IPA_CONTROL_REG(COMMAND), COMMAND_APPLY);
+                ret = wait_status(kbdev, STATUS_COMMAND_ACTIVE);
+        } else {
+                dev_err(kbdev->dev, "Wait for the pending command failed");
+        }

         return ret;
 }
..
         }
 }

+static int update_select_registers(struct kbase_device *kbdev)
+{
+        u64 select_config[KBASE_IPA_CORE_TYPE_NUM];
+
+        lockdep_assert_held(&kbdev->csf.ipa_control.lock);
+
+        build_select_config(&kbdev->csf.ipa_control, select_config);
+
+        return apply_select_config(kbdev, select_config);
+}
+
 static inline void calc_prfcnt_delta(struct kbase_device *kbdev,
                                      struct kbase_ipa_control_prfcnt *prfcnt,
                                      bool gpu_ready)
..

         delta_value *= prfcnt->scaling_factor;

-        if (!WARN_ON_ONCE(kbdev->csf.ipa_control.cur_gpu_rate == 0))
-                if (prfcnt->gpu_norm)
-                        delta_value /= kbdev->csf.ipa_control.cur_gpu_rate;
+        if (kbdev->csf.ipa_control.cur_gpu_rate == 0) {
+                static bool warned;
+
+                if (!warned) {
+                        dev_warn(kbdev->dev, "%s: GPU freq is unexpectedly 0", __func__);
+                        warned = true;
+                }
+        } else if (prfcnt->gpu_norm)
+                delta_value = div_u64(delta_value, kbdev->csf.ipa_control.cur_gpu_rate);

         prfcnt->latest_raw_value = raw_value;

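A note on the division rewrite above: delta_value is a u64, and dividing a 64-bit value by a run-time divisor with the plain / operator does not build on 32-bit kernels (it would pull in a 64-by-64 libgcc division helper that the kernel does not provide), so the div_u64() helper from <linux/math64.h> (64-bit dividend, 32-bit divisor) is used instead. A minimal sketch of the same pattern, outside kbase, assuming the frequency fits in a u32 as cur_gpu_rate does (it is assigned from a u32 clk_rate_hz elsewhere in this file):

        #include <linux/math64.h>

        static u64 normalise_by_freq(u64 counter_delta, u32 freq_hz)
        {
                /* Same result as counter_delta / freq_hz, but safe on 32-bit kernels */
                return div_u64(counter_delta, freq_hz);
        }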
..
         /* Interrupts are already disabled and interrupt state is also saved */
         spin_lock(&ipa_ctrl->lock);

-        for (i = 0; i < ipa_ctrl->num_active_sessions; i++) {
-                size_t j;
+        for (i = 0; i < KBASE_IPA_CONTROL_MAX_SESSIONS; i++) {
                 struct kbase_ipa_control_session *session = &ipa_ctrl->sessions[i];

-                for (j = 0; j < session->num_prfcnts; j++) {
-                        struct kbase_ipa_control_prfcnt *prfcnt =
-                                &session->prfcnts[j];
+                if (session->active) {
+                        size_t j;

-                        if (prfcnt->gpu_norm)
-                                calc_prfcnt_delta(kbdev, prfcnt, true);
-                }
+                        for (j = 0; j < session->num_prfcnts; j++) {
+                                struct kbase_ipa_control_prfcnt *prfcnt =
+                                        &session->prfcnts[j];
+
+                                if (prfcnt->gpu_norm)
+                                        calc_prfcnt_delta(kbdev, prfcnt, true);
+                        }
+                }
         }

         ipa_ctrl->cur_gpu_rate = clk_rate_hz;
..

         spin_lock_init(&ipa_ctrl->lock);
         ipa_ctrl->num_active_sessions = 0;
-        for (i = 0; i < KBASE_IPA_CONTROL_MAX_SESSIONS; i++) {
+        for (i = 0; i < KBASE_IPA_CONTROL_MAX_SESSIONS; i++)
                 ipa_ctrl->sessions[i].active = false;
-        }

         listener_data = kmalloc(sizeof(struct kbase_ipa_control_listener_data),
                                 GFP_KERNEL);
..
 }
 KBASE_EXPORT_TEST_API(kbase_ipa_control_term);

+/** session_read_raw_values - Read latest raw values for a session
+ * @kbdev: Pointer to kbase device.
+ * @session: Pointer to the session whose performance counters shall be read.
+ *
+ * Read and update the latest raw values of all the performance counters
+ * belonging to a given session.
+ */
+static void session_read_raw_values(struct kbase_device *kbdev,
+                                    struct kbase_ipa_control_session *session)
+{
+        size_t i;
+
+        lockdep_assert_held(&kbdev->csf.ipa_control.lock);
+
+        for (i = 0; i < session->num_prfcnts; i++) {
+                struct kbase_ipa_control_prfcnt *prfcnt = &session->prfcnts[i];
+                u64 raw_value = read_value_cnt(kbdev, (u8)prfcnt->type,
+                                               prfcnt->select_idx);
+
+                prfcnt->latest_raw_value = raw_value;
+        }
+}
+
+/** session_gpu_start - Start one or all sessions
+ * @kbdev: Pointer to kbase device.
+ * @ipa_ctrl: Pointer to IPA_CONTROL descriptor.
+ * @session: Pointer to the session to initialize, or NULL to initialize
+ *           all sessions.
+ *
+ * This function starts one or all sessions by capturing a manual sample,
+ * reading the latest raw value of performance counters and possibly enabling
+ * the timer for automatic sampling if necessary.
+ *
+ * If a single session is given, it is assumed to be active, regardless of
+ * the number of active sessions. The number of performance counters belonging
+ * to the session shall be set in advance.
+ *
+ * If no session is given, the function shall start all sessions.
+ * The function does nothing if there are no active sessions.
+ *
+ * Return: 0 on success, or error code on failure.
+ */
+static int session_gpu_start(struct kbase_device *kbdev,
+                             struct kbase_ipa_control *ipa_ctrl,
+                             struct kbase_ipa_control_session *session)
+{
+        bool first_start =
+                (session != NULL) && (ipa_ctrl->num_active_sessions == 0);
+        int ret = 0;
+
+        lockdep_assert_held(&kbdev->csf.ipa_control.lock);
+
+        /*
+         * Exit immediately if the caller intends to start all sessions
+         * but there are no active sessions. It's important that no operation
+         * is done on the IPA_CONTROL interface in that case.
+         */
+        if (!session && ipa_ctrl->num_active_sessions == 0)
+                return ret;
+
+        /*
+         * Take a manual sample unconditionally if the caller intends
+         * to start all sessions. Otherwise, only take a manual sample
+         * if this is the first session to be initialized, for accumulator
+         * registers are empty and no timer has been configured for automatic
+         * sampling.
+         */
+        if (!session || first_start) {
+                kbase_reg_write(kbdev, IPA_CONTROL_REG(COMMAND),
+                                COMMAND_SAMPLE);
+                ret = wait_status(kbdev, STATUS_COMMAND_ACTIVE);
+                if (ret)
+                        dev_err(kbdev->dev, "%s: failed to sample new counters",
+                                __func__);
+                kbase_reg_write(kbdev, IPA_CONTROL_REG(TIMER),
+                                timer_value(ipa_ctrl->cur_gpu_rate));
+        }
+
+        /*
+         * Read current raw value to start the session.
+         * This is necessary to put the first query in condition
+         * to generate a correct value by calculating the difference
+         * from the beginning of the session. This consideration
+         * is true regardless of the number of sessions the caller
+         * intends to start.
+         */
+        if (!ret) {
+                if (session) {
+                        /* On starting a session, value read is required for
+                         * IPA power model's calculation initialization.
+                         */
+                        session_read_raw_values(kbdev, session);
+                } else {
+                        size_t session_idx;
+
+                        for (session_idx = 0;
+                             session_idx < KBASE_IPA_CONTROL_MAX_SESSIONS;
+                             session_idx++) {
+                                struct kbase_ipa_control_session *session_to_check = &ipa_ctrl->sessions[session_idx];
+
+                                if (session_to_check->active)
+                                        session_read_raw_values(kbdev, session_to_check);
+                        }
+                }
+        }
+
+        return ret;
+}
+
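To summarise the first_start logic above, here is an illustrative recap of the two call shapes used later in this patch (a sketch, not additional driver code):

        /* Registration path (kbase_ipa_control_register): session != NULL.
         *  - first session (num_active_sessions == 0): SAMPLE is issued and the
         *    TIMER is programmed, then the session's raw values are read.
         *  - further sessions: only the session's raw values are read.
         */
        ret = session_gpu_start(kbdev, ipa_ctrl, session);

        /* Power-on / sleep-exit path: session == NULL. SAMPLE and TIMER are
         * always programmed and every active session's raw values are re-read;
         * the helper is a no-op if no session is active.
         */
        ret = session_gpu_start(kbdev, ipa_ctrl, NULL);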
 int kbase_ipa_control_register(
         struct kbase_device *kbdev,
         const struct kbase_ipa_control_perf_counter *perf_counters,
..
         struct kbase_ipa_control_session *session = NULL;
         unsigned long flags;

-        if (WARN_ON(kbdev == NULL) || WARN_ON(perf_counters == NULL) ||
-            WARN_ON(client == NULL) ||
+        if (WARN_ON(unlikely(kbdev == NULL)))
+                return -ENODEV;
+
+        if (WARN_ON(perf_counters == NULL) || WARN_ON(client == NULL) ||
             WARN_ON(num_counters > KBASE_IPA_CONTROL_MAX_COUNTERS)) {
                 dev_err(kbdev->dev, "%s: wrong input arguments", __func__);
                 return -EINVAL;
..
          */
         for (session_idx = 0; session_idx < KBASE_IPA_CONTROL_MAX_SESSIONS;
              session_idx++) {
-                session = &ipa_ctrl->sessions[session_idx];
-                if (!session->active)
+                if (!ipa_ctrl->sessions[session_idx].active) {
+                        session = &ipa_ctrl->sessions[session_idx];
                         break;
+                }
         }

         if (!session) {
..
         /* Reports to this client for GPU time spent in protected mode
          * should begin from the point of registration.
          */
-        session->last_query_time = ktime_get_ns();
+        session->last_query_time = ktime_get_raw_ns();

         /* Initially, no time has been spent in protected mode */
         session->protm_time = 0;
..
          * before applying the new configuration.
          */
         if (new_config) {
-                u64 select_config[KBASE_IPA_CORE_TYPE_NUM];
-
-                build_select_config(ipa_ctrl, select_config);
-                ret = apply_select_config(kbdev, select_config);
+                ret = update_select_registers(kbdev);
                 if (ret)
                         dev_err(kbdev->dev,
-                                "%s: failed to apply SELECT configuration",
+                                "%s: failed to apply new SELECT configuration",
                                 __func__);
         }

         if (!ret) {
-                /* Accumulator registers don't contain any sample if the timer
-                 * has not been enabled first. Take a sample manually before
-                 * enabling the timer.
-                 */
-                if (ipa_ctrl->num_active_sessions == 0) {
-                        kbase_reg_write(kbdev, IPA_CONTROL_REG(COMMAND),
-                                        COMMAND_SAMPLE);
-                        ret = wait_status(kbdev, STATUS_COMMAND_ACTIVE);
-                        if (!ret) {
-                                kbase_reg_write(
-                                        kbdev, IPA_CONTROL_REG(TIMER),
-                                        timer_value(ipa_ctrl->cur_gpu_rate));
-                        } else {
-                                dev_err(kbdev->dev,
-                                        "%s: failed to sample new counters",
-                                        __func__);
-                        }
-                }
+                session->num_prfcnts = num_counters;
+                ret = session_gpu_start(kbdev, ipa_ctrl, session);
         }

         if (!ret) {
-                session->num_prfcnts = num_counters;
                 session->active = true;
                 ipa_ctrl->num_active_sessions++;
                 *client = session;
-
-                /*
-                 * Read current raw value to initialize the session.
-                 * This is necessary to put the first query in condition
-                 * to generate a correct value by calculating the difference
-                 * from the beginning of the session.
-                 */
-                for (i = 0; i < session->num_prfcnts; i++) {
-                        struct kbase_ipa_control_prfcnt *prfcnt =
-                                &session->prfcnts[i];
-                        u64 raw_value = read_value_cnt(kbdev, (u8)prfcnt->type,
-                                                       prfcnt->select_idx);
-                        prfcnt->latest_raw_value = raw_value;
-                }
         }

 exit:
..
         unsigned long flags;
         bool new_config = false, valid_session = false;

-        if (WARN_ON(kbdev == NULL) || WARN_ON(client == NULL)) {
+        if (WARN_ON(unlikely(kbdev == NULL)))
+                return -ENODEV;
+
+        if (WARN_ON(client == NULL)) {
                 dev_err(kbdev->dev, "%s: wrong input arguments", __func__);
                 return -EINVAL;
         }
..
         }

         if (new_config) {
-                u64 select_config[KBASE_IPA_CORE_TYPE_NUM];
-
-                build_select_config(ipa_ctrl, select_config);
-                ret = apply_select_config(kbdev, select_config);
+                ret = update_select_registers(kbdev);
                 if (ret)
                         dev_err(kbdev->dev,
                                 "%s: failed to apply SELECT configuration",
..
         unsigned long flags;
         bool gpu_ready;

-        if (WARN_ON(kbdev == NULL) || WARN_ON(client == NULL) ||
-            WARN_ON(values == NULL)) {
+        if (WARN_ON(unlikely(kbdev == NULL)))
+                return -ENODEV;
+
+        if (WARN_ON(client == NULL) || WARN_ON(values == NULL)) {
                 dev_err(kbdev->dev, "%s: wrong input arguments", __func__);
                 return -EINVAL;
         }

         ipa_ctrl = &kbdev->csf.ipa_control;
         session = (struct kbase_ipa_control_session *)client;
+
+        if (!session->active) {
+                dev_err(kbdev->dev,
+                        "%s: attempt to query inactive session", __func__);
+                return -EINVAL;
+        }

         if (WARN_ON(num_values < session->num_prfcnts)) {
                 dev_err(kbdev->dev,
..
         }

         if (protected_time) {
-                u64 time_now = ktime_get_ns();
+                u64 time_now = ktime_get_raw_ns();

                 /* This is the amount of protected-mode time spent prior to
                  * the current protm period.
..
                         ret);
         }

-        for (session_idx = 0; session_idx < ipa_ctrl->num_active_sessions;
+        for (session_idx = 0; session_idx < KBASE_IPA_CONTROL_MAX_SESSIONS;
              session_idx++) {
+
                 struct kbase_ipa_control_session *session =
                         &ipa_ctrl->sessions[session_idx];
-                size_t i;

-                for (i = 0; i < session->num_prfcnts; i++) {
-                        struct kbase_ipa_control_prfcnt *prfcnt =
-                                &session->prfcnts[i];
+                if (session->active) {
+                        size_t i;

-                        calc_prfcnt_delta(kbdev, prfcnt, true);
+                        for (i = 0; i < session->num_prfcnts; i++) {
+                                struct kbase_ipa_control_prfcnt *prfcnt =
+                                        &session->prfcnts[i];
+
+                                calc_prfcnt_delta(kbdev, prfcnt, true);
+                        }
                 }
         }
-
         spin_unlock(&ipa_ctrl->lock);
 }

..
         /* Interrupts are already disabled and interrupt state is also saved */
         spin_lock(&ipa_ctrl->lock);

-        /* Re-issue the APPLY command, this is actually needed only for CSHW */
-        kbase_reg_write(kbdev, IPA_CONTROL_REG(COMMAND), COMMAND_APPLY);
-        ret = wait_status(kbdev, STATUS_COMMAND_ACTIVE);
+        ret = update_select_registers(kbdev);
         if (ret) {
                 dev_err(kbdev->dev,
-                        "Wait for the completion of apply command failed: %d",
-                        ret);
+                        "Failed to reconfigure the select registers: %d", ret);
         }

-        /* Re-enable the timer for periodic sampling */
-        kbase_reg_write(kbdev, IPA_CONTROL_REG(TIMER),
-                        timer_value(ipa_ctrl->cur_gpu_rate));
+        /* Accumulator registers would not contain any sample after GPU power
+         * cycle if the timer has not been enabled first. Initialize all sessions.
+         */
+        ret = session_gpu_start(kbdev, ipa_ctrl, NULL);

         spin_unlock(&ipa_ctrl->lock);
 }
..
 }
 KBASE_EXPORT_TEST_API(kbase_ipa_control_handle_gpu_reset_post);

+#ifdef KBASE_PM_RUNTIME
+void kbase_ipa_control_handle_gpu_sleep_enter(struct kbase_device *kbdev)
+{
+        lockdep_assert_held(&kbdev->hwaccess_lock);
+
+        if (kbdev->pm.backend.mcu_state == KBASE_MCU_IN_SLEEP) {
+                /* GPU Sleep is treated as a power down */
+                kbase_ipa_control_handle_gpu_power_off(kbdev);
+
+                /* SELECT_CSHW register needs to be cleared to prevent any
+                 * IPA control message to be sent to the top level GPU HWCNT.
+                 */
+                kbase_reg_write(kbdev, IPA_CONTROL_REG(SELECT_CSHW_LO), 0);
+                kbase_reg_write(kbdev, IPA_CONTROL_REG(SELECT_CSHW_HI), 0);
+
+                /* No need to issue the APPLY command here */
+        }
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_control_handle_gpu_sleep_enter);
+
+void kbase_ipa_control_handle_gpu_sleep_exit(struct kbase_device *kbdev)
+{
+        lockdep_assert_held(&kbdev->hwaccess_lock);
+
+        if (kbdev->pm.backend.mcu_state == KBASE_MCU_IN_SLEEP) {
+                /* To keep things simple, currently exit from
+                 * GPU Sleep is treated as a power on event where
+                 * all 4 SELECT registers are reconfigured.
+                 * On exit from sleep, reconfiguration is needed
+                 * only for the SELECT_CSHW register.
+                 */
+                kbase_ipa_control_handle_gpu_power_on(kbdev);
+        }
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_control_handle_gpu_sleep_exit);
+#endif
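As an aside on the SELECT_CSHW_LO/HI pair cleared in the sleep-enter path above: each SELECT register is a 64-bit value accessed as two 32-bit halves, with IPA_CONTROL_SELECT_BITS_PER_CNT (8) bits per configured counter. A purely illustrative sketch of how such a select word could be assembled and written follows; the real packing is done by build_select_config()/apply_select_config(), which are not visible in this hunk, and the helper name, counter_idx and num_counters below are hypothetical:

        /* Illustrative only: hypothetical helper, not part of this patch */
        static void write_cshw_select(struct kbase_device *kbdev,
                                      const u8 *counter_idx, size_t num_counters)
        {
                u64 select = 0;
                size_t cnt;

                /* Pack one 8-bit counter index per byte of the 64-bit word */
                for (cnt = 0; cnt < num_counters; cnt++)
                        select |= (u64)counter_idx[cnt]
                                  << (cnt * IPA_CONTROL_SELECT_BITS_PER_CNT);

                /* Written as two 32-bit halves, as in the sleep-enter code above */
                kbase_reg_write(kbdev, IPA_CONTROL_REG(SELECT_CSHW_LO), (u32)select);
                kbase_reg_write(kbdev, IPA_CONTROL_REG(SELECT_CSHW_HI), (u32)(select >> 32));
        }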
+
 #if MALI_UNIT_TEST
 void kbase_ipa_control_rate_change_notify_test(struct kbase_device *kbdev,
                                                u32 clk_index, u32 clk_rate_hz)
..
         struct kbase_ipa_control *ipa_ctrl = &kbdev->csf.ipa_control;

         lockdep_assert_held(&kbdev->hwaccess_lock);
-        ipa_ctrl->protm_start = ktime_get_ns();
+        ipa_ctrl->protm_start = ktime_get_raw_ns();
 }

 void kbase_ipa_control_protm_exited(struct kbase_device *kbdev)
 {
         struct kbase_ipa_control *ipa_ctrl = &kbdev->csf.ipa_control;
         size_t i;
-        u64 time_now = ktime_get_ns();
+        u64 time_now = ktime_get_raw_ns();
         u32 status;

         lockdep_assert_held(&kbdev->hwaccess_lock);

-        for (i = 0; i < ipa_ctrl->num_active_sessions; i++) {
+        for (i = 0; i < KBASE_IPA_CONTROL_MAX_SESSIONS; i++) {
+
                 struct kbase_ipa_control_session *session =
                         &ipa_ctrl->sessions[i];
-                u64 protm_time = time_now - MAX(session->last_query_time,
-                                                ipa_ctrl->protm_start);

-                session->protm_time += protm_time;
+                if (session->active) {
+                        u64 protm_time = time_now - MAX(session->last_query_time,
+                                                        ipa_ctrl->protm_start);
+
+                        session->protm_time += protm_time;
+                }
         }

         /* Acknowledge the protected_mode bit in the IPA_CONTROL STATUS
---|