From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

---
 kernel/drivers/gpu/arm/bifrost/mmu/backend/mali_kbase_mmu_csf.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/mmu/backend/mali_kbase_mmu_csf.c b/kernel/drivers/gpu/arm/bifrost/mmu/backend/mali_kbase_mmu_csf.c
index 05253ae..4cac787 100644
--- a/kernel/drivers/gpu/arm/bifrost/mmu/backend/mali_kbase_mmu_csf.c
+++ b/kernel/drivers/gpu/arm/bifrost/mmu/backend/mali_kbase_mmu_csf.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -88,12 +88,11 @@
 		 * context's address space, when the page fault occurs for
 		 * MCU's address space.
 		 */
-		if (!queue_work(as->pf_wq, &as->work_pagefault))
-			kbase_ctx_sched_release_ctx(kctx);
-		else {
+		if (!queue_work(as->pf_wq, &as->work_pagefault)) {
 			dev_dbg(kbdev->dev,
-				"Page fault is already pending for as %u\n",
-				as_nr);
+				"Page fault is already pending for as %u", as_nr);
+			kbase_ctx_sched_release_ctx(kctx);
+		} else {
 			atomic_inc(&kbdev->faults_pending);
 		}
 	}
@@ -122,6 +121,8 @@
 		access_type, kbase_gpu_access_type_name(fault->status),
 		source_id);
 
+	kbase_debug_csf_fault_notify(kbdev, NULL, DF_GPU_PAGE_FAULT);
+
 	/* Report MMU fault for all address spaces (except MCU_AS_NR) */
 	for (as_no = 1; as_no < kbdev->nr_hw_address_spaces; as_no++)
 		submit_work_pagefault(kbdev, as_no, fault);
@@ -130,6 +131,7 @@
 	if (kbase_prepare_to_reset_gpu(kbdev,
 				       RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
 		kbase_reset_gpu(kbdev);
+
 }
 KBASE_EXPORT_TEST_API(kbase_mmu_report_mcu_as_fault_and_reset);
 
@@ -148,17 +150,18 @@
 			"true" : "false";
 	int as_no = as->number;
 	unsigned long flags;
+	const uintptr_t fault_addr = fault->addr;
 
 	/* terminal fault, print info about the fault */
 	dev_err(kbdev->dev,
-		"GPU bus fault in AS%d at VA 0x%016llX\n"
-		"VA_VALID: %s\n"
+		"GPU bus fault in AS%d at PA %pK\n"
+		"PA_VALID: %s\n"
 		"raw fault status: 0x%X\n"
 		"exception type 0x%X: %s\n"
 		"access type 0x%X: %s\n"
 		"source id 0x%X\n"
 		"pid: %d\n",
-		as_no, fault->addr,
+		as_no, (void *)fault_addr,
 		addr_valid,
 		status,
 		exception_type, kbase_gpu_exception_name(exception_type),
@@ -187,6 +190,7 @@
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
 			GPU_COMMAND_CLEAR_FAULT);
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
 }
 
 /*
@@ -248,6 +252,7 @@
 	mutex_unlock(&kbdev->mmu_hw_mutex);
 	/* AS transaction end */
 
+	kbase_debug_csf_fault_notify(kbdev, kctx, DF_GPU_PAGE_FAULT);
 	/* Switching to UNMAPPED mode above would have enabled the firmware to
 	 * recover from the fault (if the memory access was made by firmware)
 	 * and it can then respond to CSG termination requests to be sent now.
@@ -261,6 +266,7 @@
 				 KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
 	kbase_mmu_hw_enable_fault(kbdev, as,
 				  KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+
 }
 
 /**
@@ -482,8 +488,6 @@
 	kbase_csf_ctx_handle_fault(kctx, fault);
 	kbase_ctx_sched_release_ctx_lock(kctx);
 
-	atomic_dec(&kbdev->faults_pending);
-
 	/* A work for GPU fault is complete.
 	 * Till reaching here, no further GPU fault will be reported.
 	 * Now clear the GPU fault to allow next GPU fault interrupt report.
@@ -492,6 +496,8 @@
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
 			GPU_COMMAND_CLEAR_FAULT);
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	atomic_dec(&kbdev->faults_pending);
 }
 
 /**
@@ -546,14 +552,15 @@
 }
 KBASE_EXPORT_TEST_API(kbase_mmu_gpu_fault_interrupt);
 
-int kbase_mmu_as_init(struct kbase_device *kbdev, int i)
+int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i)
 {
 	kbdev->as[i].number = i;
 	kbdev->as[i].bf_data.addr = 0ULL;
 	kbdev->as[i].pf_data.addr = 0ULL;
 	kbdev->as[i].gf_data.addr = 0ULL;
+	kbdev->as[i].is_unresponsive = false;
 
-	kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", 0, 1, i);
+	kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", WQ_UNBOUND, 1, i);
 	if (!kbdev->as[i].pf_wq)
 		return -ENOMEM;
 
-- 
Gitblit v1.6.2
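
For context on the behavioral part of this patch: in the hunk at line 88, queue_work() returning false means the page-fault work item is already queued for that address space, so the context reference taken earlier is released and kbdev->faults_pending is only incremented when new work was actually queued; the patch also moves the "already pending" debug message onto that false branch, where it belongs. The stand-alone sketch below models just this bookkeeping in user-space C; fake_queue_work(), ctx_refs and faults_pending are simplified stand-ins for the driver's workqueue and counters, not kbase APIs.

/*
 * Simplified user-space model of the fixed submit_work_pagefault() logic:
 * release the context reference when the work item was already pending,
 * and only count a new pending fault when the work was actually queued.
 * fake_queue_work(), ctx_refs and faults_pending are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static int ctx_refs;        /* models kbase_ctx_sched_retain/release_ctx() */
static int faults_pending;  /* models kbdev->faults_pending */
static bool work_pending;   /* models the page-fault work item being queued */

/* Returns false if the work item is already pending, like queue_work(). */
static bool fake_queue_work(void)
{
	if (work_pending)
		return false;
	work_pending = true;
	return true;
}

static void submit_work_pagefault(unsigned int as_nr)
{
	ctx_refs++;	/* reference taken before queuing, as in the driver */

	if (!fake_queue_work()) {
		printf("Page fault is already pending for as %u\n", as_nr);
		ctx_refs--;	/* drop the reference; no new work was queued */
	} else {
		faults_pending++;
	}
}

int main(void)
{
	submit_work_pagefault(1);	/* queues the work: faults_pending -> 1 */
	submit_work_pagefault(1);	/* already pending: reference is dropped */
	printf("ctx_refs=%d faults_pending=%d\n", ctx_refs, faults_pending);
	return 0;
}

The later hunks keep the same accounting consistent: the GPU-fault worker now performs atomic_dec(&kbdev->faults_pending) only after GPU_COMMAND_CLEAR_FAULT has been written, and the per-AS page-fault workqueue is allocated with WQ_UNBOUND instead of flags 0, both visible in the diff above.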