2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c
@@ -239,7 +239,7 @@
 	return failed ? -EFAULT : 0;
 }
 
-static ssize_t show_dummy_job_wa_info(struct device * const dev,
+static ssize_t dummy_job_wa_info_show(struct device * const dev,
 	struct device_attribute * const attr, char * const buf)
 {
 	struct kbase_device *const kbdev = dev_get_drvdata(dev);
@@ -254,7 +254,7 @@
 	return err;
 }
 
-static DEVICE_ATTR(dummy_job_wa_info, 0444, show_dummy_job_wa_info, NULL);
+static DEVICE_ATTR_RO(dummy_job_wa_info);
 
 static bool wa_blob_load_needed(struct kbase_device *kbdev)
 {
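
The first two hunks convert the sysfs attribute to the kernel's DEVICE_ATTR_RO() helper. DEVICE_ATTR_RO(name) declares a read-only (0444) device attribute named dev_attr_<name> and wires its .show callback to <name>_show by token pasting, which is why the callback is renamed from show_dummy_job_wa_info to dummy_job_wa_info_show. A rough sketch of the expansion (simplified from include/linux/device.h; exact details vary by kernel version):

	/* DEVICE_ATTR_RO(dummy_job_wa_info) expands to roughly: */
	static struct device_attribute dev_attr_dummy_job_wa_info = {
		.attr  = { .name = "dummy_job_wa_info", .mode = 0444 },
		.show  = dummy_job_wa_info_show, /* name##_show, pasted */
		.store = NULL,                   /* read-only: no store */
	};

The change is behavior-preserving: the attribute keeps its name, 0444 mode, and show-only semantics, but now follows the preferred kernel naming convention.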
@@ -280,6 +280,11 @@
 	u32 blob_offset;
 	int err;
 	struct kbase_context *kctx;
+
+	/* Calls to this function are inherently asynchronous, with respect to
+	 * MMU operations.
+	 */
+	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
 
 	lockdep_assert_held(&kbdev->fw_load_lock);
 
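
This hunk introduces the mmu_sync_info value that the reworked kbase_mem_alloc() (last hunk) now requires. The workaround blob is loaded under kbdev->fw_load_lock during firmware setup rather than on a user submission path, so the GPU MMU mapping need not complete synchronously with respect to the caller, and CALLER_MMU_ASYNC is passed. For reference, the enum in recent kbase drops looks approximately like the following (the inline comments are mine, not from the source):

	enum kbase_caller_mmu_sync_info {
		CALLER_MMU_UNSET_SYNC, /* no sync requirement recorded */
		CALLER_MMU_SYNC,       /* caller needs the MMU op completed */
		CALLER_MMU_ASYNC       /* caller tolerates async completion */
	};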
@@ -375,8 +380,8 @@
 	nr_pages = PFN_UP(blob->size);
 	flags = blob->map_flags | BASE_MEM_FLAG_MAP_FIXED;
 
-	va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages,
-			0, &flags, &gpu_va);
+	va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
+				    &gpu_va, mmu_sync_info);
 
 	if (!va_region) {
 		dev_err(kbdev->dev, "Failed to allocate for blob\n");
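
The call site itself only changes shape: PFN_UP() still rounds the blob size up to whole pages, and BASE_MEM_FLAG_MAP_FIXED still pins the allocation at the GPU VA the blob requests; the new trailing argument threads mmu_sync_info through to the MMU insertion code. The updated prototype is approximately the following (reconstructed from the call site, not quoted from the header):

	struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
			u64 va_pages, u64 commit_pages, u64 extension,
			u64 *flags, u64 *gpu_va,
			enum kbase_caller_mmu_sync_info mmu_sync_info);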