  .. |  .. |
 239 | 239 |         return failed ? -EFAULT : 0;
 240 | 240 | }
 241 | 241 |
 242 |     | -static ssize_t show_dummy_job_wa_info(struct device * const dev,
     | 242 | +static ssize_t dummy_job_wa_info_show(struct device * const dev,
 243 | 243 |                 struct device_attribute * const attr, char * const buf)
 244 | 244 | {
 245 | 245 |         struct kbase_device *const kbdev = dev_get_drvdata(dev);
  .. |  .. |
 254 | 254 |         return err;
 255 | 255 | }
 256 | 256 |
 257 |     | -static DEVICE_ATTR(dummy_job_wa_info, 0444, show_dummy_job_wa_info, NULL);
     | 257 | +static DEVICE_ATTR_RO(dummy_job_wa_info);
 258 | 258 |
 259 | 259 | static bool wa_blob_load_needed(struct kbase_device *kbdev)
 260 | 260 | {
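Note on the rename above: DEVICE_ATTR_RO() derives both the 0444 mode and the .show callback from the attribute name, so the handler has to be called dummy_job_wa_info_show; the older DEVICE_ATTR() form named the callback explicitly. A minimal sketch of that convention, using a hypothetical example_attr attribute that is not part of this driver:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(example_attr) defines dev_attr_example_attr with mode 0444
 * and .show = example_attr_show, so the handler name is fixed by the macro;
 * DEVICE_ATTR() instead took the callback name as an explicit argument.
 */
static ssize_t example_attr_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        /* sysfs_emit() (kernel >= 5.10) bounds the write to one page; on
         * older kernels scnprintf(buf, PAGE_SIZE, ...) does the same job.
         */
        return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example_attr);

The generated struct is dev_attr_example_attr, which is what device_create_file() or an attribute group would then reference.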
  .. |  .. |
 280 | 280 |         u32 blob_offset;
 281 | 281 |         int err;
 282 | 282 |         struct kbase_context *kctx;
     | 283 | +
     | 284 | +        /* Calls to this function are inherently asynchronous, with respect to
     | 285 | +         * MMU operations.
     | 286 | +         */
     | 287 | +        const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
 283 | 288 |
 284 | 289 |         lockdep_assert_held(&kbdev->fw_load_lock);
 285 | 290 |
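The CALLER_MMU_ASYNC value added above comes from the kbase_caller_mmu_sync_info enum that this change threads into the allocation path. As rough orientation only, the enum plausibly looks like the sketch below; apart from CALLER_MMU_ASYNC itself, the enumerator names are assumptions, and the authoritative definition lives in the kbase headers:

/* Hedged sketch only: except for CALLER_MMU_ASYNC, which appears in the hunk
 * above, these enumerators are assumptions about the kbase headers.
 */
enum kbase_caller_mmu_sync_info {
        CALLER_MMU_UNSET_SYNC,  /* assumed "not specified" default */
        CALLER_MMU_SYNC,        /* assumed: caller waits for MMU operations */
        CALLER_MMU_ASYNC        /* used above: MMU operations may complete later */
};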
  .. |  .. |
 375 | 380 |         nr_pages = PFN_UP(blob->size);
 376 | 381 |         flags = blob->map_flags | BASE_MEM_FLAG_MAP_FIXED;
 377 | 382 |
 378 |     | -        va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages,
 379 |     | -                        0, &flags, &gpu_va);
     | 383 | +        va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
     | 384 | +                                    &gpu_va, mmu_sync_info);
 380 | 385 |
 381 | 386 |         if (!va_region) {
 382 | 387 |                 dev_err(kbdev->dev, "Failed to allocate for blob\n");