@@ -10,6 +10,7 @@
  */
 
 #include "linux/mman.h"
+#include <linux/version_compat_defs.h>
 #include <mali_kbase.h>
 
 /* mali_kbase_mmap.c
@@ -90,7 +91,6 @@
 	if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
 		return false;
 
-
 	return true;
 }
 
@@ -127,11 +127,12 @@
  *
  * Return: address of the found gap end (high limit) if area is found;
  * -ENOMEM if search is unsuccessful
-*/
+ */
 
 static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
 		*info, bool is_shader_code, bool is_same_4gb_page)
 {
+#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long length, low_limit, high_limit, gap_start, gap_end;
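
Note: the #if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE) guard opened here keeps the pre-6.1 rb-tree VMA walk, and the #else branch in the next hunk switches to the maple-tree search that kernel 6.1 introduced. KERNEL_VERSION() packs major/minor/patch into a single comparable integer, so the whole test folds to a constant at preprocessing time. A minimal userspace sketch of the comparison follows (the macro body matches recent <linux/version.h>; the 5.15.0 build version is a made-up example):

#include <stdio.h>

/* Same packing as <linux/version.h>: 16 bits major, 8 bits minor, 8 bits patch */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

int main(void)
{
	/* Hypothetical build-time kernel version, e.g. 5.15.0 */
	unsigned int linux_version_code = KERNEL_VERSION(5, 15, 0);

	if (KERNEL_VERSION(6, 1, 0) > linux_version_code)
		printf("pre-6.1 kernel: rb-tree VMA walk\n");
	else
		printf("6.1+ kernel: maple-tree search\n");
	return 0;
}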
@@ -225,7 +226,37 @@
 			}
 		}
 	}
+#else
+	unsigned long length, high_limit, gap_start, gap_end;
 
+	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/*
+	 * Adjust search limits by the desired length.
+	 * See implementation comment at top of unmapped_area().
+	 */
+	gap_end = info->high_limit;
+	if (gap_end < length)
+		return -ENOMEM;
+	high_limit = gap_end - length;
+
+	if (info->low_limit > high_limit)
+		return -ENOMEM;
+
+	while (true) {
+		if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, length))
+			return -ENOMEM;
+		gap_end = mas.last + 1;
+		gap_start = mas.min;
+
+		if (align_and_check(&gap_end, gap_start, info, is_shader_code, is_same_4gb_page))
+			return gap_end;
+	}
+#endif
 	return -ENOMEM;
 }
 
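
Note: on 6.1+ the branch above asks the maple tree, via mas_empty_area_rev(), for the highest gap of at least length bytes, where length is the request padded by info->align_mask so any alignment fix-up done later in align_and_check() still fits. The length < info->length test catches unsigned wrap-around of that padding. A self-contained sketch of the overflow-guarded limit set-up (the function name and the values in main() are hypothetical):

#include <stdio.h>

/*
 * Pad the request by the worst-case alignment slack, reject if the
 * padded length wraps or no search window remains.
 */
static int gap_search_limits(unsigned long len, unsigned long align_mask,
			     unsigned long low_limit, unsigned long high_limit_in,
			     unsigned long *high_limit_out)
{
	unsigned long length = len + align_mask;

	if (length < len)		/* unsigned wrap: request too large */
		return -1;
	if (high_limit_in < length)	/* window smaller than padded request */
		return -1;
	*high_limit_out = high_limit_in - length;
	if (low_limit > *high_limit_out)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long high;

	/* 2 MiB request, 2 MiB - 1 alignment slack, window 4 KiB .. 1 GiB */
	if (gap_search_limits(2UL << 20, (2UL << 20) - 1, 4096UL, 1UL << 30, &high) == 0)
		printf("search top-down from %#lx\n", high);
	else
		printf("no room\n");
	return 0;
}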
@@ -242,8 +273,13 @@
 	struct vm_unmapped_area_info info;
 	unsigned long align_offset = 0;
 	unsigned long align_mask = 0;
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	unsigned long high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+	unsigned long low_limit = max_t(unsigned long, PAGE_SIZE, kbase_mmap_min_addr);
+#else
 	unsigned long high_limit = mm->mmap_base;
 	unsigned long low_limit = PAGE_SIZE;
+#endif
 	int cpu_va_bits = BITS_PER_LONG;
 	int gpu_pc_bits =
 		kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
@@ -270,6 +306,13 @@
 	struct kbase_reg_zone *zone =
 		kbase_ctx_reg_zone_get_nolock(kctx, KBASE_REG_ZONE_SAME_VA);
 	u64 same_va_end_addr = kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT;
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+
+	/* requested length too big for entire address space */
+	if (len > mmap_end - kbase_mmap_min_addr)
+		return -ENOMEM;
+#endif
 
 	/* err on fixed address */
 	if ((flags & MAP_FIXED) || addr)
@@ -282,7 +325,7 @@
 
 	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
 		high_limit =
-			min_t(unsigned long, mm->mmap_base, same_va_end_addr);
+			min_t(unsigned long, high_limit, same_va_end_addr);
 
 		/* If there's enough (> 33 bits) of GPU VA space, align
 		 * to 2MB boundaries.
@@ -301,45 +344,45 @@
 #endif /* CONFIG_64BIT */
 	if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
 	    (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
-		int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
-		struct kbase_va_region *reg;
+		int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		struct kbase_va_region *reg;
 
-		/* Need to hold gpu vm lock when using reg */
-		kbase_gpu_vm_lock(kctx);
-		reg = kctx->pending_regions[cookie];
-		if (!reg) {
-			kbase_gpu_vm_unlock(kctx);
-			return -EINVAL;
-		}
-		if (!(reg->flags & KBASE_REG_GPU_NX)) {
-			if (cpu_va_bits > gpu_pc_bits) {
-				align_offset = 1ULL << gpu_pc_bits;
-				align_mask = align_offset - 1;
-				is_shader_code = true;
-			}
-#if !MALI_USE_CSF
-		} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
-			unsigned long extension_bytes =
-				(unsigned long)(reg->extension
-						<< PAGE_SHIFT);
-			/* kbase_check_alloc_sizes() already satisfies
-			 * these checks, but they're here to avoid
-			 * maintenance hazards due to the assumptions
-			 * involved
-			 */
-			WARN_ON(reg->extension >
-				(ULONG_MAX >> PAGE_SHIFT));
-			WARN_ON(reg->initial_commit > (ULONG_MAX >> PAGE_SHIFT));
-			WARN_ON(!is_power_of_2(extension_bytes));
-			align_mask = extension_bytes - 1;
-			align_offset =
-				extension_bytes -
-				(reg->initial_commit << PAGE_SHIFT);
-#endif /* !MALI_USE_CSF */
-		} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
-			is_same_4gb_page = true;
-		}
+		/* Need to hold gpu vm lock when using reg */
+		kbase_gpu_vm_lock(kctx);
+		reg = kctx->pending_regions[cookie];
+		if (!reg) {
 			kbase_gpu_vm_unlock(kctx);
+			return -EINVAL;
+		}
+		if (!(reg->flags & KBASE_REG_GPU_NX)) {
+			if (cpu_va_bits > gpu_pc_bits) {
+				align_offset = 1ULL << gpu_pc_bits;
+				align_mask = align_offset - 1;
+				is_shader_code = true;
+			}
+#if !MALI_USE_CSF
+		} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
+			unsigned long extension_bytes =
+				(unsigned long)(reg->extension
+						<< PAGE_SHIFT);
+			/* kbase_check_alloc_sizes() already satisfies
+			 * these checks, but they're here to avoid
+			 * maintenance hazards due to the assumptions
+			 * involved
+			 */
+			WARN_ON(reg->extension >
+				(ULONG_MAX >> PAGE_SHIFT));
+			WARN_ON(reg->initial_commit > (ULONG_MAX >> PAGE_SHIFT));
+			WARN_ON(!is_power_of_2(extension_bytes));
+			align_mask = extension_bytes - 1;
+			align_offset =
+				extension_bytes -
+				(reg->initial_commit << PAGE_SHIFT);
+#endif /* !MALI_USE_CSF */
+		} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+			is_same_4gb_page = true;
+		}
+		kbase_gpu_vm_unlock(kctx);
 #ifndef CONFIG_64BIT
 	} else {
 		return current->mm->get_unmapped_area(
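
Note: this hunk appears to only re-indent the cookie-lookup block; the visible logic is unchanged. The pattern worth noting is the locking shape: kbase_gpu_vm_lock() is taken before kctx->pending_regions[cookie] is dereferenced, and every exit path, including the early -EINVAL return, drops the lock. A minimal userspace sketch of that acquire/validate/release shape (the pthread mutex and all names here stand in for the kbase lock and region table):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pending_region;	/* hypothetical pointer guarded by vm_lock */

/* Take the lock, validate the guarded pointer, release on every path */
static int use_pending_region(void)
{
	pthread_mutex_lock(&vm_lock);
	if (!pending_region) {
		pthread_mutex_unlock(&vm_lock);	/* error path unlocks too */
		return -1;
	}
	/* ... use pending_region while still holding the lock ... */
	pthread_mutex_unlock(&vm_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", use_pending_region());	/* prints -1: nothing pending */
	return 0;
}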
@@ -359,9 +402,15 @@
 
 	if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
 	    high_limit < same_va_end_addr) {
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+		/* Retry above TASK_UNMAPPED_BASE */
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = min_t(u64, mmap_end, same_va_end_addr);
+#else
 		/* Retry above mmap_base */
 		info.low_limit = mm->mmap_base;
 		info.high_limit = min_t(u64, TASK_SIZE, same_va_end_addr);
+#endif
 
 		ret = kbase_unmapped_area_topdown(&info, is_shader_code,
 						  is_same_4gb_page);
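
Note: the retry fires only when the first top-down pass failed and the search window had been capped at mm->mmap_base below same_va_end_addr; the second pass then widens the window, up to arch_get_mmap_end() on 6.1+ kernels or TASK_SIZE before. A toy sketch of the two-pass fallback (the limits and the search function are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical stand-in for the gap search: succeeds only if the window fits */
static long find_gap_topdown(unsigned long low, unsigned long high, unsigned long len)
{
	return (high >= low && high - low >= len) ? (long)(high - len) : -1;
}

int main(void)
{
	const unsigned long len = 1UL << 20;		/* 1 MiB request */
	unsigned long low = 1UL << 16;
	unsigned long high = low + (len / 2);		/* deliberately too small */
	long ret = find_gap_topdown(low, high, len);

	if (ret < 0) {
		/* Retry with widened limits, as the driver does above */
		low = 1UL << 17;	/* stands in for TASK_UNMAPPED_BASE */
		high = 1UL << 30;	/* stands in for mmap_end / TASK_SIZE */
		ret = find_gap_topdown(low, high, len);
	}
	printf("gap at %#lx\n", (unsigned long)ret);
	return 0;
}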
---|