 .. |  .. |
 42 |  42 |  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 43 |  43 |  */
 44 |  44 |
 45 |     | -#include <drm/drmP.h>
 46 |     | -#include <drm/drm_mm.h>
 47 |     | -#include <linux/slab.h>
 48 |     | -#include <linux/seq_file.h>
 49 |  45 |  #include <linux/export.h>
 50 |  46 |  #include <linux/interval_tree_generic.h>
    |  47 | +#include <linux/seq_file.h>
    |  48 | +#include <linux/slab.h>
    |  49 | +#include <linux/stacktrace.h>
    |  50 | +
    |  51 | +#include <drm/drm_mm.h>
 51 |  52 |
 52 |  53 |  /**
 53 |  54 |  * DOC: Overview
 .. |  .. |
106 | 107 |  static noinline void save_stack(struct drm_mm_node *node)
107 | 108 |  {
108 | 109 |          unsigned long entries[STACKDEPTH];
109 |     | -        struct stack_trace trace = {
110 |     | -                .entries = entries,
111 |     | -                .max_entries = STACKDEPTH,
112 |     | -                .skip = 1
113 |     | -        };
    | 110 | +        unsigned int n;
114 | 111 |
115 |     | -        save_stack_trace(&trace);
116 |     | -        if (trace.nr_entries != 0 &&
117 |     | -            trace.entries[trace.nr_entries-1] == ULONG_MAX)
118 |     | -                trace.nr_entries--;
    | 112 | +        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
119 | 113 |
120 | 114 |          /* May be called under spinlock, so avoid sleeping */
121 |     | -        node->stack = depot_save_stack(&trace, GFP_NOWAIT);
    | 115 | +        node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
122 | 116 |  }
123 | 117 |
124 | 118 |  static void show_leaks(struct drm_mm *mm)
125 | 119 |  {
126 | 120 |          struct drm_mm_node *node;
127 |     | -        unsigned long entries[STACKDEPTH];
    | 121 | +        unsigned long *entries;
    | 122 | +        unsigned int nr_entries;
128 | 123 |          char *buf;
129 | 124 |
130 | 125 |          buf = kmalloc(BUFSZ, GFP_KERNEL);
 .. |  .. |
132 | 127 |                  return;
133 | 128 |
134 | 129 |          list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
135 |     | -                struct stack_trace trace = {
136 |     | -                        .entries = entries,
137 |     | -                        .max_entries = STACKDEPTH
138 |     | -                };
139 |     | -
140 | 130 |                  if (!node->stack) {
141 | 131 |                          DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
142 | 132 |                                    node->start, node->size);
143 | 133 |                          continue;
144 | 134 |                  }
145 | 135 |
146 |     | -                depot_fetch_stack(node->stack, &trace);
147 |     | -                snprint_stack_trace(buf, BUFSZ, &trace, 0);
    | 136 | +                nr_entries = stack_depot_fetch(node->stack, &entries);
    | 137 | +                stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
148 | 138 |                  DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
149 | 139 |                            node->start, node->size, buf);
150 | 140 |          }
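
Note on the conversion above: the kernel's generic stacktrace rework dropped the struct stack_trace container, so save_stack_trace()/depot_save_stack()/depot_fetch_stack()/snprint_stack_trace() become stack_trace_save()/stack_depot_save()/stack_depot_fetch()/stack_trace_snprint(), all operating on bare unsigned long arrays (the ULONG_MAX end-of-trace trimming is gone too). A minimal sketch of the new pattern in isolation -- the helper names here are hypothetical, while STACKDEPTH/BUFSZ are this file's own constants:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_stack(void)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        /* skipnr=1 drops record_stack() itself from the trace */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        /* dedup into the global depot; GFP_NOWAIT is safe under spinlocks */
        return stack_depot_save(entries, n, GFP_NOWAIT);
}

static void print_stack(depot_stack_handle_t handle, char *buf, size_t len)
{
        unsigned long *entries;
        unsigned int n;

        /* the depot hands back its own storage; nothing to free */
        n = stack_depot_fetch(handle, &entries);
        stack_trace_snprint(buf, len, entries, n, 0);
}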
 .. |  .. |
184 | 174 |
185 | 175 |          node->__subtree_last = LAST(node);
186 | 176 |
187 |     | -        if (hole_node->allocated) {
    | 177 | +        if (drm_mm_node_allocated(hole_node)) {
188 | 178 |                  rb = &hole_node->rb;
189 | 179 |                  while (rb) {
190 | 180 |                          parent = rb_entry(rb, struct drm_mm_node, rb);
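
drm_mm_node_allocated() is the accessor that replaces direct reads of the old ->allocated bool throughout this series. It lives in include/drm/drm_mm.h next to the new flag bits; from memory it is essentially the following, so treat this as a sketch rather than a quote:

/* include/drm/drm_mm.h (sketch): ->allocated becomes a flag bit */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
        return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}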
 .. |  .. |
222 | 212 |                                     &drm_mm_interval_tree_augment);
223 | 213 |  }
224 | 214 |
225 |     | -#define RB_INSERT(root, member, expr) do { \
226 |     | -        struct rb_node **link = &root.rb_node, *rb = NULL; \
227 |     | -        u64 x = expr(node); \
228 |     | -        while (*link) { \
229 |     | -                rb = *link; \
230 |     | -                if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
231 |     | -                        link = &rb->rb_left; \
232 |     | -                else \
233 |     | -                        link = &rb->rb_right; \
234 |     | -        } \
235 |     | -        rb_link_node(&node->member, rb, link); \
236 |     | -        rb_insert_color(&node->member, &root); \
237 |     | -} while (0)
238 |     | -
239 | 215 |  #define HOLE_SIZE(NODE) ((NODE)->hole_size)
240 | 216 |  #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
241 | 217 |
 .. |  .. |
265 | 241 |          rb_insert_color_cached(&node->rb_hole_size, root, first);
266 | 242 |  }
267 | 243 |
    | 244 | +RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
    | 245 | +                         struct drm_mm_node, rb_hole_addr,
    | 246 | +                         u64, subtree_max_hole, HOLE_SIZE)
    | 247 | +
    | 248 | +static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
    | 249 | +{
    | 250 | +        struct rb_node **link = &root->rb_node, *rb_parent = NULL;
    | 251 | +        u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
    | 252 | +        struct drm_mm_node *parent;
    | 253 | +
    | 254 | +        while (*link) {
    | 255 | +                rb_parent = *link;
    | 256 | +                parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
    | 257 | +                if (parent->subtree_max_hole < subtree_max_hole)
    | 258 | +                        parent->subtree_max_hole = subtree_max_hole;
    | 259 | +                if (start < HOLE_ADDR(parent))
    | 260 | +                        link = &parent->rb_hole_addr.rb_left;
    | 261 | +                else
    | 262 | +                        link = &parent->rb_hole_addr.rb_right;
    | 263 | +        }
    | 264 | +
    | 265 | +        rb_link_node(&node->rb_hole_addr, rb_parent, link);
    | 266 | +        rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
    | 267 | +}
    | 268 | +
268 | 269 |  static void add_hole(struct drm_mm_node *node)
269 | 270 |  {
270 | 271 |          struct drm_mm *mm = node->mm;
271 | 272 |
272 | 273 |          node->hole_size =
273 | 274 |                  __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
    | 275 | +        node->subtree_max_hole = node->hole_size;
274 | 276 |          DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
275 | 277 |
276 | 278 |          insert_hole_size(&mm->holes_size, node);
277 |     | -        RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
    | 279 | +        insert_hole_addr(&mm->holes_addr, node);
278 | 280 |
279 | 281 |          list_add(&node->hole_stack, &mm->hole_stack);
280 | 282 |  }
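
The RB_DECLARE_CALLBACKS_MAX() line generates the propagate/copy/rotate callbacks from <linux/rbtree_augmented.h> that keep subtree_max_hole correct across rebalancing; insert_hole_addr() pre-propagates the new maximum on the way down, and the callbacks handle rotations and the erase path (rb_erase_augmented() below). The invariant being maintained, written out as illustrative code that is not part of the patch:

/* Illustrative only: what augment_callbacks keeps true for every node.
 * subtree_max_hole caches the largest hole_size anywhere in the node's
 * subtree, letting searches skip branches that cannot fit a request. */
static u64 expected_subtree_max_hole(struct rb_node *rb)
{
        struct drm_mm_node *node = rb_hole_addr_to_node(rb);
        u64 m = node->hole_size;

        if (rb->rb_left)
                m = max(m, rb_hole_addr_to_node(rb->rb_left)->subtree_max_hole);
        if (rb->rb_right)
                m = max(m, rb_hole_addr_to_node(rb->rb_right)->subtree_max_hole);

        return m; /* always equals node->subtree_max_hole */
}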
 .. |  .. |
285 | 287 |
286 | 288 |          list_del(&node->hole_stack);
287 | 289 |          rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
288 |     | -        rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
    | 290 | +        rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
    | 291 | +                           &augment_callbacks);
289 | 292 |          node->hole_size = 0;
    | 293 | +        node->subtree_max_hole = 0;
290 | 294 |
291 | 295 |          DRM_MM_BUG_ON(drm_mm_hole_follows(node));
292 | 296 |  }
 .. |  .. |
299 | 303 |  static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
300 | 304 |  {
301 | 305 |          return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
302 |     | -}
303 |     | -
304 |     | -static inline u64 rb_hole_size(struct rb_node *rb)
305 |     | -{
306 |     | -        return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
307 | 306 |  }
308 | 307 |
309 | 308 |  static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 .. |  .. |
326 | 325 |          return best;
327 | 326 |  }
328 | 327 |
329 |     | -static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
    | 328 | +static bool usable_hole_addr(struct rb_node *rb, u64 size)
    | 329 | +{
    | 330 | +        return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
    | 331 | +}
    | 332 | +
    | 333 | +static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
330 | 334 |  {
331 | 335 |          struct rb_node *rb = mm->holes_addr.rb_node;
332 | 336 |          struct drm_mm_node *node = NULL;
333 | 337 |
334 | 338 |          while (rb) {
335 | 339 |                  u64 hole_start;
    | 340 | +
    | 341 | +                if (!usable_hole_addr(rb, size))
    | 342 | +                        break;
336 | 343 |
337 | 344 |                  node = rb_hole_addr_to_node(rb);
338 | 345 |                  hole_start = __drm_mm_hole_node_start(node);
 .. |  .. |
359 | 366 |                  return best_hole(mm, size);
360 | 367 |
361 | 368 |          case DRM_MM_INSERT_LOW:
362 |     | -                return find_hole(mm, start);
    | 369 | +                return find_hole_addr(mm, start, size);
363 | 370 |
364 | 371 |          case DRM_MM_INSERT_HIGH:
365 |     | -                return find_hole(mm, end);
    | 372 | +                return find_hole_addr(mm, end, size);
366 | 373 |
367 | 374 |          case DRM_MM_INSERT_EVICT:
368 | 375 |                  return list_first_entry_or_null(&mm->hole_stack,
 .. |  .. |
371 | 378 |          }
372 | 379 |  }
373 | 380 |
    | 381 | +/**
    | 382 | + * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
    | 383 | + * @name: name of function to declare
    | 384 | + * @first: first rb member to traverse (either rb_left or rb_right).
    | 385 | + * @last: last rb member to traverse (either rb_right or rb_left).
    | 386 | + *
    | 387 | + * This macro declares a function to return the next hole of the addr rb tree.
    | 388 | + * While traversing the tree we take the searched size into account and only
    | 389 | + * visit branches with potential big enough holes.
    | 390 | + */
    | 391 | +
    | 392 | +#define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
    | 393 | +static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
    | 394 | +{ \
    | 395 | +        struct rb_node *parent, *node = &entry->rb_hole_addr; \
    | 396 | + \
    | 397 | +        if (!entry || RB_EMPTY_NODE(node)) \
    | 398 | +                return NULL; \
    | 399 | + \
    | 400 | +        if (usable_hole_addr(node->first, size)) { \
    | 401 | +                node = node->first; \
    | 402 | +                while (usable_hole_addr(node->last, size)) \
    | 403 | +                        node = node->last; \
    | 404 | +                return rb_hole_addr_to_node(node); \
    | 405 | +        } \
    | 406 | + \
    | 407 | +        while ((parent = rb_parent(node)) && node == parent->first) \
    | 408 | +                node = parent; \
    | 409 | + \
    | 410 | +        return rb_hole_addr_to_node(parent); \
    | 411 | +}
    | 412 | +
    | 413 | +DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
    | 414 | +DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
    | 415 | +
374 | 416 |  static struct drm_mm_node *
375 | 417 |  next_hole(struct drm_mm *mm,
376 | 418 |            struct drm_mm_node *node,
    | 419 | +          u64 size,
377 | 420 |            enum drm_mm_insert_mode mode)
378 | 421 |  {
379 | 422 |          switch (mode) {
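
Hand-expanding DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right) makes the pruned walk easier to read; this is just the macro output written out, not extra code in the patch. (Note the !entry check happens after &entry->rb_hole_addr is formed -- no dereference occurs, but it is the kind of ordering static checkers tend to flag.)

static struct drm_mm_node *next_hole_high_addr(struct drm_mm_node *entry, u64 size)
{
        struct rb_node *parent, *node = &entry->rb_hole_addr;

        if (!entry || RB_EMPTY_NODE(node))
                return NULL;

        /* Walking holes in descending address order: the predecessor is
         * the rightmost node of the left subtree, and usable_hole_addr()
         * prunes any branch whose subtree_max_hole cannot fit size. */
        if (usable_hole_addr(node->rb_left, size)) {
                node = node->rb_left;
                while (usable_hole_addr(node->rb_right, size))
                        node = node->rb_right;
                return rb_hole_addr_to_node(node);
        }

        /* No usable left subtree: climb while we are a left child; the
         * first ancestor reached from the right is the next lower hole. */
        while ((parent = rb_parent(node)) && node == parent->rb_left)
                node = parent;

        return rb_hole_addr_to_node(parent);
}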
 .. |  .. |
382 | 425 |                  return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
383 | 426 |
384 | 427 |          case DRM_MM_INSERT_LOW:
385 |     | -                return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
    | 428 | +                return next_hole_low_addr(node, size);
386 | 429 |
387 | 430 |          case DRM_MM_INSERT_HIGH:
388 |     | -                return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
    | 431 | +                return next_hole_high_addr(node, size);
389 | 432 |
390 | 433 |          case DRM_MM_INSERT_EVICT:
391 | 434 |                  node = list_next_entry(node, hole_stack);
 .. |  .. |
409 | 452 |   */
410 | 453 |  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
411 | 454 |  {
412 |     | -        u64 end = node->start + node->size;
413 | 455 |          struct drm_mm_node *hole;
414 | 456 |          u64 hole_start, hole_end;
415 | 457 |          u64 adj_start, adj_end;
    | 458 | +        u64 end;
416 | 459 |
417 | 460 |          end = node->start + node->size;
418 | 461 |          if (unlikely(end <= node->start))
419 | 462 |                  return -ENOSPC;
420 | 463 |
421 | 464 |          /* Find the relevant hole to add our node to */
422 |     | -        hole = find_hole(mm, node->start);
    | 465 | +        hole = find_hole_addr(mm, node->start, 0);
423 | 466 |          if (!hole)
424 | 467 |                  return -ENOSPC;
425 | 468 |
 .. |  .. |
434 | 477 |
435 | 478 |          node->mm = mm;
436 | 479 |
    | 480 | +        __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
437 | 481 |          list_add(&node->node_list, &hole->node_list);
438 | 482 |          drm_mm_interval_tree_add_node(hole, node);
439 |     | -        node->allocated = true;
440 | 483 |          node->hole_size = 0;
441 | 484 |
442 | 485 |          rm_hole(hole);
 .. |  .. |
482 | 525 |          u64 remainder_mask;
483 | 526 |          bool once;
484 | 527 |
485 |     | -        DRM_MM_BUG_ON(range_start >= range_end);
    | 528 | +        DRM_MM_BUG_ON(range_start > range_end);
486 | 529 |
487 | 530 |          if (unlikely(size == 0 || range_end - range_start < size))
488 | 531 |                  return -ENOSPC;
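
Relaxing the assert from range_start >= range_end to range_start > range_end means an empty range (range_start == range_end) no longer trips a BUG: with any nonzero size it falls through to the range_end - range_start < size check and returns -ENOSPC. A hypothetical caller shape (the function and variable names here are illustrative; the insert API itself is real):

/* Hypothetical caller: an empty window is now a soft -ENOSPC, not a BUG */
static int try_place(struct drm_mm *mm, struct drm_mm_node *node,
                     u64 size, u64 window_start, u64 window_end)
{
        return drm_mm_insert_node_in_range(mm, node, size,
                                           0, 0, /* alignment, color */
                                           window_start, window_end,
                                           DRM_MM_INSERT_BEST);
}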
 .. |  .. |
499 | 542 |          remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
500 | 543 |          for (hole = first_hole(mm, range_start, range_end, size, mode);
501 | 544 |               hole;
502 |     | -             hole = once ? NULL : next_hole(mm, hole, mode)) {
    | 545 | +             hole = once ? NULL : next_hole(mm, hole, size, mode)) {
503 | 546 |                  u64 hole_start = __drm_mm_hole_node_start(hole);
504 | 547 |                  u64 hole_end = hole_start + hole->hole_size;
505 | 548 |                  u64 adj_start, adj_end;
 .. |  .. |
553 | 596 |          node->color = color;
554 | 597 |          node->hole_size = 0;
555 | 598 |
    | 599 | +        __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
556 | 600 |          list_add(&node->node_list, &hole->node_list);
557 | 601 |          drm_mm_interval_tree_add_node(hole, node);
558 |     | -        node->allocated = true;
559 | 602 |
560 | 603 |          rm_hole(hole);
561 | 604 |          if (adj_start > hole_start)
 .. |  .. |
571 | 614 |  }
572 | 615 |  EXPORT_SYMBOL(drm_mm_insert_node_in_range);
573 | 616 |
    | 617 | +static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
    | 618 | +{
    | 619 | +        return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
    | 620 | +}
    | 621 | +
574 | 622 |  /**
575 | 623 |   * drm_mm_remove_node - Remove a memory node from the allocator.
576 | 624 |   * @node: drm_mm_node to remove
 .. |  .. |
584 | 632 |          struct drm_mm *mm = node->mm;
585 | 633 |          struct drm_mm_node *prev_node;
586 | 634 |
587 |     | -        DRM_MM_BUG_ON(!node->allocated);
588 |     | -        DRM_MM_BUG_ON(node->scanned_block);
    | 635 | +        DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
    | 636 | +        DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
589 | 637 |
590 | 638 |          prev_node = list_prev_entry(node, node_list);
591 | 639 |
 .. |  .. |
594 | 642 |
595 | 643 |          drm_mm_interval_tree_remove(node, &mm->interval_tree);
596 | 644 |          list_del(&node->node_list);
597 |     | -        node->allocated = false;
598 | 645 |
599 | 646 |          if (drm_mm_hole_follows(prev_node))
600 | 647 |                  rm_hole(prev_node);
601 | 648 |          add_hole(prev_node);
    | 649 | +
    | 650 | +        clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
602 | 651 |  }
603 | 652 |  EXPORT_SYMBOL(drm_mm_remove_node);
604 | 653 |
 .. |  .. |
615 | 664 |  {
616 | 665 |          struct drm_mm *mm = old->mm;
617 | 666 |
618 |     | -        DRM_MM_BUG_ON(!old->allocated);
    | 667 | +        DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
619 | 668 |
620 | 669 |          *new = *old;
621 | 670 |
    | 671 | +        __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
622 | 672 |          list_replace(&old->node_list, &new->node_list);
623 | 673 |          rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
624 | 674 |
 .. |  .. |
632 | 682 |                                  &mm->holes_addr);
633 | 683 |          }
634 | 684 |
635 |     | -        old->allocated = false;
636 |     | -        new->allocated = true;
    | 685 | +        clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
637 | 686 |  }
638 | 687 |  EXPORT_SYMBOL(drm_mm_replace_node);
639 | 688 |
 .. |  .. |
741 | 790 |          u64 adj_start, adj_end;
742 | 791 |
743 | 792 |          DRM_MM_BUG_ON(node->mm != mm);
744 |     | -        DRM_MM_BUG_ON(!node->allocated);
745 |     | -        DRM_MM_BUG_ON(node->scanned_block);
746 |     | -        node->scanned_block = true;
    | 793 | +        DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
    | 794 | +        DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
    | 795 | +        __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
747 | 796 |          mm->scan_active++;
748 | 797 |
749 | 798 |          /* Remove this block from the node_list so that we enlarge the hole
 .. |  .. |
816 | 865 |   * When the scan list is empty, the selected memory nodes can be freed. An
817 | 866 |   * immediately following drm_mm_insert_node_in_range_generic() or one of the
818 | 867 |   * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
819 |     | - * the just freed block (because its at the top of the free_stack list).
    | 868 | + * the just freed block (because it's at the top of the free_stack list).
820 | 869 |   *
821 | 870 |   * Returns:
822 | 871 |   * True if this block should be evicted, false otherwise. Will always
 .. |  .. |
828 | 877 |          struct drm_mm_node *prev_node;
829 | 878 |
830 | 879 |          DRM_MM_BUG_ON(node->mm != scan->mm);
831 |     | -        DRM_MM_BUG_ON(!node->scanned_block);
832 |     | -        node->scanned_block = false;
    | 880 | +        DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
    | 881 | +        __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
833 | 882 |
834 | 883 |          DRM_MM_BUG_ON(!node->mm->scan_active);
835 | 884 |          node->mm->scan_active--;
 .. |  .. |
927 | 976 |
928 | 977 |          /* Clever trick to avoid a special case in the free hole tracking. */
929 | 978 |          INIT_LIST_HEAD(&mm->head_node.node_list);
930 |     | -        mm->head_node.allocated = false;
    | 979 | +        mm->head_node.flags = 0;
931 | 980 |          mm->head_node.mm = mm;
932 | 981 |          mm->head_node.start = start + size;
933 | 982 |          mm->head_node.size = -size;