.. | ..
45 | 45 | #define CREATE_TRACE_POINTS
46 | 46 | #include <trace/events/compaction.h>
47 | 47 |
| 48 | +#undef CREATE_TRACE_POINTS
| 49 | +#ifndef __GENKSYMS__
| 50 | +#include <trace/hooks/mm.h>
| 51 | +#endif
| 52 | +
48 | 53 | #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
49 | 54 | #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
50 | 55 | #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
51 | 56 | #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
| 57 | +
| 58 | +/*
| 59 | + * Fragmentation score check interval for proactive compaction purposes.
| 60 | + */
| 61 | +static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
| 62 | +
| 63 | +/*
| 64 | + * Page order with respect to which proactive compaction
| 65 | + * calculates external fragmentation, which is used as
| 66 | + * the "fragmentation score" of a node/zone.
| 67 | + */
| 68 | +#if defined CONFIG_TRANSPARENT_HUGEPAGE
| 69 | +#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER
| 70 | +#elif defined CONFIG_HUGETLBFS
| 71 | +#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER
| 72 | +#else
| 73 | +#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
| 74 | +#endif
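For illustration (not part of the patch): on a typical x86-64 configuration with 4 KiB pages, PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, and HPAGE_PMD_ORDER and HUGETLB_PAGE_ORDER commonly resolve to 9 as well, so COMPACTION_HPAGE_ORDER scores fragmentation at a 2 MiB granularity whichever branch is taken.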
52 | 75 |
53 | 76 | static unsigned long release_freepages(struct list_head *freelist)
54 | 77 | {
.. | ..
66 | 89 | return high_pfn;
67 | 90 | }
68 | 91 |
69 | | -static void map_pages(struct list_head *list)
| 92 | +static void split_map_pages(struct list_head *list)
70 | 93 | {
71 | 94 | unsigned int i, order, nr_pages;
72 | 95 | struct page *page, *next;
.. | ..
136 | 159 |
137 | 160 | /*
138 | 161 | * Compaction is deferred when compaction fails to result in a page
139 | | - * allocation success. 1 << compact_defer_limit compactions are skipped up
| 162 | + * allocation success. 1 << compact_defer_shift compactions are skipped up
140 | 163 | * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
141 | 164 | */
142 | 165 | void defer_compaction(struct zone *zone, int order)
.. | ..
162 | 185 | return false;
163 | 186 |
164 | 187 | /* Avoid possible overflow */
165 | | - if (++zone->compact_considered > defer_limit)
| 188 | + if (++zone->compact_considered >= defer_limit) {
166 | 189 | zone->compact_considered = defer_limit;
167 | | -
168 | | - if (zone->compact_considered >= defer_limit)
169 | 190 | return false;
| 191 | + }
170 | 192 |
171 | 193 | trace_mm_compaction_deferred(zone, order);
172 | 194 |
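The deferral logic above is an exponential backoff: each failed compaction raises compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT), and 1 << compact_defer_shift subsequent requests are then skipped. A minimal stand-alone C model of the bookkeeping, with hypothetical names and not part of the patch:

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* same cap as the kernel's */

/* Models the defer bookkeeping fields of struct zone. */
struct zone_model {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
};

/* Mirrors the clamped check above: true while compaction stays deferred. */
static bool model_deferred(struct zone_model *z)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (++z->compact_considered >= defer_limit) {
		z->compact_considered = defer_limit;	/* avoid overflow */
		return false;				/* enough waiting: retry */
	}
	return true;
}

int main(void)
{
	struct zone_model z = { .compact_defer_shift = 2 };	/* two prior failures */

	for (int i = 1; i <= 5; i++)
		printf("attempt %d: %s\n", i,
		       model_deferred(&z) ? "deferred" : "allowed");
	return 0;	/* attempts 1-3 are deferred, 4 and 5 are allowed */
}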
.. | ..
237 | 259 | return false;
238 | 260 | }
239 | 261 |
| 262 | +static bool
| 263 | +__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
| 264 | + bool check_target)
| 265 | +{
| 266 | + struct page *page = pfn_to_online_page(pfn);
| 267 | + struct page *block_page;
| 268 | + struct page *end_page;
| 269 | + unsigned long block_pfn;
| 270 | +
| 271 | + if (!page)
| 272 | + return false;
| 273 | + if (zone != page_zone(page))
| 274 | + return false;
| 275 | + if (pageblock_skip_persistent(page))
| 276 | + return false;
| 277 | +
| 278 | + /*
| 279 | + * If skip is already cleared do no further checking once the
| 280 | + * restart points have been set.
| 281 | + */
| 282 | + if (check_source && check_target && !get_pageblock_skip(page))
| 283 | + return true;
| 284 | +
| 285 | + /*
| 286 | + * If clearing skip for the target scanner, do not select a
| 287 | + * non-movable pageblock as the starting point.
| 288 | + */
| 289 | + if (!check_source && check_target &&
| 290 | + get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
| 291 | + return false;
| 292 | +
| 293 | + /* Ensure the start of the pageblock or zone is online and valid */
| 294 | + block_pfn = pageblock_start_pfn(pfn);
| 295 | + block_pfn = max(block_pfn, zone->zone_start_pfn);
| 296 | + block_page = pfn_to_online_page(block_pfn);
| 297 | + if (block_page) {
| 298 | + page = block_page;
| 299 | + pfn = block_pfn;
| 300 | + }
| 301 | +
| 302 | + /* Ensure the end of the pageblock or zone is online and valid */
| 303 | + block_pfn = pageblock_end_pfn(pfn) - 1;
| 304 | + block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
| 305 | + end_page = pfn_to_online_page(block_pfn);
| 306 | + if (!end_page)
| 307 | + return false;
| 308 | +
| 309 | + /*
| 310 | + * Only clear the hint if a sample indicates there is either a
| 311 | + * free page or an LRU page in the block. One or other condition
| 312 | + * is necessary for the block to be a migration source/target.
| 313 | + */
| 314 | + do {
| 315 | + if (pfn_valid_within(pfn)) {
| 316 | + if (check_source && PageLRU(page)) {
| 317 | + clear_pageblock_skip(page);
| 318 | + return true;
| 319 | + }
| 320 | +
| 321 | + if (check_target && PageBuddy(page)) {
| 322 | + clear_pageblock_skip(page);
| 323 | + return true;
| 324 | + }
| 325 | + }
| 326 | +
| 327 | + page += (1 << PAGE_ALLOC_COSTLY_ORDER);
| 328 | + pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
| 329 | + } while (page <= end_page);
| 330 | +
| 331 | + return false;
| 332 | +}
| 333 | +
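For scale (not part of the patch): the sampling stride is 1 << PAGE_ALLOC_COSTLY_ORDER = 8 pages, so a typical order-9 pageblock of 512 pages is probed at most 64 times instead of page by page, which keeps the reset walk cheap on large zones.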
240 | 334 | /*
241 | 335 | * This function is called to clear all cached information on pageblocks that
242 | 336 | * should be skipped for page isolation when the migrate and free page scanner
.. | ..
244 | 338 | */
245 | 339 | static void __reset_isolation_suitable(struct zone *zone)
246 | 340 | {
247 | | - unsigned long start_pfn = zone->zone_start_pfn;
248 | | - unsigned long end_pfn = zone_end_pfn(zone);
249 | | - unsigned long pfn;
| 341 | + unsigned long migrate_pfn = zone->zone_start_pfn;
| 342 | + unsigned long free_pfn = zone_end_pfn(zone) - 1;
| 343 | + unsigned long reset_migrate = free_pfn;
| 344 | + unsigned long reset_free = migrate_pfn;
| 345 | + bool source_set = false;
| 346 | + bool free_set = false;
| 347 | +
| 348 | + if (!zone->compact_blockskip_flush)
| 349 | + return;
250 | 350 |
251 | 351 | zone->compact_blockskip_flush = false;
252 | 352 |
253 | | - /* Walk the zone and mark every pageblock as suitable for isolation */
254 | | - for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
255 | | - struct page *page;
256 | | -
| 353 | + /*
| 354 | + * Walk the zone and update pageblock skip information. Source looks
| 355 | + * for PageLRU while target looks for PageBuddy. When the scanner
| 356 | + * is found, both PageBuddy and PageLRU are checked as the pageblock
| 357 | + * is suitable as both source and target.
| 358 | + */
| 359 | + for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
| 360 | + free_pfn -= pageblock_nr_pages) {
257 | 361 | cond_resched();
258 | 362 |
259 | | - page = pfn_to_online_page(pfn);
260 | | - if (!page)
261 | | - continue;
262 | | - if (zone != page_zone(page))
263 | | - continue;
264 | | - if (pageblock_skip_persistent(page))
265 | | - continue;
| 363 | + /* Update the migrate PFN */
| 364 | + if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
| 365 | + migrate_pfn < reset_migrate) {
| 366 | + source_set = true;
| 367 | + reset_migrate = migrate_pfn;
| 368 | + zone->compact_init_migrate_pfn = reset_migrate;
| 369 | + zone->compact_cached_migrate_pfn[0] = reset_migrate;
| 370 | + zone->compact_cached_migrate_pfn[1] = reset_migrate;
| 371 | + }
266 | 372 |
267 | | - clear_pageblock_skip(page);
| 373 | + /* Update the free PFN */
| 374 | + if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
| 375 | + free_pfn > reset_free) {
| 376 | + free_set = true;
| 377 | + reset_free = free_pfn;
| 378 | + zone->compact_init_free_pfn = reset_free;
| 379 | + zone->compact_cached_free_pfn = reset_free;
| 380 | + }
268 | 381 | }
269 | 382 |
270 | | - reset_cached_positions(zone);
| 383 | + /* Leave no distance if no suitable block was reset */
| 384 | + if (reset_migrate >= reset_free) {
| 385 | + zone->compact_cached_migrate_pfn[0] = migrate_pfn;
| 386 | + zone->compact_cached_migrate_pfn[1] = migrate_pfn;
| 387 | + zone->compact_cached_free_pfn = free_pfn;
| 388 | + }
271 | 389 | }
272 | 390 |
273 | 391 | void reset_isolation_suitable(pg_data_t *pgdat)
.. | ..
286 | 404 | }
287 | 405 |
288 | 406 | /*
| 407 | + * Sets the pageblock skip bit if it was clear. Note that this is a hint as
| 408 | + * locks are not required for readers/writers. Returns true if it was already set.
| 409 | + */
| 410 | +static bool test_and_set_skip(struct compact_control *cc, struct page *page,
| 411 | + unsigned long pfn)
| 412 | +{
| 413 | + bool skip;
| 414 | +
| 415 | + /* Do not update if skip hint is being ignored */
| 416 | + if (cc->ignore_skip_hint)
| 417 | + return false;
| 418 | +
| 419 | + if (!IS_ALIGNED(pfn, pageblock_nr_pages))
| 420 | + return false;
| 421 | +
| 422 | + skip = get_pageblock_skip(page);
| 423 | + if (!skip && !cc->no_set_skip_hint)
| 424 | + set_pageblock_skip(page);
| 425 | +
| 426 | + return skip;
| 427 | +}
| 428 | +
| 429 | +static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
| 430 | +{
| 431 | + struct zone *zone = cc->zone;
| 432 | +
| 433 | + pfn = pageblock_end_pfn(pfn);
| 434 | +
| 435 | + /* Set for isolation rather than compaction */
| 436 | + if (cc->no_set_skip_hint)
| 437 | + return;
| 438 | +
| 439 | + if (pfn > zone->compact_cached_migrate_pfn[0])
| 440 | + zone->compact_cached_migrate_pfn[0] = pfn;
| 441 | + if (cc->mode != MIGRATE_ASYNC &&
| 442 | + pfn > zone->compact_cached_migrate_pfn[1])
| 443 | + zone->compact_cached_migrate_pfn[1] = pfn;
| 444 | +}
| 445 | +
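Worth noting about the two helpers above: test_and_set_skip() acts only on pageblock-aligned PFNs, so at most one position per block can observe-then-set the bit, and because no lock protects it the bit remains strictly a hint; a lost racing update costs at worst one redundant rescan of the block.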
| 446 | +/*
289 | 447 | * If no pages were isolated then mark this pageblock to be skipped in the
290 | 448 | * future. The information is later cleared by __reset_isolation_suitable().
291 | 449 | */
292 | 450 | static void update_pageblock_skip(struct compact_control *cc,
293 | | - struct page *page, unsigned long nr_isolated,
294 | | - bool migrate_scanner)
| 451 | + struct page *page, unsigned long pfn)
295 | 452 | {
296 | 453 | struct zone *zone = cc->zone;
297 | | - unsigned long pfn;
298 | 454 |
299 | 455 | if (cc->no_set_skip_hint)
300 | 456 | return;
.. | ..
302 | 458 | if (!page)
303 | 459 | return;
304 | 460 |
305 | | - if (nr_isolated)
306 | | - return;
307 | | -
308 | 461 | set_pageblock_skip(page);
309 | 462 |
310 | | - pfn = page_to_pfn(page);
311 | | -
312 | 463 | /* Update where async and sync compaction should restart */
313 | | - if (migrate_scanner) {
314 | | - if (pfn > zone->compact_cached_migrate_pfn[0])
315 | | - zone->compact_cached_migrate_pfn[0] = pfn;
316 | | - if (cc->mode != MIGRATE_ASYNC &&
317 | | - pfn > zone->compact_cached_migrate_pfn[1])
318 | | - zone->compact_cached_migrate_pfn[1] = pfn;
319 | | - } else {
320 | | - if (pfn < zone->compact_cached_free_pfn)
321 | | - zone->compact_cached_free_pfn = pfn;
322 | | - }
| 464 | + if (pfn < zone->compact_cached_free_pfn)
| 465 | + zone->compact_cached_free_pfn = pfn;
323 | 466 | }
324 | 467 | #else
325 | 468 | static inline bool isolation_suitable(struct compact_control *cc,
.. | ..
334 | 477 | }
335 | 478 |
336 | 479 | static inline void update_pageblock_skip(struct compact_control *cc,
337 | | - struct page *page, unsigned long nr_isolated,
338 | | - bool migrate_scanner)
| 480 | + struct page *page, unsigned long pfn)
339 | 481 | {
| 482 | +}
| 483 | +
| 484 | +static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
| 485 | +{
| 486 | +}
| 487 | +
| 488 | +static bool test_and_set_skip(struct compact_control *cc, struct page *page,
| 489 | + unsigned long pfn)
| 490 | +{
| 491 | + return false;
340 | 492 | }
341 | 493 | #endif /* CONFIG_COMPACTION */
342 | 494 |
343 | 495 | /*
344 | 496 | * Compaction requires the taking of some coarse locks that are potentially
345 | | - * very heavily contended. For async compaction, back out if the lock cannot
346 | | - * be taken immediately. For sync compaction, spin on the lock if needed.
| 497 | + * very heavily contended. For async compaction, trylock and record if the
| 498 | + * lock is contended. The lock will still be acquired but compaction will
| 499 | + * abort when the current block is finished regardless of success rate.
| 500 | + * Sync compaction acquires the lock.
347 | 501 | *
348 | | - * Returns true if the lock is held
349 | | - * Returns false if the lock is not held and compaction should abort
| 502 | + * Always returns true which makes it easier to track lock state in callers.
350 | 503 | */
351 | | -static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
| 504 | +static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
352 | 505 | struct compact_control *cc)
| 506 | + __acquires(lock)
353 | 507 | {
354 | | - if (cc->mode == MIGRATE_ASYNC) {
355 | | - if (!spin_trylock_irqsave(lock, *flags)) {
356 | | - cc->contended = true;
357 | | - return false;
358 | | - }
359 | | - } else {
360 | | - spin_lock_irqsave(lock, *flags);
| 508 | + /* Track if the lock is contended in async mode */
| 509 | + if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
| 510 | + if (spin_trylock_irqsave(lock, *flags))
| 511 | + return true;
| 512 | +
| 513 | + cc->contended = true;
361 | 514 | }
362 | 515 |
| 516 | + spin_lock_irqsave(lock, *flags);
363 | 517 | return true;
364 | 518 | }
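The contract change above simplifies callers: since compact_lock_irqsave() always returns true, `locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc);` leaves the lock reliably held, and contention now surfaces only through cc->contended, which ends the scan at the next block boundary instead of aborting mid-block.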
365 | 519 |
.. | ..
391 | 545 | return true;
392 | 546 | }
393 | 547 |
394 | | - if (need_resched()) {
395 | | - if (cc->mode == MIGRATE_ASYNC) {
396 | | - cc->contended = true;
397 | | - return true;
398 | | - }
399 | | - cond_resched();
400 | | - }
401 | | -
402 | | - return false;
403 | | -}
404 | | -
405 | | -/*
406 | | - * Aside from avoiding lock contention, compaction also periodically checks
407 | | - * need_resched() and either schedules in sync compaction or aborts async
408 | | - * compaction. This is similar to what compact_unlock_should_abort() does, but
409 | | - * is used where no lock is concerned.
410 | | - *
411 | | - * Returns false when no scheduling was needed, or sync compaction scheduled.
412 | | - * Returns true when async compaction should abort.
413 | | - */
414 | | -static inline bool compact_should_abort(struct compact_control *cc)
415 | | -{
416 | | - /* async compaction aborts if contended */
417 | | - if (need_resched()) {
418 | | - if (cc->mode == MIGRATE_ASYNC) {
419 | | - cc->contended = true;
420 | | - return true;
421 | | - }
422 | | -
423 | | - cond_resched();
424 | | - }
| 548 | + cond_resched();
425 | 549 |
426 | 550 | return false;
427 | 551 | }
.. | ..
435 | 559 | unsigned long *start_pfn,
436 | 560 | unsigned long end_pfn,
437 | 561 | struct list_head *freelist,
| 562 | + unsigned int stride,
438 | 563 | bool strict)
439 | 564 | {
440 | 565 | int nr_scanned = 0, total_isolated = 0;
441 | | - struct page *cursor, *valid_page = NULL;
| 566 | + struct page *cursor;
442 | 567 | unsigned long flags = 0;
443 | 568 | bool locked = false;
444 | 569 | unsigned long blockpfn = *start_pfn;
445 | 570 | unsigned int order;
446 | 571 |
| 572 | + /* Strict mode is for isolation, speed is secondary */
| 573 | + if (strict)
| 574 | + stride = 1;
| 575 | +
447 | 576 | cursor = pfn_to_page(blockpfn);
448 | 577 |
449 | 578 | /* Isolate free pages. */
450 | | - for (; blockpfn < end_pfn; blockpfn++, cursor++) {
| 579 | + for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
451 | 580 | int isolated;
452 | 581 | struct page *page = cursor;
453 | 582 |
.. | ..
464 | 593 | nr_scanned++;
465 | 594 | if (!pfn_valid_within(blockpfn))
466 | 595 | goto isolate_fail;
467 | | -
468 | | - if (!valid_page)
469 | | - valid_page = page;
470 | 596 |
471 | 597 | /*
472 | 598 | * For compound pages such as THP and hugetlbfs, we can save
.. | ..
495 | 621 | * recheck as well.
496 | 622 | */
497 | 623 | if (!locked) {
498 | | - /*
499 | | - * The zone lock must be held to isolate freepages.
500 | | - * Unfortunately this is a very coarse lock and can be
501 | | - * heavily contended if there are parallel allocations
502 | | - * or parallel compactions. For async compaction do not
503 | | - * spin on the lock and we acquire the lock as late as
504 | | - * possible.
505 | | - */
506 | | - locked = compact_trylock_irqsave(&cc->zone->lock,
| 624 | + locked = compact_lock_irqsave(&cc->zone->lock,
507 | 625 | &flags, cc);
508 | | - if (!locked)
509 | | - break;
510 | 626 |
511 | 627 | /* Recheck this is a buddy page under lock */
512 | 628 | if (!PageBuddy(page))
.. | ..
514 | 630 | }
515 | 631 |
516 | 632 | /* Found a free page, will break it into order-0 pages */
517 | | - order = page_order(page);
| 633 | + order = buddy_order(page);
518 | 634 | isolated = __isolate_free_page(page, order);
519 | 635 | if (!isolated)
520 | 636 | break;
.. | ..
564 | 680 | */
565 | 681 | if (strict && blockpfn < end_pfn)
566 | 682 | total_isolated = 0;
567 | | -
568 | | - /* Update the pageblock-skip if the whole pageblock was scanned */
569 | | - if (blockpfn == end_pfn)
570 | | - update_pageblock_skip(cc, valid_page, total_isolated, false);
571 | 683 |
572 | 684 | cc->total_free_scanned += nr_scanned;
573 | 685 | if (total_isolated)
.. | ..
626 | 738 | break;
627 | 739 |
628 | 740 | isolated = isolate_freepages_block(cc, &isolate_start_pfn,
629 | | - block_end_pfn, &freelist, true);
| 741 | + block_end_pfn, &freelist, 0, true);
630 | 742 |
631 | 743 | /*
632 | 744 | * In strict mode, isolate_freepages_block() returns 0 if
.. | ..
644 | 756 | }
645 | 757 |
646 | 758 | /* __isolate_free_page() does not map the pages */
647 | | - map_pages(&freelist);
| 759 | + split_map_pages(&freelist);
648 | 760 |
649 | 761 | if (pfn < end_pfn) {
650 | 762 | /* Loop terminated early, cleanup. */
.. | ..
656 | 768 | return pfn;
657 | 769 | }
658 | 770 |
| 771 | +#ifdef CONFIG_COMPACTION
| 772 | +unsigned long isolate_and_split_free_page(struct page *page,
| 773 | + struct list_head *list)
| 774 | +{
| 775 | + unsigned long isolated;
| 776 | + unsigned int order;
| 777 | +
| 778 | + if (!PageBuddy(page))
| 779 | + return 0;
| 780 | +
| 781 | + order = buddy_order(page);
| 782 | + isolated = __isolate_free_page(page, order);
| 783 | + if (!isolated)
| 784 | + return 0;
| 785 | +
| 786 | + set_page_private(page, order);
| 787 | + list_add(&page->lru, list);
| 788 | +
| 789 | + split_map_pages(list);
| 790 | +
| 791 | + return isolated;
| 792 | +}
| 793 | +EXPORT_SYMBOL_GPL(isolate_and_split_free_page);
| 794 | +#endif
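isolate_and_split_free_page() and the <trace/hooks/mm.h> include near the top look like Android GKI vendor-hook plumbing rather than upstream code. A hypothetical caller sketch (assuming, as __isolate_free_page() requires, that the page's zone->lock is already held):

	LIST_HEAD(tmp);		/* receives the split order-0 pages */
	unsigned long nr = isolate_and_split_free_page(page, &tmp);

	if (nr) {
		/* 'tmp' now holds nr single pages ready for the consumer */
	}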
| 795 | +
659 | 796 | /* Similar to reclaim, but different enough that they don't share logic */
660 | | -static bool too_many_isolated(struct zone *zone)
| 797 | +static bool too_many_isolated(pg_data_t *pgdat)
661 | 798 | {
662 | 799 | unsigned long active, inactive, isolated;
663 | 800 |
664 | | - inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
665 | | - node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
666 | | - active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
667 | | - node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
668 | | - isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
669 | | - node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
| 801 | + inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
| 802 | + node_page_state(pgdat, NR_INACTIVE_ANON);
| 803 | + active = node_page_state(pgdat, NR_ACTIVE_FILE) +
| 804 | + node_page_state(pgdat, NR_ACTIVE_ANON);
| 805 | + isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
| 806 | + node_page_state(pgdat, NR_ISOLATED_ANON);
670 | 807 |
671 | 808 | return isolated > (inactive + active) / 2;
672 | 809 | }
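In concrete terms: the throttle trips once isolated pages exceed half of the node's LRU, e.g. with 300,000 inactive and 100,000 active pages, isolation pauses once more than 200,000 pages are already isolated node-wide.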
.. | ..
693 | 830 | isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
694 | 831 | unsigned long end_pfn, isolate_mode_t isolate_mode)
695 | 832 | {
696 | | - struct zone *zone = cc->zone;
| 833 | + pg_data_t *pgdat = cc->zone->zone_pgdat;
697 | 834 | unsigned long nr_scanned = 0, nr_isolated = 0;
698 | 835 | struct lruvec *lruvec;
699 | 836 | unsigned long flags = 0;
.. | ..
702 | 839 | unsigned long start_pfn = low_pfn;
703 | 840 | bool skip_on_failure = false;
704 | 841 | unsigned long next_skip_pfn = 0;
| 842 | + bool skip_updated = false;
705 | 843 |
706 | 844 | /*
707 | 845 | * Ensure that there are not too many pages isolated from the LRU
708 | 846 | * list by either parallel reclaimers or compaction. If there are,
709 | 847 | * delay for some time until fewer pages are isolated
710 | 848 | */
711 | | - while (unlikely(too_many_isolated(zone))) {
| 849 | + while (unlikely(too_many_isolated(pgdat))) {
| 850 | + /* stop isolation if there are still pages not migrated */
| 851 | + if (cc->nr_migratepages)
| 852 | + return 0;
| 853 | +
712 | 854 | /* async migration should just abort */
713 | 855 | if (cc->mode == MIGRATE_ASYNC)
714 | 856 | return 0;
.. | ..
719 | 861 | return 0;
720 | 862 | }
721 | 863 |
722 | | - if (compact_should_abort(cc))
723 | | - return 0;
| 864 | + cond_resched();
724 | 865 |
725 | 866 | if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
726 | 867 | skip_on_failure = true;
.. | ..
754 | 895 |
755 | 896 | /*
756 | 897 | * Periodically drop the lock (if held) regardless of its
757 | | - * contention, to give chance to IRQs. Abort async compaction
758 | | - * if contended.
| 898 | + * contention, to give a chance to IRQs. Abort completely if
| 899 | + * a fatal signal is pending.
759 | 900 | */
760 | 901 | if (!(low_pfn % SWAP_CLUSTER_MAX)
761 | | - && compact_unlock_should_abort(zone_lru_lock(zone), flags,
762 | | - &locked, cc))
763 | | - break;
| 902 | + && compact_unlock_should_abort(&pgdat->lru_lock,
| 903 | + flags, &locked, cc)) {
| 904 | + low_pfn = 0;
| 905 | + goto fatal_pending;
| 906 | + }
764 | 907 |
765 | 908 | if (!pfn_valid_within(low_pfn))
766 | 909 | goto isolate_fail;
.. | ..
768 | 911 |
769 | 912 | page = pfn_to_page(low_pfn);
770 | 913 |
771 | | - if (!valid_page)
| 914 | + /*
| 915 | + * Check if the pageblock has already been marked skipped.
| 916 | + * Only the aligned PFN is checked as the caller isolates
| 917 | + * COMPACT_CLUSTER_MAX at a time so the second call must
| 918 | + * not falsely conclude that the block should be skipped.
| 919 | + */
| 920 | + if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
| 921 | + if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
| 922 | + low_pfn = end_pfn;
| 923 | + goto isolate_abort;
| 924 | + }
772 | 925 | valid_page = page;
| 926 | + }
773 | 927 |
774 | 928 | /*
775 | 929 | * Skip if free. We read page order here without zone lock
.. | ..
778 | 932 | * potential isolation targets.
779 | 933 | */
780 | 934 | if (PageBuddy(page)) {
781 | | - unsigned long freepage_order = page_order_unsafe(page);
| 935 | + unsigned long freepage_order = buddy_order_unsafe(page);
782 | 936 |
783 | 937 | /*
784 | 938 | * Without lock, we cannot be sure that what we got is
.. | ..
792 | 946 |
793 | 947 | /*
794 | 948 | * Regardless of being on LRU, compound pages such as THP and
795 | | - * hugetlbfs are not to be compacted. We can potentially save
796 | | - * a lot of iterations if we skip them at once. The check is
797 | | - * racy, but we can consider only valid values and the only
798 | | - * danger is skipping too much.
| 949 | + * hugetlbfs are not to be compacted unless we are attempting
| 950 | + * an allocation much larger than the huge page size (eg CMA).
| 951 | + * We can potentially save a lot of iterations if we skip them
| 952 | + * at once. The check is racy, but we can consider only valid
| 953 | + * values and the only danger is skipping too much.
799 | 954 | */
800 | | - if (PageCompound(page)) {
| 955 | + if (PageCompound(page) && !cc->alloc_contig) {
801 | 956 | const unsigned int order = compound_order(page);
802 | 957 |
803 | 958 | if (likely(order < MAX_ORDER))
.. | ..
818 | 973 | if (unlikely(__PageMovable(page)) &&
819 | 974 | !PageIsolated(page)) {
820 | 975 | if (locked) {
821 | | - spin_unlock_irqrestore(zone_lru_lock(zone),
| 976 | + spin_unlock_irqrestore(&pgdat->lru_lock,
822 | 977 | flags);
823 | 978 | locked = false;
824 | 979 | }
.. | ..
848 | 1003 |
849 | 1004 | /* If we already hold the lock, we can skip some rechecking */
850 | 1005 | if (!locked) {
851 | | - locked = compact_trylock_irqsave(zone_lru_lock(zone),
| 1006 | + locked = compact_lock_irqsave(&pgdat->lru_lock,
852 | 1007 | &flags, cc);
853 | | - if (!locked)
854 | | - break;
| 1008 | +
| 1009 | + /* Try to get exclusive access under lock */
| 1010 | + if (!skip_updated) {
| 1011 | + skip_updated = true;
| 1012 | + if (test_and_set_skip(cc, page, low_pfn))
| 1013 | + goto isolate_abort;
| 1014 | + }
855 | 1015 |
856 | 1016 | /* Recheck PageLRU and PageCompound under lock */
857 | 1017 | if (!PageLRU(page))
.. | ..
862 | 1022 | * and it's on LRU. It can only be a THP so the order
863 | 1023 | * is safe to read and it's 0 for tail pages.
864 | 1024 | */
865 | | - if (unlikely(PageCompound(page))) {
866 | | - low_pfn += (1UL << compound_order(page)) - 1;
| 1025 | + if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
| 1026 | + low_pfn += compound_nr(page) - 1;
867 | 1027 | goto isolate_fail;
868 | 1028 | }
869 | 1029 | }
870 | 1030 |
871 | | - lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
| 1031 | + lruvec = mem_cgroup_page_lruvec(page, pgdat);
872 | 1032 |
873 | 1033 | /* Try isolate the page */
874 | 1034 | if (__isolate_lru_page(page, isolate_mode) != 0)
875 | 1035 | goto isolate_fail;
876 | 1036 |
877 | | - VM_BUG_ON_PAGE(PageCompound(page), page);
| 1037 | + /* The whole page is taken off the LRU; skip the tail pages. */
| 1038 | + if (PageCompound(page))
| 1039 | + low_pfn += compound_nr(page) - 1;
878 | 1040 |
879 | 1041 | /* Successfully isolated */
880 | 1042 | del_page_from_lru_list(page, lruvec, page_lru(page));
881 | | - inc_node_page_state(page,
882 | | - NR_ISOLATED_ANON + page_is_file_cache(page));
| 1043 | + mod_node_page_state(page_pgdat(page),
| 1044 | + NR_ISOLATED_ANON + page_is_file_lru(page),
| 1045 | + thp_nr_pages(page));
883 | 1046 |
884 | 1047 | isolate_success:
885 | 1048 | list_add(&page->lru, &cc->migratepages);
886 | | - cc->nr_migratepages++;
887 | | - nr_isolated++;
| 1049 | + cc->nr_migratepages += compound_nr(page);
| 1050 | + nr_isolated += compound_nr(page);
888 | 1051 |
889 | 1052 | /*
890 | | - * Record where we could have freed pages by migration and not
891 | | - * yet flushed them to buddy allocator.
892 | | - * - this is the lowest page that was isolated and likely be
893 | | - * then freed by migration.
| 1053 | + * Avoid isolating too much unless this block is being
| 1054 | + * rescanned (e.g. dirty/writeback pages, parallel allocation)
| 1055 | + * or a lock is contended. For contention, isolate quickly to
| 1056 | + * potentially remove one source of contention.
894 | 1057 | */
895 | | - if (!cc->last_migrated_pfn)
896 | | - cc->last_migrated_pfn = low_pfn;
897 | | -
898 | | - /* Avoid isolating too much */
899 | | - if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
| 1058 | + if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
| 1059 | + !cc->rescan && !cc->contended) {
900 | 1060 | ++low_pfn;
901 | 1061 | break;
902 | 1062 | }
.. | ..
913 | 1073 | */
914 | 1074 | if (nr_isolated) {
915 | 1075 | if (locked) {
916 | | - spin_unlock_irqrestore(zone_lru_lock(zone), flags);
| 1076 | + spin_unlock_irqrestore(&pgdat->lru_lock, flags);
917 | 1077 | locked = false;
918 | 1078 | }
919 | 1079 | putback_movable_pages(&cc->migratepages);
920 | 1080 | cc->nr_migratepages = 0;
921 | | - cc->last_migrated_pfn = 0;
922 | 1081 | nr_isolated = 0;
923 | 1082 | }
924 | 1083 |
.. | ..
939 | 1098 | if (unlikely(low_pfn > end_pfn))
940 | 1099 | low_pfn = end_pfn;
941 | 1100 |
| 1101 | +isolate_abort:
942 | 1102 | if (locked)
943 | | - spin_unlock_irqrestore(zone_lru_lock(zone), flags);
| 1103 | + spin_unlock_irqrestore(&pgdat->lru_lock, flags);
944 | 1104 |
945 | 1105 | /*
946 | | - * Update the pageblock-skip information and cached scanner pfn,
947 | | - * if the whole pageblock was scanned without isolating any page.
| 1106 | + * Update the cached scanner pfn once the pageblock has been scanned.
| 1107 | + * Pages will either be migrated in which case there is no point
| 1108 | + * scanning in the near future or migration failed in which case the
| 1109 | + * failure reason may persist. The block is marked for skipping if
| 1110 | + * there were no pages isolated in the block or if the block is
| 1111 | + * rescanned twice in a row.
948 | 1112 | */
949 | | - if (low_pfn == end_pfn)
950 | | - update_pageblock_skip(cc, valid_page, nr_isolated, true);
| 1113 | + if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
| 1114 | + if (valid_page && !skip_updated)
| 1115 | + set_pageblock_skip(valid_page);
| 1116 | + update_cached_migrate(cc, low_pfn);
| 1117 | + }
951 | 1118 |
952 | 1119 | trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
953 | 1120 | nr_scanned, nr_isolated);
954 | 1121 |
| 1122 | +fatal_pending:
955 | 1123 | cc->total_migrate_scanned += nr_scanned;
956 | 1124 | if (nr_isolated)
957 | 1125 | count_compact_events(COMPACTISOLATED, nr_isolated);
.. | ..
998 | 1166 | if (!pfn)
999 | 1167 | break;
1000 | 1168 |
1001 | | - if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
| 1169 | + if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
1002 | 1170 | break;
1003 | 1171 | }
1004 | 1172 |
.. | ..
1012 | 1180 | struct page *page)
1013 | 1181 | {
1014 | 1182 | int block_mt;
| 1183 | +
| 1184 | + if (pageblock_skip_persistent(page))
| 1185 | + return false;
1015 | 1186 |
1016 | 1187 | if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1017 | 1188 | return true;
.. | ..
1035 | 1206 | * the only small danger is that we skip a potentially suitable
1036 | 1207 | * pageblock, so it's not worth to check order for valid range.
1037 | 1208 | */
1038 | | - if (page_order_unsafe(page) >= pageblock_order)
| 1209 | + if (buddy_order_unsafe(page) >= pageblock_order)
1039 | 1210 | return false;
1040 | 1211 | }
1041 | 1212 |
.. | ..
1050 | 1221 | return false;
1051 | 1222 | }
1052 | 1223 |
| 1224 | +static inline unsigned int
| 1225 | +freelist_scan_limit(struct compact_control *cc)
| 1226 | +{
| 1227 | + unsigned short shift = BITS_PER_LONG - 1;
| 1228 | +
| 1229 | + return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
| 1230 | +}
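Assuming COMPACT_CLUSTER_MAX is 32 (its usual value, equal to SWAP_CLUSTER_MAX), the limit decays with consecutive fast-search failures as (32 >> fail) + 1: 33, 17, 9, 5, 3, 2, then a floor of 1 from six failures onward, so a string of failures quickly shrinks the fast search to almost nothing.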
1053 | 1232 | /*
1054 | 1233 | * Test whether the free scanner has reached the same or lower pageblock than
1055 | 1234 | * the migration scanner, and compaction should thus terminate.
.. | ..
1058 | 1237 | {
1059 | 1238 | return (cc->free_pfn >> pageblock_order)
1060 | 1239 | <= (cc->migrate_pfn >> pageblock_order);
| 1240 | +}
| 1241 | +
| 1242 | +/*
| 1243 | + * Used when scanning for a suitable migration target which scans freelists
| 1244 | + * in reverse. Reorders the list so that the unscanned pages are scanned
| 1245 | + * first on the next iteration of the free scanner.
| 1246 | + */
| 1247 | +static void
| 1248 | +move_freelist_head(struct list_head *freelist, struct page *freepage)
| 1249 | +{
| 1250 | + LIST_HEAD(sublist);
| 1251 | +
| 1252 | + if (!list_is_last(freelist, &freepage->lru)) {
| 1253 | + list_cut_before(&sublist, freelist, &freepage->lru);
| 1254 | + if (!list_empty(&sublist))
| 1255 | + list_splice_tail(&sublist, freelist);
| 1256 | + }
| 1257 | +}
| 1258 | +
| 1259 | +/*
| 1260 | + * Similar to move_freelist_head except used by the migration scanner
| 1261 | + * when scanning forward. It's possible for these list operations to
| 1262 | + * move against each other if they search the free list exactly in
| 1263 | + * lockstep.
| 1264 | + */
| 1265 | +static void
| 1266 | +move_freelist_tail(struct list_head *freelist, struct page *freepage)
| 1267 | +{
| 1268 | + LIST_HEAD(sublist);
| 1269 | +
| 1270 | + if (!list_is_first(freelist, &freepage->lru)) {
| 1271 | + list_cut_position(&sublist, freelist, &freepage->lru);
| 1272 | + if (!list_empty(&sublist))
| 1273 | + list_splice_tail(&sublist, freelist);
| 1274 | + }
| 1275 | +}
| 1276 | +
| 1277 | +static void
| 1278 | +fast_isolate_around(struct compact_control *cc, unsigned long pfn)
| 1279 | +{
| 1280 | + unsigned long start_pfn, end_pfn;
| 1281 | + struct page *page;
| 1282 | +
| 1283 | + /* Do not search around if there are enough pages already */
| 1284 | + if (cc->nr_freepages >= cc->nr_migratepages)
| 1285 | + return;
| 1286 | +
| 1287 | + /* Minimise scanning during async compaction */
| 1288 | + if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
| 1289 | + return;
| 1290 | +
| 1291 | + /* Pageblock boundaries */
| 1292 | + start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
| 1293 | + end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
| 1294 | +
| 1295 | + page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
| 1296 | + if (!page)
| 1297 | + return;
| 1298 | +
| 1299 | + isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
| 1300 | +
| 1301 | + /* Skip this pageblock in the future as it's full or nearly full */
| 1302 | + if (cc->nr_freepages < cc->nr_migratepages)
| 1303 | + set_pageblock_skip(page);
| 1304 | +
| 1305 | + return;
| 1306 | +}
| 1307 | +
| 1308 | +/* Search orders in round-robin fashion */
| 1309 | +static int next_search_order(struct compact_control *cc, int order)
| 1310 | +{
| 1311 | + order--;
| 1312 | + if (order < 0)
| 1313 | + order = cc->order - 1;
| 1314 | +
| 1315 | + /* Search wrapped around? */
| 1316 | + if (order == cc->search_order) {
| 1317 | + cc->search_order--;
| 1318 | + if (cc->search_order < 0)
| 1319 | + cc->search_order = cc->order - 1;
| 1320 | + return -1;
| 1321 | + }
| 1322 | +
| 1323 | + return order;
| 1324 | +}
| 1325 | +
| 1326 | +static unsigned long
| 1327 | +fast_isolate_freepages(struct compact_control *cc)
| 1328 | +{
| 1329 | + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
| 1330 | + unsigned int nr_scanned = 0;
| 1331 | + unsigned long low_pfn, min_pfn, highest = 0;
| 1332 | + unsigned long nr_isolated = 0;
| 1333 | + unsigned long distance;
| 1334 | + struct page *page = NULL;
| 1335 | + bool scan_start = false;
| 1336 | + int order;
| 1337 | +
| 1338 | + /* Full compaction passes in a negative order */
| 1339 | + if (cc->order <= 0)
| 1340 | + return cc->free_pfn;
| 1341 | +
| 1342 | + /*
| 1343 | + * If starting the scan, use a deeper search and use the highest
| 1344 | + * PFN found if a suitable one is not found.
| 1345 | + */
| 1346 | + if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
| 1347 | + limit = pageblock_nr_pages >> 1;
| 1348 | + scan_start = true;
| 1349 | + }
| 1350 | +
| 1351 | + /*
| 1352 | + * Preferred point is in the top quarter of the scan space but take
| 1353 | + * a pfn from the top half if the search is problematic.
| 1354 | + */
| 1355 | + distance = (cc->free_pfn - cc->migrate_pfn);
| 1356 | + low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
| 1357 | + min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
| 1358 | +
| 1359 | + if (WARN_ON_ONCE(min_pfn > low_pfn))
| 1360 | + low_pfn = min_pfn;
| 1361 | +
| 1362 | + /*
| 1363 | + * Search starts from the last successful isolation order or the next
| 1364 | + * order to search after a previous failure
| 1365 | + */
| 1366 | + cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
| 1367 | +
| 1368 | + for (order = cc->search_order;
| 1369 | + !page && order >= 0;
| 1370 | + order = next_search_order(cc, order)) {
| 1371 | + struct free_area *area = &cc->zone->free_area[order];
| 1372 | + struct list_head *freelist;
| 1373 | + struct page *freepage;
| 1374 | + unsigned long flags;
| 1375 | + unsigned int order_scanned = 0;
| 1376 | + unsigned long high_pfn = 0;
| 1377 | +
| 1378 | + if (!area->nr_free)
| 1379 | + continue;
| 1380 | +
| 1381 | + spin_lock_irqsave(&cc->zone->lock, flags);
| 1382 | + freelist = &area->free_list[MIGRATE_MOVABLE];
| 1383 | + list_for_each_entry_reverse(freepage, freelist, lru) {
| 1384 | + unsigned long pfn;
| 1385 | +
| 1386 | + order_scanned++;
| 1387 | + nr_scanned++;
| 1388 | + pfn = page_to_pfn(freepage);
| 1389 | +
| 1390 | + if (pfn >= highest)
| 1391 | + highest = max(pageblock_start_pfn(pfn),
| 1392 | + cc->zone->zone_start_pfn);
| 1393 | +
| 1394 | + if (pfn >= low_pfn) {
| 1395 | + cc->fast_search_fail = 0;
| 1396 | + cc->search_order = order;
| 1397 | + page = freepage;
| 1398 | + break;
| 1399 | + }
| 1400 | +
| 1401 | + if (pfn >= min_pfn && pfn > high_pfn) {
| 1402 | + high_pfn = pfn;
| 1403 | +
| 1404 | + /* Shorten the scan if a candidate is found */
| 1405 | + limit >>= 1;
| 1406 | + }
| 1407 | +
| 1408 | + if (order_scanned >= limit)
| 1409 | + break;
| 1410 | + }
| 1411 | +
| 1412 | + /* Use a minimum pfn if a preferred one was not found */
| 1413 | + if (!page && high_pfn) {
| 1414 | + page = pfn_to_page(high_pfn);
| 1415 | +
| 1416 | + /* Update freepage for the list reorder below */
| 1417 | + freepage = page;
| 1418 | + }
| 1419 | +
| 1420 | + /* Reorder so a future search skips recent pages */
| 1421 | + move_freelist_head(freelist, freepage);
| 1422 | +
| 1423 | + /* Isolate the page if available */
| 1424 | + if (page) {
| 1425 | + if (__isolate_free_page(page, order)) {
| 1426 | + set_page_private(page, order);
| 1427 | + nr_isolated = 1 << order;
| 1428 | + cc->nr_freepages += nr_isolated;
| 1429 | + list_add_tail(&page->lru, &cc->freepages);
| 1430 | + count_compact_events(COMPACTISOLATED, nr_isolated);
| 1431 | + } else {
| 1432 | + /* If isolation fails, abort the search */
| 1433 | + order = cc->search_order + 1;
| 1434 | + page = NULL;
| 1435 | + }
| 1436 | + }
| 1437 | +
| 1438 | + spin_unlock_irqrestore(&cc->zone->lock, flags);
| 1439 | +
| 1440 | + /*
| 1441 | + * Smaller scan on next order so the total scan is related
| 1442 | + * to freelist_scan_limit.
| 1443 | + */
| 1444 | + if (order_scanned >= limit)
| 1445 | + limit = min(1U, limit >> 1);
| 1446 | + }
| 1447 | +
| 1448 | + if (!page) {
| 1449 | + cc->fast_search_fail++;
| 1450 | + if (scan_start) {
| 1451 | + /*
| 1452 | + * Use the highest PFN found above min. If one was
| 1453 | + * not found, be pessimistic for direct compaction
| 1454 | + * and use the min mark.
| 1455 | + */
| 1456 | + if (highest) {
| 1457 | + page = pfn_to_page(highest);
| 1458 | + cc->free_pfn = highest;
| 1459 | + } else {
| 1460 | + if (cc->direct_compaction && pfn_valid(min_pfn)) {
| 1461 | + page = pageblock_pfn_to_page(min_pfn,
| 1462 | + min(pageblock_end_pfn(min_pfn),
| 1463 | + zone_end_pfn(cc->zone)),
| 1464 | + cc->zone);
| 1465 | + cc->free_pfn = min_pfn;
| 1466 | + }
| 1467 | + }
| 1468 | + }
| 1469 | + }
| 1470 | +
| 1471 | + if (highest && highest >= cc->zone->compact_cached_free_pfn) {
| 1472 | + highest -= pageblock_nr_pages;
| 1473 | + cc->zone->compact_cached_free_pfn = highest;
| 1474 | + }
| 1475 | +
| 1476 | + cc->total_free_scanned += nr_scanned;
| 1477 | + if (!page)
| 1478 | + return cc->free_pfn;
| 1479 | +
| 1480 | + low_pfn = page_to_pfn(page);
| 1481 | + fast_isolate_around(cc, low_pfn);
| 1482 | + return low_pfn;
1061 | 1483 | }
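To make the scan window concrete (illustrative numbers, not from the patch): with migrate_pfn at 0x10000 and free_pfn at 0x90000, distance is 0x80000, so low_pfn is about 0x90000 - 0x20000 = 0x70000 (start of the top quarter) and min_pfn about 0x90000 - 0x40000 = 0x50000 (start of the top half), each rounded down to a pageblock boundary. Separately, the two `limit = min(1U, ...)` expressions in this function clamp the scan budget to at most one entry, which looks inverted; later upstream kernels use max() there so the budget shrinks but never drops below one.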
1062 | 1484 |
1063 | 1485 | /*
.. | ..
1073 | 1495 | unsigned long block_end_pfn; /* end of current pageblock */
1074 | 1496 | unsigned long low_pfn; /* lowest pfn scanner is able to scan */
1075 | 1497 | struct list_head *freelist = &cc->freepages;
| 1498 | + unsigned int stride;
| 1499 | +
| 1500 | + /* Try a small search of the free lists for a candidate */
| 1501 | + isolate_start_pfn = fast_isolate_freepages(cc);
| 1502 | + if (cc->nr_freepages)
| 1503 | + goto splitmap;
1076 | 1504 |
1077 | 1505 | /*
1078 | 1506 | * Initialise the free scanner. The starting point is where we last
.. | ..
1081 | 1509 | * this pfn aligned down to the pageblock boundary, because we do
1082 | 1510 | * block_start_pfn -= pageblock_nr_pages in the for loop.
1083 | 1511 | * For ending point, take care when isolating in last pageblock of a
1084 | | - * a zone which ends in the middle of a pageblock.
| 1512 | + * zone which ends in the middle of a pageblock.
1085 | 1513 | * The low boundary is the end of the pageblock the migration scanner
1086 | 1514 | * is using.
1087 | 1515 | */
1088 | 1516 | isolate_start_pfn = cc->free_pfn;
1089 | | - block_start_pfn = pageblock_start_pfn(cc->free_pfn);
| 1517 | + block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
1090 | 1518 | block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1091 | 1519 | zone_end_pfn(zone));
1092 | 1520 | low_pfn = pageblock_end_pfn(cc->migrate_pfn);
| 1521 | + stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1093 | 1522 |
1094 | 1523 | /*
1095 | 1524 | * Isolate free pages until enough are available to migrate the
.. | ..
1100 | 1529 | block_end_pfn = block_start_pfn,
1101 | 1530 | block_start_pfn -= pageblock_nr_pages,
1102 | 1531 | isolate_start_pfn = block_start_pfn) {
| 1532 | + unsigned long nr_isolated;
| 1533 | +
1103 | 1534 | /*
1104 | 1535 | * This can iterate a massively long zone without finding any
1105 | | - * suitable migration targets, so periodically check if we need
1106 | | - * to schedule, or even abort async compaction.
| 1536 | + * suitable migration targets, so periodically check resched.
1107 | 1537 | */
1108 | | - if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1109 | | - && compact_should_abort(cc))
1110 | | - break;
| 1538 | + if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
| 1539 | + cond_resched();
1111 | 1540 |
1112 | 1541 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1113 | 1542 | zone);
.. | ..
1123 | 1552 | continue;
1124 | 1553 |
1125 | 1554 | /* Found a block suitable for isolating free pages from. */
1126 | | - isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1127 | | - freelist, false);
| 1555 | + nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
| 1556 | + block_end_pfn, freelist, stride, false);
1128 | 1557 |
1129 | | - /*
1130 | | - * If we isolated enough freepages, or aborted due to lock
1131 | | - * contention, terminate.
1132 | | - */
1133 | | - if ((cc->nr_freepages >= cc->nr_migratepages)
1134 | | - || cc->contended) {
| 1558 | + /* Update the skip hint if the full pageblock was scanned */
| 1559 | + if (isolate_start_pfn == block_end_pfn)
| 1560 | + update_pageblock_skip(cc, page, block_start_pfn);
| 1561 | +
| 1562 | + /* Are enough freepages isolated? */
| 1563 | + if (cc->nr_freepages >= cc->nr_migratepages) {
1135 | 1564 | if (isolate_start_pfn >= block_end_pfn) {
1136 | 1565 | /*
1137 | 1566 | * Restart at previous pageblock if more
.. | ..
1148 | 1577 | */
1149 | 1578 | break;
1150 | 1579 | }
1151 | | - }
1152 | 1580 |
1153 | | - /* __isolate_free_page() does not map the pages */
1154 | | - map_pages(freelist);
| 1581 | + /* Adjust stride depending on isolation */
| 1582 | + if (nr_isolated) {
| 1583 | + stride = 1;
| 1584 | + continue;
| 1585 | + }
| 1586 | + stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
| 1587 | + }
1155 | 1588 |
1156 | 1589 | /*
1157 | 1590 | * Record where the free scanner will restart next time. Either we
.. | ..
1160 | 1593 | * and the loop terminated due to isolate_start_pfn < low_pfn
1161 | 1594 | */
1162 | 1595 | cc->free_pfn = isolate_start_pfn;
| 1596 | +
| 1597 | +splitmap:
| 1598 | + /* __isolate_free_page() does not map the pages */
| 1599 | + split_map_pages(freelist);
1163 | 1600 | }
1164 | 1601 |
1165 | 1602 | /*
.. | ..
1172 | 1609 | struct compact_control *cc = (struct compact_control *)data;
1173 | 1610 | struct page *freepage;
1174 | 1611 |
1175 | | - /*
1176 | | - * Isolate free pages if necessary, and if we are not aborting due to
1177 | | - * contention.
1178 | | - */
1179 | 1612 | if (list_empty(&cc->freepages)) {
1180 | | - if (!cc->contended)
1181 | | - isolate_freepages(cc);
| 1613 | + isolate_freepages(cc);
1182 | 1614 |
1183 | 1615 | if (list_empty(&cc->freepages))
1184 | 1616 | return NULL;
.. | ..
1215 | 1647 | * Allow userspace to control policy on scanning the unevictable LRU for
1216 | 1648 | * compactable pages.
1217 | 1649 | */
| 1650 | +#ifdef CONFIG_PREEMPT_RT
| 1651 | +int sysctl_compact_unevictable_allowed __read_mostly = 0;
| 1652 | +#else
1218 | 1653 | int sysctl_compact_unevictable_allowed __read_mostly = 1;
| 1654 | +#endif
| 1655 | +
| 1656 | +static inline void
| 1657 | +update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
| 1658 | +{
| 1659 | + if (cc->fast_start_pfn == ULONG_MAX)
| 1660 | + return;
| 1661 | +
| 1662 | + if (!cc->fast_start_pfn)
| 1663 | + cc->fast_start_pfn = pfn;
| 1664 | +
| 1665 | + cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
| 1666 | +}
| 1667 | +
| 1668 | +static inline unsigned long
| 1669 | +reinit_migrate_pfn(struct compact_control *cc)
| 1670 | +{
| 1671 | + if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
| 1672 | + return cc->migrate_pfn;
| 1673 | +
| 1674 | + cc->migrate_pfn = cc->fast_start_pfn;
| 1675 | + cc->fast_start_pfn = ULONG_MAX;
| 1676 | +
| 1677 | + return cc->migrate_pfn;
| 1678 | +}
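The two helpers above give cc->fast_start_pfn a three-state lifecycle: 0 means unset, a PFN is then lowered monotonically by update_fast_start_pfn() as fast searches hand out blocks, and ULONG_MAX marks it consumed once reinit_migrate_pfn() has rewound the migration scanner to it after a failed fast search.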
---|
| 1679 | + |
---|
| 1680 | +/* |
---|
| 1681 | + * Briefly search the free lists for a migration source that already has |
---|
| 1682 | + * some free pages to reduce the number of pages that need migration |
---|
| 1683 | + * before a pageblock is free. |
---|
| 1684 | + */ |
---|
| 1685 | +static unsigned long fast_find_migrateblock(struct compact_control *cc) |
---|
| 1686 | +{ |
---|
| 1687 | + unsigned int limit = freelist_scan_limit(cc); |
---|
| 1688 | + unsigned int nr_scanned = 0; |
---|
| 1689 | + unsigned long distance; |
---|
| 1690 | + unsigned long pfn = cc->migrate_pfn; |
---|
| 1691 | + unsigned long high_pfn; |
---|
| 1692 | + int order; |
---|
| 1693 | + bool found_block = false; |
---|
| 1694 | + |
---|
| 1695 | + /* Skip hints are relied on to avoid repeats on the fast search */ |
---|
| 1696 | + if (cc->ignore_skip_hint) |
---|
| 1697 | + return pfn; |
---|
| 1698 | + |
---|
| 1699 | + /* |
---|
| 1700 | + * If the migrate_pfn is not at the start of a zone or the start |
---|
| 1701 | + * of a pageblock then assume this is a continuation of a previous |
---|
| 1702 | + * scan restarted due to COMPACT_CLUSTER_MAX. |
---|
| 1703 | + */ |
---|
| 1704 | + if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) |
---|
| 1705 | + return pfn; |
---|
| 1706 | + |
---|
| 1707 | + /* |
---|
| 1708 | + * For smaller orders, just linearly scan as the number of pages |
---|
| 1709 | + * to migrate should be relatively small and does not necessarily |
---|
| 1710 | + * justify freeing up a large block for a small allocation. |
---|
| 1711 | + */ |
---|
| 1712 | + if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) |
---|
| 1713 | + return pfn; |
---|
| 1714 | + |
---|
| 1715 | + /* |
---|
| 1716 | + * Only allow kcompactd and direct requests for movable pages to |
---|
| 1717 | + * quickly clear out a MOVABLE pageblock for allocation. This |
---|
| 1718 | + * reduces the risk that a large movable pageblock is freed for |
---|
| 1719 | + * an unmovable/reclaimable small allocation. |
---|
| 1720 | + */ |
---|
| 1721 | + if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) |
---|
| 1722 | + return pfn; |
---|
| 1723 | + |
---|
| 1724 | + /* |
---|
| 1725 | + * When starting the migration scanner, pick any pageblock within the |
---|
| 1726 | + * first half of the search space. Otherwise try and pick a pageblock |
---|
| 1727 | + * within the first eighth to reduce the chances that a migration |
---|
| 1728 | + * target later becomes a source. |
---|
| 1729 | + */ |
---|
| 1730 | + distance = (cc->free_pfn - cc->migrate_pfn) >> 1; |
---|
| 1731 | + if (cc->migrate_pfn != cc->zone->zone_start_pfn) |
---|
| 1732 | + distance >>= 2; |
---|
| 1733 | + high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); |
---|
| 1734 | + |
---|
| 1735 | + for (order = cc->order - 1; |
---|
| 1736 | + order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; |
---|
| 1737 | + order--) { |
---|
| 1738 | + struct free_area *area = &cc->zone->free_area[order]; |
---|
| 1739 | + struct list_head *freelist; |
---|
| 1740 | + unsigned long flags; |
---|
| 1741 | + struct page *freepage; |
---|
| 1742 | + |
---|
| 1743 | + if (!area->nr_free) |
---|
| 1744 | + continue; |
---|
| 1745 | + |
---|
| 1746 | + spin_lock_irqsave(&cc->zone->lock, flags); |
---|
| 1747 | + freelist = &area->free_list[MIGRATE_MOVABLE]; |
---|
| 1748 | + list_for_each_entry(freepage, freelist, lru) { |
---|
| 1749 | + unsigned long free_pfn; |
---|
| 1750 | + |
---|
| 1751 | + if (nr_scanned++ >= limit) { |
---|
| 1752 | + move_freelist_tail(freelist, freepage); |
---|
| 1753 | + break; |
---|
| 1754 | + } |
---|
| 1755 | + |
---|
| 1756 | + free_pfn = page_to_pfn(freepage); |
---|
| 1757 | + if (free_pfn < high_pfn) { |
---|
| 1758 | + /* |
---|
| 1759 | + * Avoid if skipped recently. Ideally it would |
---|
| 1760 | + * move to the tail but even safe iteration of |
---|
| 1761 | + * the list assumes an entry is deleted, not |
---|
| 1762 | + * reordered. |
---|
| 1763 | + */ |
---|
| 1764 | + if (get_pageblock_skip(freepage)) |
---|
| 1765 | + continue; |
---|
| 1766 | + |
---|
| 1767 | + /* Reorder so a future search skips recent pages */ |
---|
| 1768 | + move_freelist_tail(freelist, freepage); |
---|
| 1769 | + |
---|
| 1770 | + update_fast_start_pfn(cc, free_pfn); |
---|
| 1771 | + pfn = pageblock_start_pfn(free_pfn); |
---|
| 1772 | + if (pfn < cc->zone->zone_start_pfn) |
---|
| 1773 | + pfn = cc->zone->zone_start_pfn; |
---|
| 1774 | + cc->fast_search_fail = 0; |
---|
| 1775 | + found_block = true; |
---|
| 1776 | + set_pageblock_skip(freepage); |
---|
| 1777 | + break; |
---|
| 1778 | + } |
---|
| 1779 | + } |
---|
| 1780 | + spin_unlock_irqrestore(&cc->zone->lock, flags); |
---|
| 1781 | + } |
---|
| 1782 | + |
---|
| 1783 | + cc->total_migrate_scanned += nr_scanned; |
---|
| 1784 | + |
---|
| 1785 | + /* |
---|
| 1786 | + * If fast scanning failed, then use a cached entry for a pageblock |
---|
| 1787 | + * that had free pages as the basis for starting a linear scan. |
---|
| 1788 | + */ |
---|
| 1789 | + if (!found_block) { |
---|
| 1790 | + cc->fast_search_fail++; |
---|
| 1791 | + pfn = reinit_migrate_pfn(cc); |
---|
| 1792 | + } |
---|
| 1793 | + return pfn; |
---|
| 1794 | +} |
---|
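
As a rough illustration of the window computed above: the fast search only considers free pages below a cut-off (high_pfn) derived from the gap between the two scanners. A minimal sketch, not part of the patch (the standalone helper form is illustrative):

	/*
	 * Sketch: end of the fast-search window. Mirrors the distance
	 * logic in fast_find_migrateblock() above.
	 */
	static unsigned long fast_search_window_end(struct compact_control *cc)
	{
		/* Half of the space between the migrate and free scanners... */
		unsigned long distance = (cc->free_pfn - cc->migrate_pfn) >> 1;

		/*
		 * ...narrowed to an eighth once the migrate scanner has moved
		 * past the zone start, so a block chosen now is less likely
		 * to become a free-scanner target later.
		 */
		if (cc->migrate_pfn != cc->zone->zone_start_pfn)
			distance >>= 2;

		return pageblock_start_pfn(cc->migrate_pfn + distance);
	}
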
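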
1219 | 1795 | |
---|
1220 | 1796 | /* |
---|
1221 | 1797 | * Isolate all pages that can be migrated from the first suitable block, |
---|
1222 | 1798 | * starting at the block pointed to by the migrate scanner pfn within |
---|
1223 | 1799 | * compact_control. |
---|
1224 | 1800 | */ |
---|
1225 | | -static isolate_migrate_t isolate_migratepages(struct zone *zone, |
---|
1226 | | - struct compact_control *cc) |
---|
| 1801 | +static isolate_migrate_t isolate_migratepages(struct compact_control *cc) |
---|
1227 | 1802 | { |
---|
1228 | 1803 | unsigned long block_start_pfn; |
---|
1229 | 1804 | unsigned long block_end_pfn; |
---|
.. | .. |
---|
1232 | 1807 | const isolate_mode_t isolate_mode = |
---|
1233 | 1808 | (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | |
---|
1234 | 1809 | (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); |
---|
| 1810 | + bool fast_find_block; |
---|
1235 | 1811 | |
---|
1236 | 1812 | /* |
---|
1237 | 1813 | * Start at where we last stopped, or beginning of the zone as |
---|
1238 | | - * initialized by compact_zone() |
---|
| 1814 | + * initialized by compact_zone(). The first failure will use |
---|
| 1815 | + * the lowest PFN as the starting point for linear scanning. |
---|
1239 | 1816 | */ |
---|
1240 | | - low_pfn = cc->migrate_pfn; |
---|
| 1817 | + low_pfn = fast_find_migrateblock(cc); |
---|
1241 | 1818 | block_start_pfn = pageblock_start_pfn(low_pfn); |
---|
1242 | | - if (block_start_pfn < zone->zone_start_pfn) |
---|
1243 | | - block_start_pfn = zone->zone_start_pfn; |
---|
| 1819 | + if (block_start_pfn < cc->zone->zone_start_pfn) |
---|
| 1820 | + block_start_pfn = cc->zone->zone_start_pfn; |
---|
| 1821 | + |
---|
| 1822 | + /* |
---|
| 1823 | + * fast_find_migrateblock() marks a pageblock skipped, so to avoid |
---|
| 1824 | + * the isolation_suitable check below, check whether the fast |
---|
| 1825 | + * search was successful. |
---|
| 1826 | + */ |
---|
| 1827 | + fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; |
---|
1244 | 1828 | |
---|
1245 | 1829 | /* Only scan within a pageblock boundary */ |
---|
1246 | 1830 | block_end_pfn = pageblock_end_pfn(low_pfn); |
---|
.. | .. |
---|
1250 | 1834 | * Do not cross the free scanner. |
---|
1251 | 1835 | */ |
---|
1252 | 1836 | for (; block_end_pfn <= cc->free_pfn; |
---|
| 1837 | + fast_find_block = false, |
---|
1253 | 1838 | low_pfn = block_end_pfn, |
---|
1254 | 1839 | block_start_pfn = block_end_pfn, |
---|
1255 | 1840 | block_end_pfn += pageblock_nr_pages) { |
---|
.. | .. |
---|
1257 | 1842 | /* |
---|
1258 | 1843 | * This can potentially iterate a massively long zone with |
---|
1259 | 1844 | * many pageblocks unsuitable, so periodically check if we |
---|
1260 | | - * need to schedule, or even abort async compaction. |
---|
| 1845 | + * need to schedule. |
---|
1261 | 1846 | */ |
---|
1262 | | - if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) |
---|
1263 | | - && compact_should_abort(cc)) |
---|
1264 | | - break; |
---|
| 1847 | + if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) |
---|
| 1848 | + cond_resched(); |
---|
1265 | 1849 | |
---|
1266 | | - page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
---|
1267 | | - zone); |
---|
| 1850 | + page = pageblock_pfn_to_page(block_start_pfn, |
---|
| 1851 | + block_end_pfn, cc->zone); |
---|
1268 | 1852 | if (!page) |
---|
1269 | 1853 | continue; |
---|
1270 | 1854 | |
---|
1271 | | - /* If isolation recently failed, do not retry */ |
---|
1272 | | - if (!isolation_suitable(cc, page)) |
---|
| 1855 | + /* |
---|
| 1856 | + * If isolation recently failed, do not retry. Only check the |
---|
| 1857 | + * pageblock once: COMPACT_CLUSTER_MAX can cause a pageblock |
---|
| 1858 | + * to be visited multiple times. Assume the skip hint was |
---|
| 1859 | + * checked before the block was marked "skip", so other |
---|
| 1860 | + * compaction instances do not rescan the same block. |
---|
| 1861 | + */ |
---|
| 1862 | + if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && |
---|
| 1863 | + !fast_find_block && !isolation_suitable(cc, page)) |
---|
1273 | 1864 | continue; |
---|
1274 | 1865 | |
---|
1275 | 1866 | /* |
---|
1276 | | - * For async compaction, also only scan in MOVABLE blocks. |
---|
1277 | | - * Async compaction is optimistic to see if the minimum amount |
---|
1278 | | - * of work satisfies the allocation. |
---|
| 1867 | + * For async compaction, also only scan in MOVABLE blocks |
---|
| 1868 | + * without huge pages. Async compaction is optimistic to see |
---|
| 1869 | + * if the minimum amount of work satisfies the allocation. |
---|
| 1870 | + * The cached PFN is updated as it's possible that all |
---|
| 1871 | + * remaining blocks between source and target are unsuitable |
---|
| 1872 | + * and the compaction scanners fail to meet. |
---|
1279 | 1873 | */ |
---|
1280 | | - if (!suitable_migration_source(cc, page)) |
---|
| 1874 | + if (!suitable_migration_source(cc, page)) { |
---|
| 1875 | + update_cached_migrate(cc, block_end_pfn); |
---|
1281 | 1876 | continue; |
---|
| 1877 | + } |
---|
1282 | 1878 | |
---|
1283 | 1879 | /* Perform the isolation */ |
---|
1284 | 1880 | low_pfn = isolate_migratepages_block(cc, low_pfn, |
---|
1285 | 1881 | block_end_pfn, isolate_mode); |
---|
1286 | 1882 | |
---|
1287 | | - if (!low_pfn || cc->contended) |
---|
| 1883 | + if (!low_pfn) |
---|
1288 | 1884 | return ISOLATE_ABORT; |
---|
1289 | 1885 | |
---|
1290 | 1886 | /* |
---|
.. | .. |
---|
1310 | 1906 | return order == -1; |
---|
1311 | 1907 | } |
---|
1312 | 1908 | |
---|
1313 | | -static enum compact_result __compact_finished(struct zone *zone, |
---|
1314 | | - struct compact_control *cc) |
---|
| 1909 | +static bool kswapd_is_running(pg_data_t *pgdat) |
---|
| 1910 | +{ |
---|
| 1911 | + return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); |
---|
| 1912 | +} |
---|
| 1913 | + |
---|
| 1914 | +/* |
---|
| 1915 | + * A zone's fragmentation score is the external fragmentation with respect to |
---|
| 1916 | + * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. |
---|
| 1917 | + */ |
---|
| 1918 | +static unsigned int fragmentation_score_zone(struct zone *zone) |
---|
| 1919 | +{ |
---|
| 1920 | + return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); |
---|
| 1921 | +} |
---|
| 1922 | + |
---|
| 1923 | +/* |
---|
| 1924 | + * A weighted zone's fragmentation score is the external fragmentation |
---|
| 1925 | + * with respect to COMPACTION_HPAGE_ORDER, scaled by the zone's size. It |
---|
| 1926 | + * returns a value in the range [0, 100]. |
---|
| 1927 | + * |
---|
| 1928 | + * The scaling factor ensures that proactive compaction focuses on larger |
---|
| 1929 | + * zones like ZONE_NORMAL, rather than smaller, specialized zones like |
---|
| 1930 | + * ZONE_DMA32. For smaller zones, the score value remains close to zero, |
---|
| 1931 | + * and thus never exceeds the high threshold for proactive compaction. |
---|
| 1932 | + */ |
---|
| 1933 | +static unsigned int fragmentation_score_zone_weighted(struct zone *zone) |
---|
| 1934 | +{ |
---|
| 1935 | + unsigned long score; |
---|
| 1936 | + |
---|
| 1937 | + score = zone->present_pages * fragmentation_score_zone(zone); |
---|
| 1938 | + return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); |
---|
| 1939 | +} |
---|
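
To make the weighting concrete (illustrative numbers): on an 8 GiB node, a 4 GiB ZONE_NORMAL with a raw extfrag_for_order() value of 60 contributes roughly 60 * 4/8 = 30 to the node score, while a 512 MiB ZONE_DMA32 with the same raw value contributes only about 60 * 0.5/8 ≈ 4. fragmentation_score_node() below sums these per-zone contributions, so the node score stays in [0, 100] and is dominated by the large zones.
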
| 1940 | + |
---|
| 1941 | +/* |
---|
| 1942 | + * The per-node proactive (background) compaction process is started by its |
---|
| 1943 | + * corresponding kcompactd thread when the node's fragmentation score |
---|
| 1944 | + * exceeds the high threshold. The compaction process remains active till |
---|
| 1945 | + * the node's score falls below the low threshold, or one of the back-off |
---|
| 1946 | + * conditions is met. |
---|
| 1947 | + */ |
---|
| 1948 | +static unsigned int fragmentation_score_node(pg_data_t *pgdat) |
---|
| 1949 | +{ |
---|
| 1950 | + unsigned int score = 0; |
---|
| 1951 | + int zoneid; |
---|
| 1952 | + |
---|
| 1953 | + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
---|
| 1954 | + struct zone *zone; |
---|
| 1955 | + |
---|
| 1956 | + zone = &pgdat->node_zones[zoneid]; |
---|
| 1957 | + score += fragmentation_score_zone_weighted(zone); |
---|
| 1958 | + } |
---|
| 1959 | + |
---|
| 1960 | + return score; |
---|
| 1961 | +} |
---|
| 1962 | + |
---|
| 1963 | +static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) |
---|
| 1964 | +{ |
---|
| 1965 | + unsigned int wmark_low; |
---|
| 1966 | + |
---|
| 1967 | + /* |
---|
| 1968 | + * Cap the low watermark to avoid excessive compaction |
---|
| 1969 | + * activity in case a user sets the proactiveness tunable |
---|
| 1970 | + * close to 100 (maximum). |
---|
| 1971 | + */ |
---|
| 1972 | + wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); |
---|
| 1973 | + return low ? wmark_low : min(wmark_low + 10, 100U); |
---|
| 1974 | +} |
---|
| 1975 | + |
---|
| 1976 | +static bool should_proactive_compact_node(pg_data_t *pgdat) |
---|
| 1977 | +{ |
---|
| 1978 | + int wmark_high; |
---|
| 1979 | + |
---|
| 1980 | + if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) |
---|
| 1981 | + return false; |
---|
| 1982 | + |
---|
| 1983 | + wmark_high = fragmentation_score_wmark(pgdat, false); |
---|
| 1984 | + return fragmentation_score_node(pgdat) > wmark_high; |
---|
| 1985 | +} |
---|
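
For example, with the default sysctl_compaction_proactiveness of 20 (set below), wmark_low = max(100 - 20, 5) = 80 and wmark_high = min(80 + 10, 100) = 90: should_proactive_compact_node() starts a proactive run once the node score exceeds 90, and __compact_finished() keeps each zone's compaction going until its score drops to 80 or below.
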
| 1986 | + |
---|
| 1987 | +static enum compact_result __compact_finished(struct compact_control *cc) |
---|
1315 | 1988 | { |
---|
1316 | 1989 | unsigned int order; |
---|
1317 | 1990 | const int migratetype = cc->migratetype; |
---|
1318 | | - |
---|
1319 | | - if (cc->contended || fatal_signal_pending(current)) |
---|
1320 | | - return COMPACT_CONTENDED; |
---|
| 1991 | + int ret; |
---|
| 1992 | + bool abort_compact = false; |
---|
1321 | 1993 | |
---|
1322 | 1994 | /* Compaction run completes if the migrate and free scanner meet */ |
---|
1323 | 1995 | if (compact_scanners_met(cc)) { |
---|
1324 | 1996 | /* Let the next compaction start anew. */ |
---|
1325 | | - reset_cached_positions(zone); |
---|
| 1997 | + reset_cached_positions(cc->zone); |
---|
1326 | 1998 | |
---|
1327 | 1999 | /* |
---|
1328 | 2000 | * Mark that the PG_migrate_skip information should be cleared |
---|
.. | .. |
---|
1331 | 2003 | * based on an allocation request. |
---|
1332 | 2004 | */ |
---|
1333 | 2005 | if (cc->direct_compaction) |
---|
1334 | | - zone->compact_blockskip_flush = true; |
---|
| 2006 | + cc->zone->compact_blockskip_flush = true; |
---|
1335 | 2007 | |
---|
1336 | 2008 | if (cc->whole_zone) |
---|
1337 | 2009 | return COMPACT_COMPLETE; |
---|
.. | .. |
---|
1339 | 2011 | return COMPACT_PARTIAL_SKIPPED; |
---|
1340 | 2012 | } |
---|
1341 | 2013 | |
---|
| 2014 | + if (cc->proactive_compaction) { |
---|
| 2015 | + int score, wmark_low; |
---|
| 2016 | + pg_data_t *pgdat; |
---|
| 2017 | + |
---|
| 2018 | + pgdat = cc->zone->zone_pgdat; |
---|
| 2019 | + if (kswapd_is_running(pgdat)) |
---|
| 2020 | + return COMPACT_PARTIAL_SKIPPED; |
---|
| 2021 | + |
---|
| 2022 | + score = fragmentation_score_zone(cc->zone); |
---|
| 2023 | + wmark_low = fragmentation_score_wmark(pgdat, true); |
---|
| 2024 | + |
---|
| 2025 | + if (score > wmark_low) |
---|
| 2026 | + ret = COMPACT_CONTINUE; |
---|
| 2027 | + else |
---|
| 2028 | + ret = COMPACT_SUCCESS; |
---|
| 2029 | + |
---|
| 2030 | + goto out; |
---|
| 2031 | + } |
---|
| 2032 | + |
---|
1342 | 2033 | if (is_via_compact_memory(cc->order)) |
---|
1343 | 2034 | return COMPACT_CONTINUE; |
---|
1344 | 2035 | |
---|
1345 | | - if (cc->finishing_block) { |
---|
1346 | | - /* |
---|
1347 | | - * We have finished the pageblock, but better check again that |
---|
1348 | | - * we really succeeded. |
---|
1349 | | - */ |
---|
1350 | | - if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) |
---|
1351 | | - cc->finishing_block = false; |
---|
1352 | | - else |
---|
1353 | | - return COMPACT_CONTINUE; |
---|
1354 | | - } |
---|
| 2036 | + /* |
---|
| 2037 | + * Always finish scanning a pageblock to reduce the possibility of |
---|
| 2038 | + * fallbacks in the future. This is particularly important when |
---|
| 2039 | + * the migration source is unmovable/reclaimable, but it's not worth |
---|
| 2040 | + * special casing. |
---|
| 2041 | + */ |
---|
| 2042 | + if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) |
---|
| 2043 | + return COMPACT_CONTINUE; |
---|
1355 | 2044 | |
---|
1356 | 2045 | /* Direct compactor: Is a suitable page free? */ |
---|
| 2046 | + ret = COMPACT_NO_SUITABLE_PAGE; |
---|
1357 | 2047 | for (order = cc->order; order < MAX_ORDER; order++) { |
---|
1358 | | - struct free_area *area = &zone->free_area[order]; |
---|
| 2048 | + struct free_area *area = &cc->zone->free_area[order]; |
---|
1359 | 2049 | bool can_steal; |
---|
1360 | 2050 | |
---|
1361 | 2051 | /* Job done if page is free of the right migratetype */ |
---|
1362 | | - if (!list_empty(&area->free_list[migratetype])) |
---|
| 2052 | + if (!free_area_empty(area, migratetype)) |
---|
1363 | 2053 | return COMPACT_SUCCESS; |
---|
1364 | 2054 | |
---|
1365 | 2055 | #ifdef CONFIG_CMA |
---|
1366 | 2056 | /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ |
---|
1367 | 2057 | if (migratetype == MIGRATE_MOVABLE && |
---|
1368 | | - !list_empty(&area->free_list[MIGRATE_CMA])) |
---|
| 2058 | + !free_area_empty(area, MIGRATE_CMA)) |
---|
1369 | 2059 | return COMPACT_SUCCESS; |
---|
1370 | 2060 | #endif |
---|
1371 | 2061 | /* |
---|
.. | .. |
---|
1393 | 2083 | return COMPACT_SUCCESS; |
---|
1394 | 2084 | } |
---|
1395 | 2085 | |
---|
1396 | | - cc->finishing_block = true; |
---|
1397 | | - return COMPACT_CONTINUE; |
---|
| 2086 | + ret = COMPACT_CONTINUE; |
---|
| 2087 | + break; |
---|
1398 | 2088 | } |
---|
1399 | 2089 | } |
---|
1400 | 2090 | |
---|
1401 | | - return COMPACT_NO_SUITABLE_PAGE; |
---|
| 2091 | +out: |
---|
| 2092 | + trace_android_vh_compact_finished(&abort_compact); |
---|
| 2093 | + if (cc->contended || fatal_signal_pending(current) || abort_compact) |
---|
| 2094 | + ret = COMPACT_CONTENDED; |
---|
| 2095 | + |
---|
| 2096 | + return ret; |
---|
1402 | 2097 | } |
---|
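
Note that the contended/fatal-signal check has moved from the top of __compact_finished() to its common out: path, so contention, a fatal signal, or the trace_android_vh_compact_finished() vendor hook now overrides the result for runs that reach the out: label with COMPACT_CONTENDED (early returns such as COMPACT_COMPLETE still bypass it).
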
1403 | 2098 | |
---|
1404 | | -static enum compact_result compact_finished(struct zone *zone, |
---|
1405 | | - struct compact_control *cc) |
---|
| 2099 | +static enum compact_result compact_finished(struct compact_control *cc) |
---|
1406 | 2100 | { |
---|
1407 | 2101 | int ret; |
---|
1408 | 2102 | |
---|
1409 | | - ret = __compact_finished(zone, cc); |
---|
1410 | | - trace_mm_compaction_finished(zone, cc->order, ret); |
---|
| 2103 | + ret = __compact_finished(cc); |
---|
| 2104 | + trace_mm_compaction_finished(cc->zone, cc->order, ret); |
---|
1411 | 2105 | if (ret == COMPACT_NO_SUITABLE_PAGE) |
---|
1412 | 2106 | ret = COMPACT_CONTINUE; |
---|
1413 | 2107 | |
---|
.. | .. |
---|
1423 | 2117 | */ |
---|
1424 | 2118 | static enum compact_result __compaction_suitable(struct zone *zone, int order, |
---|
1425 | 2119 | unsigned int alloc_flags, |
---|
1426 | | - int classzone_idx, |
---|
| 2120 | + int highest_zoneidx, |
---|
1427 | 2121 | unsigned long wmark_target) |
---|
1428 | 2122 | { |
---|
1429 | 2123 | unsigned long watermark; |
---|
.. | .. |
---|
1431 | 2125 | if (is_via_compact_memory(order)) |
---|
1432 | 2126 | return COMPACT_CONTINUE; |
---|
1433 | 2127 | |
---|
1434 | | - watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; |
---|
| 2128 | + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); |
---|
1435 | 2129 | /* |
---|
1436 | 2130 | * If watermarks for high-order allocation are already met, there |
---|
1437 | 2131 | * should be no need for compaction at all. |
---|
1438 | 2132 | */ |
---|
1439 | | - if (zone_watermark_ok(zone, order, watermark, classzone_idx, |
---|
| 2133 | + if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, |
---|
1440 | 2134 | alloc_flags)) |
---|
1441 | 2135 | return COMPACT_SUCCESS; |
---|
1442 | 2136 | |
---|
.. | .. |
---|
1446 | 2140 | * watermark and alloc_flags have to match, or be more pessimistic than |
---|
1447 | 2141 | * the check in __isolate_free_page(). We don't use the direct |
---|
1448 | 2142 | * compactor's alloc_flags, as they are not relevant for freepage |
---|
1449 | | - * isolation. We however do use the direct compactor's classzone_idx to |
---|
1450 | | - * skip over zones where lowmem reserves would prevent allocation even |
---|
1451 | | - * if compaction succeeds. |
---|
| 2143 | + * isolation. We however do use the direct compactor's highest_zoneidx |
---|
| 2144 | + * to skip over zones where lowmem reserves would prevent allocation |
---|
| 2145 | + * even if compaction succeeds. |
---|
1452 | 2146 | * For costly orders, we require low watermark instead of min for |
---|
1453 | 2147 | * compaction to proceed to increase its chances. |
---|
1454 | 2148 | * ALLOC_CMA is used, as pages in CMA pageblocks are considered |
---|
.. | .. |
---|
1457 | 2151 | watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? |
---|
1458 | 2152 | low_wmark_pages(zone) : min_wmark_pages(zone); |
---|
1459 | 2153 | watermark += compact_gap(order); |
---|
1460 | | - if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, |
---|
| 2154 | + if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, |
---|
1461 | 2155 | ALLOC_CMA, wmark_target)) |
---|
1462 | 2156 | return COMPACT_SKIPPED; |
---|
1463 | 2157 | |
---|
.. | .. |
---|
1466 | 2160 | |
---|
1467 | 2161 | enum compact_result compaction_suitable(struct zone *zone, int order, |
---|
1468 | 2162 | unsigned int alloc_flags, |
---|
1469 | | - int classzone_idx) |
---|
| 2163 | + int highest_zoneidx) |
---|
1470 | 2164 | { |
---|
1471 | 2165 | enum compact_result ret; |
---|
1472 | 2166 | int fragindex; |
---|
1473 | 2167 | |
---|
1474 | | - ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, |
---|
| 2168 | + ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, |
---|
1475 | 2169 | zone_page_state(zone, NR_FREE_PAGES)); |
---|
1476 | 2170 | /* |
---|
1477 | 2171 | * fragmentation index determines if allocation failures are due to |
---|
.. | .. |
---|
1512 | 2206 | * Make sure at least one zone would pass __compaction_suitable if we continue |
---|
1513 | 2207 | * retrying the reclaim. |
---|
1514 | 2208 | */ |
---|
1515 | | - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
---|
1516 | | - ac->nodemask) { |
---|
| 2209 | + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, |
---|
| 2210 | + ac->highest_zoneidx, ac->nodemask) { |
---|
1517 | 2211 | unsigned long available; |
---|
1518 | 2212 | enum compact_result compact_result; |
---|
1519 | 2213 | |
---|
.. | .. |
---|
1526 | 2220 | available = zone_reclaimable_pages(zone) / order; |
---|
1527 | 2221 | available += zone_page_state_snapshot(zone, NR_FREE_PAGES); |
---|
1528 | 2222 | compact_result = __compaction_suitable(zone, order, alloc_flags, |
---|
1529 | | - ac_classzone_idx(ac), available); |
---|
| 2223 | + ac->highest_zoneidx, available); |
---|
1530 | 2224 | if (compact_result != COMPACT_SKIPPED) |
---|
1531 | 2225 | return true; |
---|
1532 | 2226 | } |
---|
.. | .. |
---|
1534 | 2228 | return false; |
---|
1535 | 2229 | } |
---|
1536 | 2230 | |
---|
1537 | | -static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) |
---|
| 2231 | +static enum compact_result |
---|
| 2232 | +compact_zone(struct compact_control *cc, struct capture_control *capc) |
---|
1538 | 2233 | { |
---|
1539 | 2234 | enum compact_result ret; |
---|
1540 | | - unsigned long start_pfn = zone->zone_start_pfn; |
---|
1541 | | - unsigned long end_pfn = zone_end_pfn(zone); |
---|
| 2235 | + unsigned long start_pfn = cc->zone->zone_start_pfn; |
---|
| 2236 | + unsigned long end_pfn = zone_end_pfn(cc->zone); |
---|
| 2237 | + unsigned long last_migrated_pfn; |
---|
1542 | 2238 | const bool sync = cc->mode != MIGRATE_ASYNC; |
---|
| 2239 | + bool update_cached; |
---|
1543 | 2240 | |
---|
1544 | 2241 | /* |
---|
1545 | 2242 | * These counters track activities during zone compaction. Initialize |
---|
.. | .. |
---|
1552 | 2249 | INIT_LIST_HEAD(&cc->freepages); |
---|
1553 | 2250 | INIT_LIST_HEAD(&cc->migratepages); |
---|
1554 | 2251 | |
---|
1555 | | - cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); |
---|
1556 | | - ret = compaction_suitable(zone, cc->order, cc->alloc_flags, |
---|
1557 | | - cc->classzone_idx); |
---|
| 2252 | + cc->migratetype = gfp_migratetype(cc->gfp_mask); |
---|
| 2253 | + ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, |
---|
| 2254 | + cc->highest_zoneidx); |
---|
1558 | 2255 | /* Compaction is likely to fail */ |
---|
1559 | 2256 | if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) |
---|
1560 | 2257 | return ret; |
---|
.. | .. |
---|
1566 | 2263 | * Clear pageblock skip if there were failures recently and compaction |
---|
1567 | 2264 | * is about to be retried after being deferred. |
---|
1568 | 2265 | */ |
---|
1569 | | - if (compaction_restarting(zone, cc->order)) |
---|
1570 | | - __reset_isolation_suitable(zone); |
---|
| 2266 | + if (compaction_restarting(cc->zone, cc->order)) |
---|
| 2267 | + __reset_isolation_suitable(cc->zone); |
---|
1571 | 2268 | |
---|
1572 | 2269 | /* |
---|
1573 | 2270 | * Setup to move all movable pages to the end of the zone. Used cached |
---|
.. | .. |
---|
1575 | 2272 | * want to compact the whole zone), but check that it is initialised |
---|
1576 | 2273 | * by ensuring the values are within zone boundaries. |
---|
1577 | 2274 | */ |
---|
| 2275 | + cc->fast_start_pfn = 0; |
---|
1578 | 2276 | if (cc->whole_zone) { |
---|
1579 | 2277 | cc->migrate_pfn = start_pfn; |
---|
1580 | 2278 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
---|
1581 | 2279 | } else { |
---|
1582 | | - cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
---|
1583 | | - cc->free_pfn = zone->compact_cached_free_pfn; |
---|
| 2280 | + cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; |
---|
| 2281 | + cc->free_pfn = cc->zone->compact_cached_free_pfn; |
---|
1584 | 2282 | if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { |
---|
1585 | 2283 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
---|
1586 | | - zone->compact_cached_free_pfn = cc->free_pfn; |
---|
| 2284 | + cc->zone->compact_cached_free_pfn = cc->free_pfn; |
---|
1587 | 2285 | } |
---|
1588 | 2286 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { |
---|
1589 | 2287 | cc->migrate_pfn = start_pfn; |
---|
1590 | | - zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
---|
1591 | | - zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
---|
| 2288 | + cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
---|
| 2289 | + cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
---|
1592 | 2290 | } |
---|
1593 | 2291 | |
---|
1594 | | - if (cc->migrate_pfn == start_pfn) |
---|
| 2292 | + if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) |
---|
1595 | 2293 | cc->whole_zone = true; |
---|
1596 | 2294 | } |
---|
1597 | 2295 | |
---|
1598 | | - cc->last_migrated_pfn = 0; |
---|
| 2296 | + last_migrated_pfn = 0; |
---|
| 2297 | + |
---|
| 2298 | + /* |
---|
| 2299 | + * The migrate scanner keeps separate cached PFNs for ASYNC and SYNC* |
---|
| 2300 | + * migration, on the basis that some migrations fail in ASYNC mode. However, |
---|
| 2301 | + * if the cached PFNs match and pageblocks are skipped due to having |
---|
| 2302 | + * no isolation candidates, then the sync state does not matter. |
---|
| 2303 | + * Until a pageblock with isolation candidates is found, keep the |
---|
| 2304 | + * cached PFNs in sync to avoid revisiting the same blocks. |
---|
| 2305 | + */ |
---|
| 2306 | + update_cached = !sync && |
---|
| 2307 | + cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; |
---|
1599 | 2308 | |
---|
1600 | 2309 | trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, |
---|
1601 | 2310 | cc->free_pfn, end_pfn, sync); |
---|
1602 | 2311 | |
---|
1603 | | - migrate_prep_local(); |
---|
| 2312 | + /* lru_add_drain_all() could be expensive as it involves other CPUs */ |
---|
| 2313 | + lru_add_drain(); |
---|
1604 | 2314 | |
---|
1605 | | - while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { |
---|
| 2315 | + while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { |
---|
1606 | 2316 | int err; |
---|
| 2317 | + unsigned long start_pfn = cc->migrate_pfn; |
---|
1607 | 2318 | |
---|
1608 | | - switch (isolate_migratepages(zone, cc)) { |
---|
| 2319 | + /* |
---|
| 2320 | + * Avoid multiple rescans which can happen if a page cannot be |
---|
| 2321 | + * isolated (dirty/writeback in async mode) or if the migrated |
---|
| 2322 | + * pages are being allocated before the pageblock is cleared. |
---|
| 2323 | + * The first rescan will capture the entire pageblock for |
---|
| 2324 | + * migration. If it fails, it'll be marked skip and scanning |
---|
| 2325 | + * will proceed as normal. |
---|
| 2326 | + */ |
---|
| 2327 | + cc->rescan = false; |
---|
| 2328 | + if (pageblock_start_pfn(last_migrated_pfn) == |
---|
| 2329 | + pageblock_start_pfn(start_pfn)) { |
---|
| 2330 | + cc->rescan = true; |
---|
| 2331 | + } |
---|
| 2332 | + |
---|
| 2333 | + switch (isolate_migratepages(cc)) { |
---|
1609 | 2334 | case ISOLATE_ABORT: |
---|
1610 | 2335 | ret = COMPACT_CONTENDED; |
---|
1611 | 2336 | putback_movable_pages(&cc->migratepages); |
---|
1612 | 2337 | cc->nr_migratepages = 0; |
---|
1613 | 2338 | goto out; |
---|
1614 | 2339 | case ISOLATE_NONE: |
---|
| 2340 | + if (update_cached) { |
---|
| 2341 | + cc->zone->compact_cached_migrate_pfn[1] = |
---|
| 2342 | + cc->zone->compact_cached_migrate_pfn[0]; |
---|
| 2343 | + } |
---|
| 2344 | + |
---|
1615 | 2345 | /* |
---|
1616 | 2346 | * We haven't isolated and migrated anything, but |
---|
1617 | 2347 | * there might still be unflushed migrations from |
---|
.. | .. |
---|
1619 | 2349 | */ |
---|
1620 | 2350 | goto check_drain; |
---|
1621 | 2351 | case ISOLATE_SUCCESS: |
---|
| 2352 | + update_cached = false; |
---|
| 2353 | + last_migrated_pfn = start_pfn; |
---|
1622 | 2354 | ; |
---|
1623 | 2355 | } |
---|
1624 | 2356 | |
---|
.. | .. |
---|
1650 | 2382 | cc->migrate_pfn = block_end_pfn( |
---|
1651 | 2383 | cc->migrate_pfn - 1, cc->order); |
---|
1652 | 2384 | /* Draining pcplists is useless in this case */ |
---|
1653 | | - cc->last_migrated_pfn = 0; |
---|
1654 | | - |
---|
| 2385 | + last_migrated_pfn = 0; |
---|
1655 | 2386 | } |
---|
1656 | 2387 | } |
---|
1657 | 2388 | |
---|
.. | .. |
---|
1663 | 2394 | * compact_finished() can detect immediately if allocation |
---|
1664 | 2395 | * would succeed. |
---|
1665 | 2396 | */ |
---|
1666 | | - if (cc->order > 0 && cc->last_migrated_pfn) { |
---|
1667 | | - int cpu; |
---|
| 2397 | + if (cc->order > 0 && last_migrated_pfn) { |
---|
1668 | 2398 | unsigned long current_block_start = |
---|
1669 | 2399 | block_start_pfn(cc->migrate_pfn, cc->order); |
---|
1670 | 2400 | |
---|
1671 | | - if (cc->last_migrated_pfn < current_block_start) { |
---|
1672 | | - cpu = get_cpu(); |
---|
1673 | | - lru_add_drain_cpu(cpu); |
---|
1674 | | - drain_local_pages(zone); |
---|
1675 | | - put_cpu(); |
---|
| 2401 | + if (last_migrated_pfn < current_block_start) { |
---|
| 2402 | + lru_add_drain_cpu_zone(cc->zone); |
---|
1676 | 2403 | /* No more flushing until we migrate again */ |
---|
1677 | | - cc->last_migrated_pfn = 0; |
---|
| 2404 | + last_migrated_pfn = 0; |
---|
1678 | 2405 | } |
---|
1679 | 2406 | } |
---|
1680 | 2407 | |
---|
| 2408 | + /* Stop if a page has been captured */ |
---|
| 2409 | + if (capc && capc->page) { |
---|
| 2410 | + ret = COMPACT_SUCCESS; |
---|
| 2411 | + break; |
---|
| 2412 | + } |
---|
1681 | 2413 | } |
---|
1682 | 2414 | |
---|
1683 | 2415 | out: |
---|
.. | .. |
---|
1696 | 2428 | * Only go back, not forward. The cached pfn might have been |
---|
1697 | 2429 | * already reset to zone end in compact_finished() |
---|
1698 | 2430 | */ |
---|
1699 | | - if (free_pfn > zone->compact_cached_free_pfn) |
---|
1700 | | - zone->compact_cached_free_pfn = free_pfn; |
---|
| 2431 | + if (free_pfn > cc->zone->compact_cached_free_pfn) |
---|
| 2432 | + cc->zone->compact_cached_free_pfn = free_pfn; |
---|
1701 | 2433 | } |
---|
1702 | 2434 | |
---|
1703 | 2435 | count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); |
---|
.. | .. |
---|
1711 | 2443 | |
---|
1712 | 2444 | static enum compact_result compact_zone_order(struct zone *zone, int order, |
---|
1713 | 2445 | gfp_t gfp_mask, enum compact_priority prio, |
---|
1714 | | - unsigned int alloc_flags, int classzone_idx) |
---|
| 2446 | + unsigned int alloc_flags, int highest_zoneidx, |
---|
| 2447 | + struct page **capture) |
---|
1715 | 2448 | { |
---|
1716 | 2449 | enum compact_result ret; |
---|
1717 | 2450 | struct compact_control cc = { |
---|
1718 | 2451 | .order = order, |
---|
| 2452 | + .search_order = order, |
---|
1719 | 2453 | .gfp_mask = gfp_mask, |
---|
1720 | 2454 | .zone = zone, |
---|
1721 | 2455 | .mode = (prio == COMPACT_PRIO_ASYNC) ? |
---|
1722 | 2456 | MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, |
---|
1723 | 2457 | .alloc_flags = alloc_flags, |
---|
1724 | | - .classzone_idx = classzone_idx, |
---|
| 2458 | + .highest_zoneidx = highest_zoneidx, |
---|
1725 | 2459 | .direct_compaction = true, |
---|
1726 | 2460 | .whole_zone = (prio == MIN_COMPACT_PRIORITY), |
---|
1727 | 2461 | .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), |
---|
1728 | 2462 | .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) |
---|
1729 | 2463 | }; |
---|
| 2464 | + struct capture_control capc = { |
---|
| 2465 | + .cc = &cc, |
---|
| 2466 | + .page = NULL, |
---|
| 2467 | + }; |
---|
1730 | 2468 | |
---|
1731 | | - ret = compact_zone(zone, &cc); |
---|
| 2469 | + /* |
---|
| 2470 | + * Make sure the structs are really initialized before we expose the |
---|
| 2471 | + * capture control, in case we are interrupted and the interrupt handler |
---|
| 2472 | + * frees a page. |
---|
| 2473 | + */ |
---|
| 2474 | + barrier(); |
---|
| 2475 | + WRITE_ONCE(current->capture_control, &capc); |
---|
| 2476 | + |
---|
| 2477 | + ret = compact_zone(&cc, &capc); |
---|
1732 | 2478 | |
---|
1733 | 2479 | VM_BUG_ON(!list_empty(&cc.freepages)); |
---|
1734 | 2480 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
---|
| 2481 | + |
---|
| 2482 | + /* |
---|
| 2483 | + * Make sure we hide capture control first before we read the captured |
---|
| 2484 | + * page pointer, otherwise an interrupt could free and capture a page |
---|
| 2485 | + * and we would leak it. |
---|
| 2486 | + */ |
---|
| 2487 | + WRITE_ONCE(current->capture_control, NULL); |
---|
| 2488 | + *capture = READ_ONCE(capc.page); |
---|
1735 | 2489 | |
---|
1736 | 2490 | return ret; |
---|
1737 | 2491 | } |
---|
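
The publish/hide ordering above only matters because a freeing context can asynchronously hand a page back through current->capture_control. A minimal sketch of that consumer side, assuming a hook in the page allocator's free path (the helper name and exact checks here are illustrative, not the allocator's actual code):

	/*
	 * Sketch only: a freeing context hands a page of the right order
	 * straight to the compactor instead of putting it on the free lists.
	 */
	static bool try_capture_page(struct page *page, unsigned int order)
	{
		struct capture_control *capc = READ_ONCE(current->capture_control);

		/* No compactor waiting, already satisfied, or wrong order */
		if (!capc || capc->page || order != capc->cc->order)
			return false;

		capc->page = page;	/* compact_zone() notices and stops */
		return true;
	}
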
.. | .. |
---|
1745 | 2499 | * @alloc_flags: The allocation flags of the current allocation |
---|
1746 | 2500 | * @ac: The context of current allocation |
---|
1747 | 2501 | * @prio: Determines how hard direct compaction should try to succeed |
---|
| 2502 | + * @capture: Pointer to free page created by compaction will be stored here |
---|
1748 | 2503 | * |
---|
1749 | 2504 | * This is the main entry point for direct page compaction. |
---|
1750 | 2505 | */ |
---|
1751 | 2506 | enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, |
---|
1752 | 2507 | unsigned int alloc_flags, const struct alloc_context *ac, |
---|
1753 | | - enum compact_priority prio) |
---|
| 2508 | + enum compact_priority prio, struct page **capture) |
---|
1754 | 2509 | { |
---|
1755 | 2510 | int may_perform_io = gfp_mask & __GFP_IO; |
---|
1756 | 2511 | struct zoneref *z; |
---|
.. | .. |
---|
1767 | 2522 | trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); |
---|
1768 | 2523 | |
---|
1769 | 2524 | /* Compact each zone in the list */ |
---|
1770 | | - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
---|
1771 | | - ac->nodemask) { |
---|
| 2525 | + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, |
---|
| 2526 | + ac->highest_zoneidx, ac->nodemask) { |
---|
1772 | 2527 | enum compact_result status; |
---|
1773 | 2528 | |
---|
1774 | 2529 | if (prio > MIN_COMPACT_PRIORITY |
---|
.. | .. |
---|
1778 | 2533 | } |
---|
1779 | 2534 | |
---|
1780 | 2535 | status = compact_zone_order(zone, order, gfp_mask, prio, |
---|
1781 | | - alloc_flags, ac_classzone_idx(ac)); |
---|
| 2536 | + alloc_flags, ac->highest_zoneidx, capture); |
---|
1782 | 2537 | rc = max(status, rc); |
---|
1783 | 2538 | |
---|
1784 | 2539 | /* The allocation should succeed, stop compacting */ |
---|
.. | .. |
---|
1816 | 2571 | return rc; |
---|
1817 | 2572 | } |
---|
1818 | 2573 | |
---|
| 2574 | +/* |
---|
| 2575 | + * Compact all zones within a node until each zone's fragmentation score |
---|
| 2576 | + * falls within the proactive compaction thresholds (as determined by the |
---|
| 2577 | + * proactiveness tunable). |
---|
| 2578 | + * |
---|
| 2579 | + * It is possible that the function returns before reaching score targets |
---|
| 2580 | + * due to various back-off conditions, such as, contention on per-node or |
---|
| 2581 | + * per-zone locks. |
---|
| 2582 | + */ |
---|
| 2583 | +static void proactive_compact_node(pg_data_t *pgdat) |
---|
| 2584 | +{ |
---|
| 2585 | + int zoneid; |
---|
| 2586 | + struct zone *zone; |
---|
| 2587 | + struct compact_control cc = { |
---|
| 2588 | + .order = -1, |
---|
| 2589 | + .mode = MIGRATE_SYNC_LIGHT, |
---|
| 2590 | + .ignore_skip_hint = true, |
---|
| 2591 | + .whole_zone = true, |
---|
| 2592 | + .gfp_mask = GFP_KERNEL, |
---|
| 2593 | + .proactive_compaction = true, |
---|
| 2594 | + }; |
---|
| 2595 | + |
---|
| 2596 | + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
---|
| 2597 | + zone = &pgdat->node_zones[zoneid]; |
---|
| 2598 | + if (!populated_zone(zone)) |
---|
| 2599 | + continue; |
---|
| 2600 | + |
---|
| 2601 | + cc.zone = zone; |
---|
| 2602 | + |
---|
| 2603 | + compact_zone(&cc, NULL); |
---|
| 2604 | + |
---|
| 2605 | + VM_BUG_ON(!list_empty(&cc.freepages)); |
---|
| 2606 | + VM_BUG_ON(!list_empty(&cc.migratepages)); |
---|
| 2607 | + } |
---|
| 2608 | +} |
---|
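
Note that .order = -1 here is the same sentinel that is_via_compact_memory() tests for (the order == -1 check earlier), so compaction_suitable() reports COMPACT_CONTINUE regardless of watermarks and termination is instead governed by the proactive fragmentation-score branch in __compact_finished().
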
1819 | 2609 | |
---|
1820 | 2610 | /* Compact all zones within a node */ |
---|
1821 | 2611 | static void compact_node(int nid) |
---|
.. | .. |
---|
1840 | 2630 | |
---|
1841 | 2631 | cc.zone = zone; |
---|
1842 | 2632 | |
---|
1843 | | - compact_zone(zone, &cc); |
---|
| 2633 | + compact_zone(&cc, NULL); |
---|
1844 | 2634 | |
---|
1845 | 2635 | VM_BUG_ON(!list_empty(&cc.freepages)); |
---|
1846 | 2636 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
---|
.. | .. |
---|
1863 | 2653 | int sysctl_compact_memory; |
---|
1864 | 2654 | |
---|
1865 | 2655 | /* |
---|
1866 | | - * This is the entry point for compacting all nodes via |
---|
1867 | | - * /proc/sys/vm/compact_memory |
---|
| 2656 | + * Tunable for proactive compaction. It determines how |
---|
| 2657 | + * aggressively the kernel should compact memory in the |
---|
| 2658 | + * background. It takes values in the range [0, 100]. |
---|
1868 | 2659 | */ |
---|
1869 | | -int sysctl_compaction_handler(struct ctl_table *table, int write, |
---|
1870 | | - void __user *buffer, size_t *length, loff_t *ppos) |
---|
| 2660 | +unsigned int __read_mostly sysctl_compaction_proactiveness = 20; |
---|
| 2661 | + |
---|
| 2662 | +int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, |
---|
| 2663 | + void *buffer, size_t *length, loff_t *ppos) |
---|
1871 | 2664 | { |
---|
1872 | | - if (write) |
---|
1873 | | - compact_nodes(); |
---|
| 2665 | + int rc, nid; |
---|
| 2666 | + |
---|
| 2667 | + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); |
---|
| 2668 | + if (rc) |
---|
| 2669 | + return rc; |
---|
| 2670 | + |
---|
| 2671 | + if (write && sysctl_compaction_proactiveness) { |
---|
| 2672 | + for_each_online_node(nid) { |
---|
| 2673 | + pg_data_t *pgdat = NODE_DATA(nid); |
---|
| 2674 | + |
---|
| 2675 | + if (pgdat->proactive_compact_trigger) |
---|
| 2676 | + continue; |
---|
| 2677 | + |
---|
| 2678 | + pgdat->proactive_compact_trigger = true; |
---|
| 2679 | + wake_up_interruptible(&pgdat->kcompactd_wait); |
---|
| 2680 | + } |
---|
| 2681 | + } |
---|
1874 | 2682 | |
---|
1875 | 2683 | return 0; |
---|
1876 | 2684 | } |
---|
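
In practice this means that writing a nonzero value, e.g. echo 30 > /proc/sys/vm/compaction_proactiveness, not only updates the tunable but also sets proactive_compact_trigger and wakes each node's kcompactd immediately, rather than waiting for the next HPAGE_FRAG_CHECK_INTERVAL_MSEC timeout.
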
1877 | 2685 | |
---|
1878 | | -int sysctl_extfrag_handler(struct ctl_table *table, int write, |
---|
1879 | | - void __user *buffer, size_t *length, loff_t *ppos) |
---|
| 2686 | +/* |
---|
| 2687 | + * This is the entry point for compacting all nodes via |
---|
| 2688 | + * /proc/sys/vm/compact_memory |
---|
| 2689 | + */ |
---|
| 2690 | +int sysctl_compaction_handler(struct ctl_table *table, int write, |
---|
| 2691 | + void *buffer, size_t *length, loff_t *ppos) |
---|
1880 | 2692 | { |
---|
1881 | | - proc_dointvec_minmax(table, write, buffer, length, ppos); |
---|
| 2693 | + if (write) |
---|
| 2694 | + compact_nodes(); |
---|
1882 | 2695 | |
---|
1883 | 2696 | return 0; |
---|
1884 | 2697 | } |
---|
.. | .. |
---|
1914 | 2727 | |
---|
1915 | 2728 | static inline bool kcompactd_work_requested(pg_data_t *pgdat) |
---|
1916 | 2729 | { |
---|
1917 | | - return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); |
---|
| 2730 | + return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || |
---|
| 2731 | + pgdat->proactive_compact_trigger; |
---|
1918 | 2732 | } |
---|
1919 | 2733 | |
---|
1920 | 2734 | static bool kcompactd_node_suitable(pg_data_t *pgdat) |
---|
1921 | 2735 | { |
---|
1922 | 2736 | int zoneid; |
---|
1923 | 2737 | struct zone *zone; |
---|
1924 | | - enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; |
---|
| 2738 | + enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; |
---|
1925 | 2739 | |
---|
1926 | | - for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { |
---|
| 2740 | + for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { |
---|
1927 | 2741 | zone = &pgdat->node_zones[zoneid]; |
---|
1928 | 2742 | |
---|
1929 | 2743 | if (!populated_zone(zone)) |
---|
1930 | 2744 | continue; |
---|
1931 | 2745 | |
---|
1932 | 2746 | if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, |
---|
1933 | | - classzone_idx) == COMPACT_CONTINUE) |
---|
| 2747 | + highest_zoneidx) == COMPACT_CONTINUE) |
---|
1934 | 2748 | return true; |
---|
1935 | 2749 | } |
---|
1936 | 2750 | |
---|
.. | .. |
---|
1947 | 2761 | struct zone *zone; |
---|
1948 | 2762 | struct compact_control cc = { |
---|
1949 | 2763 | .order = pgdat->kcompactd_max_order, |
---|
1950 | | - .classzone_idx = pgdat->kcompactd_classzone_idx, |
---|
| 2764 | + .search_order = pgdat->kcompactd_max_order, |
---|
| 2765 | + .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, |
---|
1951 | 2766 | .mode = MIGRATE_SYNC_LIGHT, |
---|
1952 | 2767 | .ignore_skip_hint = false, |
---|
1953 | 2768 | .gfp_mask = GFP_KERNEL, |
---|
1954 | 2769 | }; |
---|
1955 | 2770 | trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, |
---|
1956 | | - cc.classzone_idx); |
---|
| 2771 | + cc.highest_zoneidx); |
---|
1957 | 2772 | count_compact_event(KCOMPACTD_WAKE); |
---|
1958 | 2773 | |
---|
1959 | | - for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { |
---|
| 2774 | + for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { |
---|
1960 | 2775 | int status; |
---|
1961 | 2776 | |
---|
1962 | 2777 | zone = &pgdat->node_zones[zoneid]; |
---|
.. | .. |
---|
1974 | 2789 | return; |
---|
1975 | 2790 | |
---|
1976 | 2791 | cc.zone = zone; |
---|
1977 | | - status = compact_zone(zone, &cc); |
---|
| 2792 | + status = compact_zone(&cc, NULL); |
---|
1978 | 2793 | |
---|
1979 | 2794 | if (status == COMPACT_SUCCESS) { |
---|
1980 | 2795 | compaction_defer_reset(zone, cc.order, false); |
---|
.. | .. |
---|
2005 | 2820 | |
---|
2006 | 2821 | /* |
---|
2007 | 2822 | * Regardless of success, we are done until woken up next. But remember |
---|
2008 | | - * the requested order/classzone_idx in case it was higher/tighter than |
---|
2009 | | - * our current ones |
---|
| 2823 | + * the requested order/highest_zoneidx in case it was higher/tighter |
---|
| 2824 | + * than our current ones |
---|
2010 | 2825 | */ |
---|
2011 | 2826 | if (pgdat->kcompactd_max_order <= cc.order) |
---|
2012 | 2827 | pgdat->kcompactd_max_order = 0; |
---|
2013 | | - if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) |
---|
2014 | | - pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
---|
| 2828 | + if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) |
---|
| 2829 | + pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; |
---|
2015 | 2830 | } |
---|
2016 | 2831 | |
---|
2017 | | -void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) |
---|
| 2832 | +void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) |
---|
2018 | 2833 | { |
---|
2019 | 2834 | if (!order) |
---|
2020 | 2835 | return; |
---|
.. | .. |
---|
2022 | 2837 | if (pgdat->kcompactd_max_order < order) |
---|
2023 | 2838 | pgdat->kcompactd_max_order = order; |
---|
2024 | 2839 | |
---|
2025 | | - if (pgdat->kcompactd_classzone_idx > classzone_idx) |
---|
2026 | | - pgdat->kcompactd_classzone_idx = classzone_idx; |
---|
| 2840 | + if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) |
---|
| 2841 | + pgdat->kcompactd_highest_zoneidx = highest_zoneidx; |
---|
2027 | 2842 | |
---|
2028 | 2843 | /* |
---|
2029 | 2844 | * Pairs with implicit barrier in wait_event_freezable() |
---|
.. | .. |
---|
2036 | 2851 | return; |
---|
2037 | 2852 | |
---|
2038 | 2853 | trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, |
---|
2039 | | - classzone_idx); |
---|
| 2854 | + highest_zoneidx); |
---|
2040 | 2855 | wake_up_interruptible(&pgdat->kcompactd_wait); |
---|
2041 | 2856 | } |
---|
2042 | 2857 | |
---|
.. | .. |
---|
2048 | 2863 | { |
---|
2049 | 2864 | pg_data_t *pgdat = (pg_data_t*)p; |
---|
2050 | 2865 | struct task_struct *tsk = current; |
---|
| 2866 | + unsigned int proactive_defer = 0; |
---|
2051 | 2867 | |
---|
2052 | 2868 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
---|
2053 | 2869 | |
---|
.. | .. |
---|
2057 | 2873 | set_freezable(); |
---|
2058 | 2874 | |
---|
2059 | 2875 | pgdat->kcompactd_max_order = 0; |
---|
2060 | | - pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
---|
| 2876 | + pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; |
---|
2061 | 2877 | |
---|
2062 | 2878 | while (!kthread_should_stop()) { |
---|
2063 | 2879 | unsigned long pflags; |
---|
| 2880 | + long timeout; |
---|
2064 | 2881 | |
---|
| 2882 | + timeout = sysctl_compaction_proactiveness ? |
---|
| 2883 | + msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC) : |
---|
| 2884 | + MAX_SCHEDULE_TIMEOUT; |
---|
2065 | 2885 | trace_mm_compaction_kcompactd_sleep(pgdat->node_id); |
---|
2066 | | - wait_event_freezable(pgdat->kcompactd_wait, |
---|
2067 | | - kcompactd_work_requested(pgdat)); |
---|
| 2886 | + if (wait_event_freezable_timeout(pgdat->kcompactd_wait, |
---|
| 2887 | + kcompactd_work_requested(pgdat), timeout) && |
---|
| 2888 | + !pgdat->proactive_compact_trigger) { |
---|
2068 | 2889 | |
---|
2069 | | - psi_memstall_enter(&pflags); |
---|
2070 | | - kcompactd_do_work(pgdat); |
---|
2071 | | - psi_memstall_leave(&pflags); |
---|
| 2890 | + psi_memstall_enter(&pflags); |
---|
| 2891 | + kcompactd_do_work(pgdat); |
---|
| 2892 | + psi_memstall_leave(&pflags); |
---|
| 2893 | + continue; |
---|
| 2894 | + } |
---|
| 2895 | + |
---|
| 2896 | + /* kcompactd wait timeout */ |
---|
| 2897 | + if (should_proactive_compact_node(pgdat)) { |
---|
| 2898 | + unsigned int prev_score, score; |
---|
| 2899 | + |
---|
| 2900 | + /* |
---|
| 2901 | + * On wakeup of proactive compaction by sysctl |
---|
| 2902 | + * write, ignore the accumulated defer score. |
---|
| 2903 | + * Anyway, if the proactive compaction didn't |
---|
| 2904 | + * make any progress for the new value, it will |
---|
| 2905 | + * be further deferred by 2^COMPACT_MAX_DEFER_SHIFT |
---|
| 2906 | + * times. |
---|
| 2907 | + */ |
---|
| 2908 | + if (proactive_defer && |
---|
| 2909 | + !pgdat->proactive_compact_trigger) { |
---|
| 2910 | + proactive_defer--; |
---|
| 2911 | + continue; |
---|
| 2912 | + } |
---|
| 2913 | + |
---|
| 2914 | + prev_score = fragmentation_score_node(pgdat); |
---|
| 2915 | + proactive_compact_node(pgdat); |
---|
| 2916 | + score = fragmentation_score_node(pgdat); |
---|
| 2917 | + /* |
---|
| 2918 | + * Defer proactive compaction if the fragmentation |
---|
| 2919 | + * score did not go down i.e. no progress made. |
---|
| 2920 | + */ |
---|
| 2921 | + proactive_defer = score < prev_score ? |
---|
| 2922 | + 0 : 1 << COMPACT_MAX_DEFER_SHIFT; |
---|
| 2923 | + } |
---|
| 2924 | + if (pgdat->proactive_compact_trigger) |
---|
| 2925 | + pgdat->proactive_compact_trigger = false; |
---|
2072 | 2926 | } |
---|
2073 | 2927 | |
---|
2074 | 2928 | return 0; |
---|
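
Worked through, the back-off above means: with the mainline COMPACT_MAX_DEFER_SHIFT of 6, a proactive run that fails to lower the node's fragmentation score is skipped for the next 1 << 6 = 64 timeout wakeups, while a sysctl-triggered wakeup (proactive_compact_trigger) bypasses the defer so a newly written proactiveness value takes effect at once.
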