.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * linux/mm/vmstat.c
3 | 4 | *
.. | ..
75 | 76 | static DEFINE_MUTEX(vm_numa_stat_lock);
76 | 77 |
77 | 78 | int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 | | - void __user *buffer, size_t *length, loff_t *ppos)
| 79 | + void *buffer, size_t *length, loff_t *ppos)
79 | 80 | {
80 | 81 | int ret, oldval;
81 | 82 |
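The handler's buffer loses its `__user` annotation because the sysctl core now copies the data into a kernel buffer before calling into handlers. A minimal sketch of a handler written against the new convention; `my_handler` is an invented name and this is kernel-style code, not a standalone program:

```c
/* Hypothetical sysctl handler under the new calling convention: "buffer"
 * is already a kernel pointer, so no copy_from_user() is needed here.
 */
static int my_handler(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	/* parse/format the value that table->data points at */
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* react to the newly written value here */
	return 0;
}
```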
.. | ..
227 | 228 | * 125 1024 10 16-32 GB 9
228 | 229 | */
229 | 230 |
230 | | - mem = zone->managed_pages >> (27 - PAGE_SHIFT);
| 231 | + mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
231 | 232 |
232 | 233 | threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
233 | 234 |
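As a sanity check of the threshold formula above, here is a standalone model (not kernel code) that assumes 4 KiB pages, so PAGE_SHIFT is 12 and `mem` counts 128 MB units. The kernel additionally clamps the result to 125, which is why the sample table row above tops out at 125:

```c
#include <stdio.h>

/* "find last set": fls(0) == 0, fls(1) == 1, fls(128) == 8 */
static int fls(unsigned long x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long managed_pages = 16UL << (30 - 12); /* a 16 GB zone in 4 KiB pages */
	unsigned long mem = managed_pages >> (27 - 12);  /* zone size in 128 MB units: 128 */
	int cpus = 8;
	int threshold = 2 * fls(cpus) * (1 + fls(mem));  /* 2 * 4 * (1 + 8) = 72 */

	if (threshold > 125)	/* mirrors the kernel's cap */
		threshold = 125;
	printf("threshold = %d\n", threshold);
	return 0;
}
```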
.. | ..
325 | 326 |
326 | 327 | t = __this_cpu_read(pcp->stat_threshold);
327 | 328 |
328 | | - if (unlikely(x > t || x < -t)) {
| 329 | + if (unlikely(abs(x) > t)) {
329 | 330 | zone_page_state_add(x, zone, item);
330 | 331 | x = 0;
331 | 332 | }
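The rewritten test is behavior-preserving: for any threshold t >= 0, `x > t || x < -t` holds exactly when `abs(x) > t`. A throwaway exhaustive check over a small window:

```c
#include <assert.h>
#include <stdlib.h>

int main(void)
{
	/* compare both forms for every (t, x) pair in a small range */
	for (long t = 0; t <= 128; t++)
		for (long x = -1024; x <= 1024; x++)
			assert((x > t || x < -t) == (labs(x) > t));
	return 0;
}
```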
.. | ..
342 | 343 | long x;
343 | 344 | long t;
344 | 345 |
| 346 | + if (vmstat_item_in_bytes(item)) {
| 347 | + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
| 348 | + delta >>= PAGE_SHIFT;
| 349 | + }
| 350 | +
345 | 351 | preempt_disable_rt();
346 | 352 | x = delta + __this_cpu_read(*p);
347 | 353 |
348 | 354 | t = __this_cpu_read(pcp->stat_threshold);
349 | 355 |
350 | | - if (unlikely(x > t || x < -t)) {
| 356 | + if (unlikely(abs(x) > t)) {
351 | 357 | node_page_state_add(x, pgdat, item);
352 | 358 | x = 0;
353 | 359 | }
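The new prologue lets byte-counted node items (per `vmstat_item_in_bytes()`, e.g. the slab counters) share the page-based per-cpu machinery: callers pass byte deltas, which must be page-aligned, while the counter itself keeps pages. A minimal userspace model of that folding, assuming 4 KiB pages:

```c
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)

static long counter;	/* stands in for the per-node counter, in pages */

static void mod_state_bytes(long delta)
{
	/* mirrors the VM_WARN_ON_ONCE: byte deltas must be page-aligned */
	assert((delta & (PAGE_SIZE - 1)) == 0);
	/* like the kernel code, relies on arithmetic shift for negatives */
	counter += delta >> PAGE_SHIFT;
}

int main(void)
{
	mod_state_bytes(3 * PAGE_SIZE);
	mod_state_bytes(-PAGE_SIZE);
	printf("%ld pages\n", counter);	/* prints 2 */
	return 0;
}
```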
.. | ..
403 | 409 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
404 | 410 | s8 v, t;
405 | 411 |
| 412 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
| 413 | +
406 | 414 | preempt_disable_rt();
407 | 415 | v = __this_cpu_inc_return(*p);
408 | 416 | t = __this_cpu_read(pcp->stat_threshold);
.. | ..
450 | 458 | struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
451 | 459 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
452 | 460 | s8 v, t;
| 461 | +
| 462 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
453 | 463 |
454 | 464 | preempt_disable_rt();
455 | 465 | v = __this_cpu_dec_return(*p);
.. | ..
513 | 523 | o = this_cpu_read(*p);
514 | 524 | n = delta + o;
515 | 525 |
516 | | - if (n > t || n < -t) {
| 526 | + if (abs(n) > t) {
517 | 527 | int os = overstep_mode * (t >> 1) ;
518 | 528 |
519 | 529 | /* Overflow must be added to zone counters */
.. | ..
552 | 562 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
553 | 563 | long o, n, t, z;
554 | 564 |
| 565 | + if (vmstat_item_in_bytes(item)) {
| 566 | + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
| 567 | + delta >>= PAGE_SHIFT;
| 568 | + }
| 569 | +
555 | 570 | do {
556 | 571 | z = 0; /* overflow to node counters */
557 | 572 |
.. | ..
570 | 585 | o = this_cpu_read(*p);
571 | 586 | n = delta + o;
572 | 587 |
573 | | - if (n > t || n < -t) {
| 588 | + if (abs(n) > t) {
574 | 589 | int os = overstep_mode * (t >> 1) ;
575 | 590 |
576 | 591 | /* Overflow must be added to node counters */
.. | ..
1000 | 1015 | /*
1001 | 1016 | * Determine the per node value of a stat item.
1002 | 1017 | */
1003 | | -unsigned long node_page_state(struct pglist_data *pgdat,
1004 | | - enum node_stat_item item)
| 1018 | +unsigned long node_page_state_pages(struct pglist_data *pgdat,
| 1019 | + enum node_stat_item item)
1005 | 1020 | {
1006 | 1021 | long x = atomic_long_read(&pgdat->vm_stat[item]);
1007 | 1022 | #ifdef CONFIG_SMP
.. | ..
1009 | 1024 | x = 0;
1010 | 1025 | #endif
1011 | 1026 | return x;
| 1027 | +}
| 1028 | +
| 1029 | +unsigned long node_page_state(struct pglist_data *pgdat,
| 1030 | + enum node_stat_item item)
| 1031 | +{
| 1032 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
| 1033 | +
| 1034 | + return node_page_state_pages(pgdat, item);
1012 | 1035 | }
1013 | 1036 | #endif
1014 | 1037 |
.. | ..
1085 | 1108 | return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1086 | 1109 | }
1087 | 1110 |
| 1111 | +/*
| 1112 | + * Calculates external fragmentation within a zone wrt the given order.
| 1113 | + * It is defined as the percentage of pages found in blocks of size
| 1114 | + * less than 1 << order. It returns values in range [0, 100].
| 1115 | + */
| 1116 | +unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
| 1117 | +{
| 1118 | + struct contig_page_info info;
| 1119 | +
| 1120 | + fill_contig_page_info(zone, order, &info);
| 1121 | + if (info.free_pages == 0)
| 1122 | + return 0;
| 1123 | +
| 1124 | + return div_u64((info.free_pages -
| 1125 | + (info.free_blocks_suitable << order)) * 100,
| 1126 | + info.free_pages);
| 1127 | +}
| 1128 | +
1088 | 1129 | /* Same as __fragmentation index but allocs contig_page_info on stack */
1089 | 1130 | int fragmentation_index(struct zone *zone, unsigned int order)
1090 | 1131 | {
.. | ..
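Worked example of the new extfrag_for_order(): it reports the percentage of free pages sitting in blocks too small to satisfy an order-N request. The numbers below are invented for illustration:

```c
#include <stdio.h>

/* Userspace model of extfrag_for_order(): free_pages is the zone's total
 * free pages, suitable the count of free blocks of at least the wanted order.
 */
static unsigned int extfrag(unsigned long free_pages, unsigned long suitable,
			    unsigned int order)
{
	if (free_pages == 0)
		return 0;
	return (free_pages - (suitable << order)) * 100 / free_pages;
}

int main(void)
{
	/* 1000 free pages, 50 free blocks of order >= 4 (16 pages each):
	 * 800 pages are in big-enough blocks, 200 are not -> 20%.
	 */
	printf("extfrag = %u%%\n", extfrag(1000, 50, 4));
	return 0;
}
```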
1095 | 1136 | }
1096 | 1137 | #endif
1097 | 1138 |
1098 | | -#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
| 1139 | +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
| 1140 | + defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1099 | 1141 | #ifdef CONFIG_ZONE_DMA
1100 | 1142 | #define TEXT_FOR_DMA(xx) xx "_dma",
1101 | 1143 | #else
.. | ..
1118 | 1160 | TEXT_FOR_HIGHMEM(xx) xx "_movable",
1119 | 1161 |
1120 | 1162 | const char * const vmstat_text[] = {
1121 | | - /* enum zone_stat_item countes */
| 1163 | + /* enum zone_stat_item counters */
1122 | 1164 | "nr_free_pages",
1123 | 1165 | "nr_zone_inactive_anon",
1124 | 1166 | "nr_zone_active_anon",
.. | ..
1128 | 1170 | "nr_zone_write_pending",
1129 | 1171 | "nr_mlock",
1130 | 1172 | "nr_page_table_pages",
1131 | | - "nr_kernel_stack",
1132 | | -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1133 | | - "nr_shadow_call_stack_bytes",
1134 | | -#endif
1135 | 1173 | "nr_bounce",
1136 | | -#if IS_ENABLED(CONFIG_ZSMALLOC)
1137 | 1174 | "nr_zspages",
1138 | | -#endif
1139 | 1175 | "nr_free_cma",
1140 | 1176 |
1141 | 1177 | /* enum numa_stat_item counters */
.. | ..
1148 | 1184 | "numa_other",
1149 | 1185 | #endif
1150 | 1186 |
1151 | | - /* Node-based counters */
| 1187 | + /* enum node_stat_item counters */
| 1187 | + /* enum node_stat_item counters */ |
---|
1152 | 1188 | "nr_inactive_anon", |
---|
1153 | 1189 | "nr_active_anon", |
---|
1154 | 1190 | "nr_inactive_file", |
---|
.. | .. |
---|
1158 | 1194 | "nr_slab_unreclaimable", |
---|
1159 | 1195 | "nr_isolated_anon", |
---|
1160 | 1196 | "nr_isolated_file", |
---|
1161 | | - "workingset_refault", |
---|
1162 | | - "workingset_activate", |
---|
1163 | | - "workingset_restore", |
---|
| 1197 | + "workingset_nodes", |
---|
| 1198 | + "workingset_refault_anon", |
---|
| 1199 | + "workingset_refault_file", |
---|
| 1200 | + "workingset_activate_anon", |
---|
| 1201 | + "workingset_activate_file", |
---|
| 1202 | + "workingset_restore_anon", |
---|
| 1203 | + "workingset_restore_file", |
---|
1164 | 1204 | "workingset_nodereclaim", |
---|
1165 | 1205 | "nr_anon_pages", |
---|
1166 | 1206 | "nr_mapped", |
---|
.. | .. |
---|
1171 | 1211 | "nr_shmem", |
---|
1172 | 1212 | "nr_shmem_hugepages", |
---|
1173 | 1213 | "nr_shmem_pmdmapped", |
---|
| 1214 | + "nr_file_hugepages", |
---|
| 1215 | + "nr_file_pmdmapped", |
---|
1174 | 1216 | "nr_anon_transparent_hugepages", |
---|
1175 | | - "nr_unstable", |
---|
1176 | 1217 | "nr_vmscan_write", |
---|
1177 | 1218 | "nr_vmscan_immediate_reclaim", |
---|
1178 | 1219 | "nr_dirtied", |
---|
1179 | 1220 | "nr_written", |
---|
1180 | 1221 | "nr_kernel_misc_reclaimable", |
---|
1181 | | - "nr_unreclaimable_pages", |
---|
| 1222 | + "nr_foll_pin_acquired", |
---|
| 1223 | + "nr_foll_pin_released", |
---|
| 1224 | + "nr_kernel_stack", |
---|
| 1225 | +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) |
---|
| 1226 | + "nr_shadow_call_stack", |
---|
| 1227 | +#endif |
---|
1182 | 1228 |
1183 | | -
1184 | | - "nr_ion_heap",
1185 | | - "nr_ion_heap_pool",
1186 | | - "nr_gpu_heap",
1187 | 1229 | /* enum writeback_stat_item counters */
1188 | 1230 | "nr_dirty_threshold",
1189 | 1231 | "nr_dirty_background_threshold",
1190 | 1232 |
1191 | | -#ifdef CONFIG_VM_EVENT_COUNTERS
| 1233 | +#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1192 | 1234 | /* enum vm_event_item counters */
1193 | 1235 | "pgpgin",
1194 | 1236 | "pgpgout",
1195 | | - "pgpgoutclean",
1196 | 1237 | "pswpin",
1197 | 1238 | "pswpout",
1198 | 1239 |
.. | ..
1210 | 1251 | "pglazyfreed",
1211 | 1252 |
1212 | 1253 | "pgrefill",
| 1254 | + "pgreuse",
1213 | 1255 | "pgsteal_kswapd",
1214 | 1256 | "pgsteal_direct",
1215 | 1257 | "pgscan_kswapd",
1216 | 1258 | "pgscan_direct",
1217 | 1259 | "pgscan_direct_throttle",
| 1260 | + "pgscan_anon",
| 1261 | + "pgscan_file",
| 1262 | + "pgsteal_anon",
| 1263 | + "pgsteal_file",
1218 | 1264 |
1219 | 1265 | #ifdef CONFIG_NUMA
1220 | 1266 | "zone_reclaim_failed",
.. | ..
1242 | 1288 | #ifdef CONFIG_MIGRATION
1243 | 1289 | "pgmigrate_success",
1244 | 1290 | "pgmigrate_fail",
| 1291 | + "thp_migration_success",
| 1292 | + "thp_migration_fail",
| 1293 | + "thp_migration_split",
1245 | 1294 | #endif
1246 | 1295 | #ifdef CONFIG_COMPACTION
1247 | 1296 | "compact_migrate_scanned",
.. | ..
1259 | 1308 | "htlb_buddy_alloc_success",
1260 | 1309 | "htlb_buddy_alloc_fail",
1261 | 1310 | #endif
| 1311 | +#ifdef CONFIG_CMA
| 1312 | + "cma_alloc_success",
| 1313 | + "cma_alloc_fail",
| 1314 | +#endif
1262 | 1315 | "unevictable_pgs_culled",
1263 | 1316 | "unevictable_pgs_scanned",
1264 | 1317 | "unevictable_pgs_rescued",
.. | ..
1270 | 1323 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1271 | 1324 | "thp_fault_alloc",
1272 | 1325 | "thp_fault_fallback",
| 1326 | + "thp_fault_fallback_charge",
1273 | 1327 | "thp_collapse_alloc",
1274 | 1328 | "thp_collapse_alloc_failed",
1275 | 1329 | "thp_file_alloc",
| 1330 | + "thp_file_fallback",
| 1331 | + "thp_file_fallback_charge",
1276 | 1332 | "thp_file_mapped",
1277 | 1333 | "thp_split_page",
1278 | 1334 | "thp_split_page_failed",
.. | ..
1308 | 1364 | "swap_ra",
1309 | 1365 | "swap_ra_hit",
1310 | 1366 | #endif
1311 | | -#endif /* CONFIG_VM_EVENTS_COUNTERS */
| 1367 | +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
| 1368 | + "speculative_pgfault",
| 1369 | + "speculative_pgfault_file"
| 1370 | +#endif
| 1371 | +#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1312 | 1372 | };
1313 | | -#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
| 1373 | +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1314 | 1374 |
1315 | 1375 | #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1316 | 1376 | defined(CONFIG_PROC_FS)
.. | ..
1400 | 1460 | unsigned long freecount = 0;
1401 | 1461 | struct free_area *area;
1402 | 1462 | struct list_head *curr;
| 1463 | + bool overflow = false;
1403 | 1464 |
1404 | 1465 | area = &(zone->free_area[order]);
1405 | 1466 |
1406 | | - list_for_each(curr, &area->free_list[mtype])
1407 | | - freecount++;
1408 | | - seq_printf(m, "%6lu ", freecount);
| 1467 | + list_for_each(curr, &area->free_list[mtype]) {
| 1468 | + /*
| 1469 | + * Cap the free_list iteration because it might
| 1470 | + * be really large and we are under a spinlock
| 1471 | + * so a long time spent here could trigger a
| 1472 | + * hard lockup detector. Anyway this is a
| 1473 | + * debugging tool so knowing there is a handful
| 1474 | + * of pages of this order should be more than
| 1475 | + * sufficient.
| 1476 | + */
| 1477 | + if (++freecount >= 100000) {
| 1478 | + overflow = true;
| 1479 | + break;
| 1480 | + }
| 1481 | + }
| 1482 | + seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1409 | 1483 | spin_unlock_irq(&zone->lock);
1410 | 1484 | cond_resched();
1411 | 1485 | spin_lock_irq(&zone->lock);
.. | ..
1445 | 1519 |
1446 | 1520 | page = pfn_to_online_page(pfn);
1447 | 1521 | if (!page)
1448 | | - continue;
1449 | | -
1450 | | - /* Watch for unexpected holes punched in the memmap */
1451 | | - if (!memmap_valid_within(pfn, page, zone))
1452 | 1522 | continue;
1453 | 1523 |
1454 | 1524 | if (page_zone(page) != zone)
.. | ..
1567 | 1637 | if (is_zone_first_populated(pgdat, zone)) {
1568 | 1638 | seq_printf(m, "\n per-node stats");
1569 | 1639 | for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1570 | | - /* Skip hidden vmstat items. */
1571 | | - if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1572 | | - NR_VM_NUMA_STAT_ITEMS] == '\0')
1573 | | - continue;
1574 | | - seq_printf(m, "\n %-12s %lu",
1575 | | - vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1576 | | - NR_VM_NUMA_STAT_ITEMS],
1577 | | - node_page_state(pgdat, i));
| 1640 | + seq_printf(m, "\n %-12s %lu", node_stat_name(i),
| 1641 | + node_page_state_pages(pgdat, i));
1578 | 1642 | }
1579 | 1643 | }
1580 | 1644 | seq_printf(m,
.. | ..
1584 | 1648 | "\n high %lu"
1585 | 1649 | "\n spanned %lu"
1586 | 1650 | "\n present %lu"
1587 | | - "\n managed %lu",
| 1651 | + "\n managed %lu"
| 1652 | + "\n cma %lu",
1588 | 1653 | zone_page_state(zone, NR_FREE_PAGES),
1589 | 1654 | min_wmark_pages(zone),
1590 | 1655 | low_wmark_pages(zone),
1591 | 1656 | high_wmark_pages(zone),
1592 | 1657 | zone->spanned_pages,
1593 | 1658 | zone->present_pages,
1594 | | - zone->managed_pages);
| 1659 | + zone_managed_pages(zone),
| 1660 | + zone_cma_pages(zone));
1595 | 1661 |
1596 | 1662 | seq_printf(m,
1597 | 1663 | "\n protection: (%ld",
.. | ..
1607 | 1673 | }
1608 | 1674 |
1609 | 1675 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1610 | | - seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1611 | | - zone_page_state(zone, i));
| 1676 | + seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
| 1677 | + zone_page_state(zone, i));
1612 | 1678 |
1613 | 1679 | #ifdef CONFIG_NUMA
1614 | 1680 | for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1615 | | - seq_printf(m, "\n %-12s %lu",
1616 | | - vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1617 | | - zone_numa_state_snapshot(zone, i));
| 1681 | + seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
| 1682 | + zone_numa_state_snapshot(zone, i));
1618 | 1683 | #endif
1619 | 1684 |
1620 | 1685 | seq_printf(m, "\n pagesets");
.. | ..
1665 | 1730 | .show = zoneinfo_show,
1666 | 1731 | };
1667 | 1732 |
1668 | | -enum writeback_stat_item {
1669 | | - NR_DIRTY_THRESHOLD,
1670 | | - NR_DIRTY_BG_THRESHOLD,
1671 | | - NR_VM_WRITEBACK_STAT_ITEMS,
1672 | | -};
| 1733 | +#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
| 1734 | + NR_VM_NUMA_STAT_ITEMS + \
| 1735 | + NR_VM_NODE_STAT_ITEMS + \
| 1736 | + NR_VM_WRITEBACK_STAT_ITEMS + \
| 1737 | + (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
| 1738 | + NR_VM_EVENT_ITEMS : 0))
1673 | 1739 |
1674 | 1740 | static void *vmstat_start(struct seq_file *m, loff_t *pos)
1675 | 1741 | {
1676 | 1742 | unsigned long *v;
1677 | | - int i, stat_items_size;
| 1743 | + int i;
1678 | 1744 |
1679 | | - if (*pos >= ARRAY_SIZE(vmstat_text))
| 1745 | + if (*pos >= NR_VMSTAT_ITEMS)
1680 | 1746 | return NULL;
1681 | | - stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1682 | | - NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
1683 | | - NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1684 | | - NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1685 | 1747 |
1686 | | -#ifdef CONFIG_VM_EVENT_COUNTERS
1687 | | - stat_items_size += sizeof(struct vm_event_state);
1688 | | -#endif
1689 | | -
1690 | | - v = kmalloc(stat_items_size, GFP_KERNEL);
| 1748 | + BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
| 1749 | + v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1691 | 1750 | m->private = v;
1692 | 1751 | if (!v)
1693 | 1752 | return ERR_PTR(-ENOMEM);
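The snapshot buffer and vmstat_text[] are parallel arrays walked with the same offset, which is what the new BUILD_BUG_ON enforces: the text table may be larger than NR_VMSTAT_ITEMS (for instance, event names compiled in for CONFIG_MEMCG while CONFIG_VM_EVENT_COUNTERS is off), but never smaller, and trailing names are simply never printed. A toy model with invented names and sizes:

```c
#include <stdio.h>

#define N_ITEMS 3	/* stands in for NR_VMSTAT_ITEMS */

/* stands in for vmstat_text[]; may legitimately have extra entries */
static const char *const text[] = {
	"nr_alpha", "nr_beta", "nr_gamma", "nr_extra_never_printed",
};

int main(void)
{
	unsigned long v[N_ITEMS] = { 10, 20, 30 };	/* the snapshot */

	/* like vmstat_show(): text[off] is printed next to v[off] */
	for (int off = 0; off < N_ITEMS; off++)
		printf("%s %lu\n", text[off], v[off]);
	return 0;
}
```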
.. | ..
1702 | 1761 | #endif
1703 | 1762 |
1704 | 1763 | for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1705 | | - v[i] = global_node_page_state(i);
| 1764 | + v[i] = global_node_page_state_pages(i);
1706 | 1765 | v += NR_VM_NODE_STAT_ITEMS;
1707 | 1766 |
1708 | 1767 | global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
.. | ..
1720 | 1779 | static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1721 | 1780 | {
1722 | 1781 | (*pos)++;
1723 | | - //nr_gpu_heap is out-of-tree now so we don't want to export it.
1724 | | - if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
1725 | | - (*pos)++;
1726 | | - if (*pos >= ARRAY_SIZE(vmstat_text))
| 1782 | + if (*pos >= NR_VMSTAT_ITEMS)
1727 | 1783 | return NULL;
1728 | 1784 | return (unsigned long *)m->private + *pos;
1729 | 1785 | }
.. | ..
1736 | 1792 | seq_puts(m, vmstat_text[off]);
1737 | 1793 | seq_put_decimal_ull(m, " ", *l);
1738 | 1794 | seq_putc(m, '\n');
| 1795 | +
| 1796 | + if (off == NR_VMSTAT_ITEMS - 1) {
| 1797 | + /*
| 1798 | + * We've come to the end - add any deprecated counters to avoid
| 1799 | + * breaking userspace which might depend on them being present.
| 1800 | + */
| 1801 | + seq_puts(m, "nr_unstable 0\n");
| 1802 | + }
1739 | 1803 | return 0;
1740 | 1804 | }
1741 | 1805 |
1741 | 1805 | |
---|
.. | .. |
---|
1764 | 1828 | } |
---|
1765 | 1829 | |
---|
1766 | 1830 | int vmstat_refresh(struct ctl_table *table, int write, |
---|
1767 | | - void __user *buffer, size_t *lenp, loff_t *ppos) |
---|
| 1831 | + void *buffer, size_t *lenp, loff_t *ppos) |
---|
1768 | 1832 | { |
---|
1769 | 1833 | long val; |
---|
1770 | 1834 | int err; |
---|
.. | ..
1789 | 1853 | val = atomic_long_read(&vm_zone_stat[i]);
1790 | 1854 | if (val < 0) {
1791 | 1855 | pr_warn("%s: %s %ld\n",
1792 | | - __func__, vmstat_text[i], val);
| 1856 | + __func__, zone_stat_name(i), val);
1793 | 1857 | err = -EINVAL;
1794 | 1858 | }
1795 | 1859 | }
.. | ..
1798 | 1862 | val = atomic_long_read(&vm_numa_stat[i]);
1799 | 1863 | if (val < 0) {
1800 | 1864 | pr_warn("%s: %s %ld\n",
1801 | | - __func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
| 1865 | + __func__, numa_stat_name(i), val);
1802 | 1866 | err = -EINVAL;
1803 | 1867 | }
1804 | 1868 | }
.. | ..
2068 | 2132 | return 0;
2069 | 2133 | }
2070 | 2134 |
2071 | | -static const struct seq_operations unusable_op = {
| 2135 | +static const struct seq_operations unusable_sops = {
2072 | 2136 | .start = frag_start,
2073 | 2137 | .next = frag_next,
2074 | 2138 | .stop = frag_stop,
2075 | 2139 | .show = unusable_show,
2076 | 2140 | };
2077 | 2141 |
2078 | | -static int unusable_open(struct inode *inode, struct file *file)
2079 | | -{
2080 | | - return seq_open(file, &unusable_op);
2081 | | -}
2082 | | -
2083 | | -static const struct file_operations unusable_file_ops = {
2084 | | - .open = unusable_open,
2085 | | - .read = seq_read,
2086 | | - .llseek = seq_lseek,
2087 | | - .release = seq_release,
2088 | | -};
| 2142 | +DEFINE_SEQ_ATTRIBUTE(unusable);
2089 | 2143 |
2090 | 2144 | static void extfrag_show_print(struct seq_file *m,
2091 | 2145 | pg_data_t *pgdat, struct zone *zone)
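DEFINE_SEQ_ATTRIBUTE(unusable) generates the boilerplate deleted above: an `unusable_open()` wired to `unusable_sops` and a `struct file_operations` named `unusable_fops`, which is why the seq_operations had to gain the `_sops` suffix. Roughly, paraphrased from `<linux/seq_file.h>` (the real macro also forwards `inode->i_private` into the seq_file's private data):

```c
/* Approximate shape of the helper macro; see <linux/seq_file.h> for the
 * authoritative definition.
 */
#define DEFINE_SEQ_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return seq_open(file, &__name ## _sops);			\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner   = THIS_MODULE,						\
	.open    = __name ## _open,					\
	.read    = seq_read,						\
	.llseek  = seq_lseek,						\
	.release = seq_release,						\
}
```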
.. | ..
2120 | 2174 | return 0;
2121 | 2175 | }
2122 | 2176 |
2123 | | -static const struct seq_operations extfrag_op = {
| 2177 | +static const struct seq_operations extfrag_sops = {
2124 | 2178 | .start = frag_start,
2125 | 2179 | .next = frag_next,
2126 | 2180 | .stop = frag_stop,
2127 | 2181 | .show = extfrag_show,
2128 | 2182 | };
2129 | 2183 |
2130 | | -static int extfrag_open(struct inode *inode, struct file *file)
2131 | | -{
2132 | | - return seq_open(file, &extfrag_op);
2133 | | -}
2134 | | -
2135 | | -static const struct file_operations extfrag_file_ops = {
2136 | | - .open = extfrag_open,
2137 | | - .read = seq_read,
2138 | | - .llseek = seq_lseek,
2139 | | - .release = seq_release,
2140 | | -};
| 2184 | +DEFINE_SEQ_ATTRIBUTE(extfrag);
2141 | 2185 |
2142 | 2186 | static int __init extfrag_debug_init(void)
2143 | 2187 | {
2144 | 2188 | struct dentry *extfrag_debug_root;
2145 | 2189 |
2146 | 2190 | extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2147 | | - if (!extfrag_debug_root)
2148 | | - return -ENOMEM;
2149 | 2191 |
2150 | | - if (!debugfs_create_file("unusable_index", 0444,
2151 | | - extfrag_debug_root, NULL, &unusable_file_ops))
2152 | | - goto fail;
| 2192 | + debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
| 2193 | + &unusable_fops);
2153 | 2194 |
2154 | | - if (!debugfs_create_file("extfrag_index", 0444,
2155 | | - extfrag_debug_root, NULL, &extfrag_file_ops))
2156 | | - goto fail;
| 2195 | + debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
| 2196 | + &extfrag_fops);
2157 | 2197 |
2158 | 2198 | return 0;
2159 | | -fail:
2160 | | - debugfs_remove_recursive(extfrag_debug_root);
2161 | | - return -ENOMEM;
2162 | 2199 | }
2163 | 2200 |
2164 | 2201 | module_init(extfrag_debug_init);