.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * linux/mm/vmstat.c
3 | 4 | *
.. | ..
75 | 76 | static DEFINE_MUTEX(vm_numa_stat_lock);
76 | 77 |
77 | 78 | int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 | | - void __user *buffer, size_t *length, loff_t *ppos)
| 79 | + void *buffer, size_t *length, loff_t *ppos)
79 | 80 | {
80 | 81 | int ret, oldval;
81 | 82 |
.. | ..
227 | 228 | * 125 1024 10 16-32 GB 9
228 | 229 | */
229 | 230 |
230 | | - mem = zone->managed_pages >> (27 - PAGE_SHIFT);
| 231 | + mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
231 | 232 |
232 | 233 | threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
233 | 234 |
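A quick sanity check of the threshold formula above, assuming fls() returns the 1-based position of the most significant set bit: mem is the zone size in 128 MB units (managed pages shifted right by 27 - PAGE_SHIFT), so a 16 GB zone gives mem = 128 and fls(mem) = 8; with 8 online CPUs, fls(8) = 4, hence threshold = 2 * 4 * (1 + 8) = 72. That sits below 125, the largest threshold in the table row retained above.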
.. | ..
324 | 325 |
325 | 326 | t = __this_cpu_read(pcp->stat_threshold);
326 | 327 |
327 | | - if (unlikely(x > t || x < -t)) {
| 328 | + if (unlikely(abs(x) > t)) {
328 | 329 | zone_page_state_add(x, zone, item);
329 | 330 | x = 0;
330 | 331 | }
.. | ..
340 | 341 | long x;
341 | 342 | long t;
342 | 343 |
| 344 | + if (vmstat_item_in_bytes(item)) {
| 345 | + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
| 346 | + delta >>= PAGE_SHIFT;
| 347 | + }
| 348 | +
343 | 349 | x = delta + __this_cpu_read(*p);
344 | 350 |
345 | 351 | t = __this_cpu_read(pcp->stat_threshold);
346 | 352 |
347 | | - if (unlikely(x > t || x < -t)) {
| 353 | + if (unlikely(abs(x) > t)) {
348 | 354 | node_page_state_add(x, pgdat, item);
349 | 355 | x = 0;
350 | 356 | }
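With the vmstat_item_in_bytes() branch, byte-accounted node items are stored internally in pages, so a caller of __mod_node_page_state() must pass a page-multiple byte delta or trip the VM_WARN_ON_ONCE(). A minimal caller sketch, assuming this tree defines a byte-based item such as NR_SLAB_RECLAIMABLE_B:

	/* Illustrative only: byte items may only move in whole-page steps here. */
	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);        /* charge one page */
	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, -(long)PAGE_SIZE); /* uncharge it */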
.. | ..
397 | 403 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
398 | 404 | s8 v, t;
399 | 405 |
| 406 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
| 407 | +
400 | 408 | v = __this_cpu_inc_return(*p);
401 | 409 | t = __this_cpu_read(pcp->stat_threshold);
402 | 410 | if (unlikely(v > t)) {
.. | ..
440 | 448 | struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
441 | 449 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
442 | 450 | s8 v, t;
| 451 | +
| 452 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
443 | 453 |
444 | 454 | v = __this_cpu_dec_return(*p);
445 | 455 | t = __this_cpu_read(pcp->stat_threshold);
.. | ..
501 | 511 | o = this_cpu_read(*p);
502 | 512 | n = delta + o;
503 | 513 |
504 | | - if (n > t || n < -t) {
| 514 | + if (abs(n) > t) {
505 | 515 | int os = overstep_mode * (t >> 1) ;
506 | 516 |
507 | 517 | /* Overflow must be added to zone counters */
.. | ..
540 | 550 | s8 __percpu *p = pcp->vm_node_stat_diff + item;
541 | 551 | long o, n, t, z;
542 | 552 |
| 553 | + if (vmstat_item_in_bytes(item)) {
| 554 | + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
| 555 | + delta >>= PAGE_SHIFT;
| 556 | + }
| 557 | +
543 | 558 | do {
544 | 559 | z = 0; /* overflow to node counters */
545 | 560 |
.. | ..
558 | 573 | o = this_cpu_read(*p);
559 | 574 | n = delta + o;
560 | 575 |
561 | | - if (n > t || n < -t) {
| 576 | + if (abs(n) > t) {
562 | 577 | int os = overstep_mode * (t >> 1) ;
563 | 578 |
564 | 579 | /* Overflow must be added to node counters */
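To make the overstep concrete: with threshold t = 32 and overstep_mode = 1, a delta that lands the per-CPU diff at n = 33 gives os = 16; the lines elided after this hunk then fold the full overshoot (n + os = 49) into the global counter and restart the per-CPU diff offset by -os, buying an extra half-threshold of headroom before the next fold. The abs(n) > t rewrite changes none of this; it is the same symmetric window test.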
.. | ..
988 | 1003 | /*
989 | 1004 | * Determine the per node value of a stat item.
990 | 1005 | */
991 | | -unsigned long node_page_state(struct pglist_data *pgdat,
992 | | - enum node_stat_item item)
| 1006 | +unsigned long node_page_state_pages(struct pglist_data *pgdat,
| 1007 | + enum node_stat_item item)
993 | 1008 | {
994 | 1009 | long x = atomic_long_read(&pgdat->vm_stat[item]);
995 | 1010 | #ifdef CONFIG_SMP
.. | ..
997 | 1012 | x = 0;
998 | 1013 | #endif
999 | 1014 | return x;
| 1015 | +}
| 1016 | +
| 1017 | +unsigned long node_page_state(struct pglist_data *pgdat,
| 1018 | + enum node_stat_item item)
| 1019 | +{
| 1020 | + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
| 1021 | +
| 1022 | + return node_page_state_pages(pgdat, item);
1000 | 1023 | }
1001 | 1024 | #endif
1002 | 1025 |
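node_page_state() keeps its page-unit contract and now warns when handed a byte-based item, while node_page_state_pages() reads any item in page units. A hedged sketch of how a reader could recover bytes, on the assumption (implied by the __mod_node_page_state() hunk above) that byte items are stored internally as pages; node_state_bytes() is a hypothetical helper, not part of this patch:

	static unsigned long node_state_bytes(struct pglist_data *pgdat,
					      enum node_stat_item item)
	{
		/* hypothetical: byte items are stored as pages internally */
		return node_page_state_pages(pgdat, item) << PAGE_SHIFT;
	}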
.. | ..
1073 | 1096 | return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
1074 | 1097 | }
1075 | 1098 |
| 1099 | +/*
| 1100 | + * Calculates external fragmentation within a zone wrt the given order.
| 1101 | + * It is defined as the percentage of pages found in blocks of size
| 1102 | + * less than 1 << order. It returns values in range [0, 100].
| 1103 | + */
| 1104 | +unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
| 1105 | +{
| 1106 | + struct contig_page_info info;
| 1107 | +
| 1108 | + fill_contig_page_info(zone, order, &info);
| 1109 | + if (info.free_pages == 0)
| 1110 | + return 0;
| 1111 | +
| 1112 | + return div_u64((info.free_pages -
| 1113 | + (info.free_blocks_suitable << order)) * 100,
| 1114 | + info.free_pages);
| 1115 | +}
| 1116 | +
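Worked example for extfrag_for_order(): at order = 3, suppose fill_contig_page_info() reports free_pages = 1000 and free_blocks_suitable = 100. The suitable blocks account for 100 << 3 = 800 pages, so the function returns (1000 - 800) * 100 / 1000 = 20: one fifth of the free memory sits in blocks too small to serve an order-3 allocation.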
1076 | 1117 | /* Same as __fragmentation index but allocs contig_page_info on stack */
1077 | 1118 | int fragmentation_index(struct zone *zone, unsigned int order)
1078 | 1119 | {
.. | ..
1083 | 1124 | }
1084 | 1125 | #endif
1085 | 1126 |
1086 | | -#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
| 1127 | +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
| 1128 | + defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1087 | 1129 | #ifdef CONFIG_ZONE_DMA
1088 | 1130 | #define TEXT_FOR_DMA(xx) xx "_dma",
1089 | 1131 | #else
.. | ..
1106 | 1148 | TEXT_FOR_HIGHMEM(xx) xx "_movable",
1107 | 1149 |
1108 | 1150 | const char * const vmstat_text[] = {
1109 | | - /* enum zone_stat_item countes */
| 1151 | + /* enum zone_stat_item counters */
1110 | 1152 | "nr_free_pages",
1111 | 1153 | "nr_zone_inactive_anon",
1112 | 1154 | "nr_zone_active_anon",
.. | ..
1116 | 1158 | "nr_zone_write_pending",
1117 | 1159 | "nr_mlock",
1118 | 1160 | "nr_page_table_pages",
1119 | | - "nr_kernel_stack",
1120 | | -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1121 | | - "nr_shadow_call_stack_bytes",
1122 | | -#endif
1123 | 1161 | "nr_bounce",
1124 | | -#if IS_ENABLED(CONFIG_ZSMALLOC)
1125 | 1162 | "nr_zspages",
1126 | | -#endif
1127 | 1163 | "nr_free_cma",
1128 | 1164 |
1129 | 1165 | /* enum numa_stat_item counters */
.. | ..
1136 | 1172 | "numa_other",
1137 | 1173 | #endif
1138 | 1174 |
1139 | | - /* Node-based counters */
| 1175 | + /* enum node_stat_item counters */
1140 | 1176 | "nr_inactive_anon",
1141 | 1177 | "nr_active_anon",
1142 | 1178 | "nr_inactive_file",
.. | ..
1146 | 1182 | "nr_slab_unreclaimable",
1147 | 1183 | "nr_isolated_anon",
1148 | 1184 | "nr_isolated_file",
1149 | | - "workingset_refault",
1150 | | - "workingset_activate",
1151 | | - "workingset_restore",
| 1185 | + "workingset_nodes",
| 1186 | + "workingset_refault_anon",
| 1187 | + "workingset_refault_file",
| 1188 | + "workingset_activate_anon",
| 1189 | + "workingset_activate_file",
| 1190 | + "workingset_restore_anon",
| 1191 | + "workingset_restore_file",
1152 | 1192 | "workingset_nodereclaim",
1153 | 1193 | "nr_anon_pages",
1154 | 1194 | "nr_mapped",
.. | ..
1159 | 1199 | "nr_shmem",
1160 | 1200 | "nr_shmem_hugepages",
1161 | 1201 | "nr_shmem_pmdmapped",
| 1202 | + "nr_file_hugepages",
| 1203 | + "nr_file_pmdmapped",
1162 | 1204 | "nr_anon_transparent_hugepages",
1163 | | - "nr_unstable",
1164 | 1205 | "nr_vmscan_write",
1165 | 1206 | "nr_vmscan_immediate_reclaim",
1166 | 1207 | "nr_dirtied",
1167 | 1208 | "nr_written",
1168 | 1209 | "nr_kernel_misc_reclaimable",
1169 | | - "nr_unreclaimable_pages",
| 1210 | + "nr_foll_pin_acquired",
| 1211 | + "nr_foll_pin_released",
| 1212 | + "nr_kernel_stack",
| 1213 | +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
| 1214 | + "nr_shadow_call_stack",
| 1215 | +#endif
1170 | 1216 |
1171 | | -
1172 | | - "nr_ion_heap",
1173 | | - "nr_ion_heap_pool",
1174 | | - "nr_gpu_heap",
1175 | 1217 | /* enum writeback_stat_item counters */
1176 | 1218 | "nr_dirty_threshold",
1177 | 1219 | "nr_dirty_background_threshold",
1178 | 1220 |
1179 | | -#ifdef CONFIG_VM_EVENT_COUNTERS
| 1221 | +#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1180 | 1222 | /* enum vm_event_item counters */
1181 | 1223 | "pgpgin",
1182 | 1224 | "pgpgout",
1183 | | - "pgpgoutclean",
1184 | 1225 | "pswpin",
1185 | 1226 | "pswpout",
1186 | 1227 |
.. | ..
1198 | 1239 | "pglazyfreed",
1199 | 1240 |
1200 | 1241 | "pgrefill",
| 1242 | + "pgreuse",
1201 | 1243 | "pgsteal_kswapd",
1202 | 1244 | "pgsteal_direct",
1203 | 1245 | "pgscan_kswapd",
1204 | 1246 | "pgscan_direct",
1205 | 1247 | "pgscan_direct_throttle",
| 1248 | + "pgscan_anon",
| 1249 | + "pgscan_file",
| 1250 | + "pgsteal_anon",
| 1251 | + "pgsteal_file",
1206 | 1252 |
1207 | 1253 | #ifdef CONFIG_NUMA
1208 | 1254 | "zone_reclaim_failed",
.. | ..
1230 | 1276 | #ifdef CONFIG_MIGRATION
1231 | 1277 | "pgmigrate_success",
1232 | 1278 | "pgmigrate_fail",
| 1279 | + "thp_migration_success",
| 1280 | + "thp_migration_fail",
| 1281 | + "thp_migration_split",
1233 | 1282 | #endif
1234 | 1283 | #ifdef CONFIG_COMPACTION
1235 | 1284 | "compact_migrate_scanned",
.. | ..
1247 | 1296 | "htlb_buddy_alloc_success",
1248 | 1297 | "htlb_buddy_alloc_fail",
1249 | 1298 | #endif
| 1299 | +#ifdef CONFIG_CMA
| 1300 | + "cma_alloc_success",
| 1301 | + "cma_alloc_fail",
| 1302 | +#endif
1250 | 1303 | "unevictable_pgs_culled",
1251 | 1304 | "unevictable_pgs_scanned",
1252 | 1305 | "unevictable_pgs_rescued",
.. | ..
1258 | 1311 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1259 | 1312 | "thp_fault_alloc",
1260 | 1313 | "thp_fault_fallback",
| 1314 | + "thp_fault_fallback_charge",
1261 | 1315 | "thp_collapse_alloc",
1262 | 1316 | "thp_collapse_alloc_failed",
1263 | 1317 | "thp_file_alloc",
| 1318 | + "thp_file_fallback",
| 1319 | + "thp_file_fallback_charge",
1264 | 1320 | "thp_file_mapped",
1265 | 1321 | "thp_split_page",
1266 | 1322 | "thp_split_page_failed",
.. | ..
1296 | 1352 | "swap_ra",
1297 | 1353 | "swap_ra_hit",
1298 | 1354 | #endif
1299 | | -#endif /* CONFIG_VM_EVENTS_COUNTERS */
| 1355 | +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
| 1356 | + "speculative_pgfault",
| 1357 | + "speculative_pgfault_file"
| 1358 | +#endif
| 1359 | +#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1300 | 1360 | };
1301 | | -#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
| 1361 | +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1302 | 1362 |
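Later hunks swap open-coded vmstat_text[] indexing for zone_stat_name(), numa_stat_name() and node_stat_name(). Reconstructed from the index arithmetic the old code used (the exact header definitions may differ slightly), they amount to:

	static inline const char *zone_stat_name(enum zone_stat_item item)
	{
		return vmstat_text[item];
	}

	static inline const char *numa_stat_name(enum numa_stat_item item)
	{
		return vmstat_text[NR_VM_ZONE_STAT_ITEMS + item];
	}

	static inline const char *node_stat_name(enum node_stat_item item)
	{
		return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
				   NR_VM_NUMA_STAT_ITEMS + item];
	}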
1303 | 1363 | #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1304 | 1364 | defined(CONFIG_PROC_FS)
.. | ..
1388 | 1448 | unsigned long freecount = 0;
1389 | 1449 | struct free_area *area;
1390 | 1450 | struct list_head *curr;
| 1451 | + bool overflow = false;
1391 | 1452 |
1392 | 1453 | area = &(zone->free_area[order]);
1393 | 1454 |
1394 | | - list_for_each(curr, &area->free_list[mtype])
1395 | | - freecount++;
1396 | | - seq_printf(m, "%6lu ", freecount);
| 1455 | + list_for_each(curr, &area->free_list[mtype]) {
| 1456 | + /*
| 1457 | + * Cap the free_list iteration because it might
| 1458 | + * be really large and we are under a spinlock
| 1459 | + * so a long time spent here could trigger a
| 1460 | + * hard lockup detector. Anyway this is a
| 1461 | + * debugging tool so knowing there is a handful
| 1462 | + * of pages of this order should be more than
| 1463 | + * sufficient.
| 1464 | + */
| 1465 | + if (++freecount >= 100000) {
| 1466 | + overflow = true;
| 1467 | + break;
| 1468 | + }
| 1469 | + }
| 1470 | + seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1397 | 1471 | spin_unlock_irq(&zone->lock);
1398 | 1472 | cond_resched();
1399 | 1473 | spin_lock_irq(&zone->lock);
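With the cap, an over-long free list is reported as a count prefixed with '>', so a column in the output reads ">100000" instead of an exact figure. Assuming this is the /proc/pagetypeinfo free-list printer, that prefix is the only user-visible format change.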
.. | ..
1433 | 1507 |
1434 | 1508 | page = pfn_to_online_page(pfn);
1435 | 1509 | if (!page)
1436 | | - continue;
1437 | | -
1438 | | - /* Watch for unexpected holes punched in the memmap */
1439 | | - if (!memmap_valid_within(pfn, page, zone))
1440 | 1510 | continue;
1441 | 1511 |
1442 | 1512 | if (page_zone(page) != zone)
.. | ..
1555 | 1625 | if (is_zone_first_populated(pgdat, zone)) {
1556 | 1626 | seq_printf(m, "\n per-node stats");
1557 | 1627 | for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1558 | | - /* Skip hidden vmstat items. */
1559 | | - if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1560 | | - NR_VM_NUMA_STAT_ITEMS] == '\0')
1561 | | - continue;
1562 | | - seq_printf(m, "\n %-12s %lu",
1563 | | - vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
1564 | | - NR_VM_NUMA_STAT_ITEMS],
1565 | | - node_page_state(pgdat, i));
| 1628 | + seq_printf(m, "\n %-12s %lu", node_stat_name(i),
| 1629 | + node_page_state_pages(pgdat, i));
1566 | 1630 | }
1567 | 1631 | }
1568 | 1632 | seq_printf(m,
.. | ..
1572 | 1636 | "\n high %lu"
1573 | 1637 | "\n spanned %lu"
1574 | 1638 | "\n present %lu"
1575 | | - "\n managed %lu",
| 1639 | + "\n managed %lu"
| 1640 | + "\n cma %lu",
1576 | 1641 | zone_page_state(zone, NR_FREE_PAGES),
1577 | 1642 | min_wmark_pages(zone),
1578 | 1643 | low_wmark_pages(zone),
1579 | 1644 | high_wmark_pages(zone),
1580 | 1645 | zone->spanned_pages,
1581 | 1646 | zone->present_pages,
1582 | | - zone->managed_pages);
| 1647 | + zone_managed_pages(zone),
| 1648 | + zone_cma_pages(zone));
1583 | 1649 |
1584 | 1650 | seq_printf(m,
1585 | 1651 | "\n protection: (%ld",
.. | ..
1595 | 1661 | }
1596 | 1662 |
1597 | 1663 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1598 | | - seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1599 | | - zone_page_state(zone, i));
| 1664 | + seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
| 1665 | + zone_page_state(zone, i));
1600 | 1666 |
1601 | 1667 | #ifdef CONFIG_NUMA
1602 | 1668 | for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1603 | | - seq_printf(m, "\n %-12s %lu",
1604 | | - vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1605 | | - zone_numa_state_snapshot(zone, i));
| 1669 | + seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
| 1670 | + zone_numa_state_snapshot(zone, i));
1606 | 1671 | #endif
1607 | 1672 |
1608 | 1673 | seq_printf(m, "\n pagesets");
.. | ..
1653 | 1718 | .show = zoneinfo_show,
1654 | 1719 | };
1655 | 1720 |
1656 | | -enum writeback_stat_item {
1657 | | - NR_DIRTY_THRESHOLD,
1658 | | - NR_DIRTY_BG_THRESHOLD,
1659 | | - NR_VM_WRITEBACK_STAT_ITEMS,
1660 | | -};
| 1721 | +#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
| 1722 | + NR_VM_NUMA_STAT_ITEMS + \
| 1723 | + NR_VM_NODE_STAT_ITEMS + \
| 1724 | + NR_VM_WRITEBACK_STAT_ITEMS + \
| 1725 | + (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
| 1726 | + NR_VM_EVENT_ITEMS : 0))
1661 | 1727 |
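Note the asymmetry this macro creates: when CONFIG_VM_EVENT_COUNTERS is off but CONFIG_MEMCG is on, vmstat_text[] still contains the event-counter names (see the widened #if around the array above) while NR_VMSTAT_ITEMS excludes them. That is why the BUILD_BUG_ON() in vmstat_start() below checks ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS rather than requiring strict equality.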
1662 | 1728 | static void *vmstat_start(struct seq_file *m, loff_t *pos)
1663 | 1729 | {
1664 | 1730 | unsigned long *v;
1665 | | - int i, stat_items_size;
| 1731 | + int i;
1666 | 1732 |
1667 | | - if (*pos >= ARRAY_SIZE(vmstat_text))
| 1733 | + if (*pos >= NR_VMSTAT_ITEMS)
1668 | 1734 | return NULL;
1669 | | - stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1670 | | - NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
1671 | | - NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
1672 | | - NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1673 | 1735 |
1674 | | -#ifdef CONFIG_VM_EVENT_COUNTERS
1675 | | - stat_items_size += sizeof(struct vm_event_state);
1676 | | -#endif
1677 | | -
1678 | | - v = kmalloc(stat_items_size, GFP_KERNEL);
| 1736 | + BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
| 1737 | + v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1679 | 1738 | m->private = v;
1680 | 1739 | if (!v)
1681 | 1740 | return ERR_PTR(-ENOMEM);
.. | ..
1690 | 1749 | #endif
1691 | 1750 |
1692 | 1751 | for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1693 | | - v[i] = global_node_page_state(i);
| 1752 | + v[i] = global_node_page_state_pages(i);
1694 | 1753 | v += NR_VM_NODE_STAT_ITEMS;
1695 | 1754 |
1696 | 1755 | global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
.. | ..
1708 | 1767 | static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1709 | 1768 | {
1710 | 1769 | (*pos)++;
1711 | | - //nr_gpu_heap is out-of-tree now so we don't want to export it.
1712 | | - if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
1713 | | - (*pos)++;
1714 | | - if (*pos >= ARRAY_SIZE(vmstat_text))
| 1770 | + if (*pos >= NR_VMSTAT_ITEMS)
1715 | 1771 | return NULL;
1716 | 1772 | return (unsigned long *)m->private + *pos;
1717 | 1773 | }
.. | ..
1724 | 1780 | seq_puts(m, vmstat_text[off]);
1725 | 1781 | seq_put_decimal_ull(m, " ", *l);
1726 | 1782 | seq_putc(m, '\n');
| 1783 | +
| 1784 | + if (off == NR_VMSTAT_ITEMS - 1) {
| 1785 | + /*
| 1786 | + * We've come to the end - add any deprecated counters to avoid
| 1787 | + * breaking userspace which might depend on them being present.
| 1788 | + */
| 1789 | + seq_puts(m, "nr_unstable 0\n");
| 1790 | + }
1727 | 1791 | return 0;
1728 | 1792 | }
1729 | 1793 |
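The effect is that /proc/vmstat ends with a hardwired compatibility line after the last live counter, along the lines of (values illustrative):

	swap_ra_hit 232
	nr_unstable 0

where nr_unstable is always 0 because the underlying counter was removed from the node-stat text above.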
.. | ..
1752 | 1816 | }
1753 | 1817 |
1754 | 1818 | int vmstat_refresh(struct ctl_table *table, int write,
1755 | | - void __user *buffer, size_t *lenp, loff_t *ppos)
| 1819 | + void *buffer, size_t *lenp, loff_t *ppos)
1756 | 1820 | {
1757 | 1821 | long val;
1758 | 1822 | int err;
.. | ..
1777 | 1841 | val = atomic_long_read(&vm_zone_stat[i]);
1778 | 1842 | if (val < 0) {
1779 | 1843 | pr_warn("%s: %s %ld\n",
1780 | | - __func__, vmstat_text[i], val);
| 1844 | + __func__, zone_stat_name(i), val);
1781 | 1845 | err = -EINVAL;
1782 | 1846 | }
1783 | 1847 | }
.. | ..
1786 | 1850 | val = atomic_long_read(&vm_numa_stat[i]);
1787 | 1851 | if (val < 0) {
1788 | 1852 | pr_warn("%s: %s %ld\n",
1789 | | - __func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
| 1853 | + __func__, numa_stat_name(i), val);
1790 | 1854 | err = -EINVAL;
1791 | 1855 | }
1792 | 1856 | }
.. | ..
2056 | 2120 | return 0;
2057 | 2121 | }
2058 | 2122 |
2059 | | -static const struct seq_operations unusable_op = {
| 2123 | +static const struct seq_operations unusable_sops = {
2060 | 2124 | .start = frag_start,
2061 | 2125 | .next = frag_next,
2062 | 2126 | .stop = frag_stop,
2063 | 2127 | .show = unusable_show,
2064 | 2128 | };
2065 | 2129 |
2066 | | -static int unusable_open(struct inode *inode, struct file *file)
2067 | | -{
2068 | | - return seq_open(file, &unusable_op);
2069 | | -}
2070 | | -
2071 | | -static const struct file_operations unusable_file_ops = {
2072 | | - .open = unusable_open,
2073 | | - .read = seq_read,
2074 | | - .llseek = seq_lseek,
2075 | | - .release = seq_release,
2076 | | -};
| 2130 | +DEFINE_SEQ_ATTRIBUTE(unusable);
2077 | 2131 |
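DEFINE_SEQ_ATTRIBUTE(unusable) from <linux/seq_file.h> generates the open helper and file_operations that were previously written out by hand; it keys off the <name>_sops naming, which is why the seq_operations above is renamed. Roughly (a sketch; the real macro also propagates inode->i_private into the seq_file's private field and sets .owner):

	static int unusable_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &unusable_sops);
	}

	static const struct file_operations unusable_fops = {
		.open    = unusable_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = seq_release,
	};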
2078 | 2132 | static void extfrag_show_print(struct seq_file *m,
2079 | 2133 | pg_data_t *pgdat, struct zone *zone)
.. | ..
2108 | 2162 | return 0;
2109 | 2163 | }
2110 | 2164 |
2111 | | -static const struct seq_operations extfrag_op = {
| 2165 | +static const struct seq_operations extfrag_sops = {
2112 | 2166 | .start = frag_start,
2113 | 2167 | .next = frag_next,
2114 | 2168 | .stop = frag_stop,
2115 | 2169 | .show = extfrag_show,
2116 | 2170 | };
2117 | 2171 |
2118 | | -static int extfrag_open(struct inode *inode, struct file *file)
2119 | | -{
2120 | | - return seq_open(file, &extfrag_op);
2121 | | -}
2122 | | -
2123 | | -static const struct file_operations extfrag_file_ops = {
2124 | | - .open = extfrag_open,
2125 | | - .read = seq_read,
2126 | | - .llseek = seq_lseek,
2127 | | - .release = seq_release,
2128 | | -};
| 2172 | +DEFINE_SEQ_ATTRIBUTE(extfrag);
2129 | 2173 |
2130 | 2174 | static int __init extfrag_debug_init(void)
2131 | 2175 | {
2132 | 2176 | struct dentry *extfrag_debug_root;
2133 | 2177 |
2134 | 2178 | extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2135 | | - if (!extfrag_debug_root)
2136 | | - return -ENOMEM;
2137 | 2179 |
2138 | | - if (!debugfs_create_file("unusable_index", 0444,
2139 | | - extfrag_debug_root, NULL, &unusable_file_ops))
2140 | | - goto fail;
| 2180 | + debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
| 2181 | + &unusable_fops);
2141 | 2182 |
2142 | | - if (!debugfs_create_file("extfrag_index", 0444,
2143 | | - extfrag_debug_root, NULL, &extfrag_file_ops))
2144 | | - goto fail;
| 2183 | + debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
| 2184 | + &extfrag_fops);
2145 | 2185 |
2146 | 2186 | return 0;
2147 | | -fail:
2148 | | - debugfs_remove_recursive(extfrag_debug_root);
2149 | | - return -ENOMEM;
2150 | 2187 | }
2151 | 2188 |
2152 | 2189 | module_init(extfrag_debug_init);