2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/mm/vmstat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/vmstat.c
  *
@@ -75,7 +76,7 @@
 static DEFINE_MUTEX(vm_numa_stat_lock);
 
 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
        int ret, oldval;
 
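
Note: dropping __user here follows the kernel-wide change that made sysctl ->proc_handler callbacks take kernel buffers, so handlers no longer touch user pointers directly. A minimal sketch of a handler against the post-change signature (the handler name is illustrative, not from this patch):

static int example_numa_stat_handler(struct ctl_table *table, int write,
                void *buffer, size_t *length, loff_t *ppos)
{
        /* proc_dointvec_minmax() now parses from / formats into a kernel buffer */
        int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

        if (ret || !write)
                return ret;
        /* react to the newly written value here */
        return 0;
}
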
@@ -227,7 +228,7 @@
  * 125		1024		10	16-32 GB	9
  */
 
-	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
+	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 
 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 
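
Note: a worked example of the heuristic above, assuming PAGE_SHIFT == 12: a 16 GB zone has 1 << 22 managed pages, so mem = (1 << 22) >> 15 = 128 units of 128 MB; with 8 online CPUs, threshold = 2 * fls(8) * (1 + fls(128)) = 2 * 4 * (1 + 8) = 72. vmstat.c then clamps the result to at most 125. A userspace sketch of the same arithmetic (fls_sketch() stands in for the kernel's fls()):

static int fls_sketch(unsigned long x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static int threshold_sketch(unsigned long managed_pages, int ncpus)
{
        /* mem is the zone size in units of 128 MB (1 << 27 bytes) */
        unsigned long mem = managed_pages >> (27 - 12);
        int threshold = 2 * fls_sketch(ncpus) * (1 + fls_sketch(mem));

        return threshold < 125 ? threshold : 125;
}
/* threshold_sketch(1UL << 22, 8) == 72 */
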
@@ -320,17 +321,15 @@
 	long x;
 	long t;
 
-	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
 
-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
-	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
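
Note: for the values involved here, abs(x) > t is exactly equivalent to x > t || x < -t (the kernel's abs() is type-generic, and x is a small folded per-CPU delta, never LONG_MIN in practice), so this is purely a readability change. A quick userspace check of the equivalence:

#include <assert.h>
#include <stdlib.h>

static void check_equiv(long x, long t)
{
        /* both sides are true iff x lies outside [-t, t] */
        assert((labs(x) > t) == (x > t || x < -t));
}
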
@@ -342,17 +341,20 @@
 	long x;
 	long t;
 
-	preempt_disable_rt();
+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
 
-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		node_page_state_add(x, pgdat, item);
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
-	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_node_page_state);
 
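
Note: this patch relies on a vmstat_item_in_bytes() predicate (upstream keeps it in include/linux/mmzone.h) that marks the node stat items whose callers pass byte rather than page counts; the alignment warning catches deltas that are not whole pages. A sketch of what such a predicate looks like — the exact item list varies by tree, so treat the names below as an assumption:

static __always_inline bool vmstat_item_in_bytes(int idx)
{
        /* assumed byte-counted items; upstream uses the *_B slab counters */
        return idx == NR_SLAB_RECLAIMABLE_B ||
               idx == NR_SLAB_UNRECLAIMABLE_B;
}
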
@@ -385,7 +387,6 @@
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
-	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -394,7 +395,6 @@
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
 	}
-	preempt_enable_rt();
 }
 
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -403,7 +403,8 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
-	preempt_disable_rt();
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -412,7 +413,6 @@
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
 	}
-	preempt_enable_rt();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -433,7 +433,6 @@
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
-	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -442,7 +441,6 @@
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
 	}
-	preempt_enable_rt();
 }
 
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -451,7 +449,8 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
-	preempt_disable_rt();
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -460,7 +459,6 @@
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
 	}
-	preempt_enable_rt();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -513,7 +511,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;
 
-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;
 
 			/* Overflow must be added to zone counters */
@@ -552,6 +550,11 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;
 
+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	do {
 		z = 0; /* overflow to node counters */
 
@@ -570,7 +573,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;
 
-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;
 
 			/* Overflow must be added to node counters */
@@ -1000,8 +1003,8 @@
 /*
  * Determine the per node value of a stat item.
  */
-unsigned long node_page_state(struct pglist_data *pgdat,
-				enum node_stat_item item)
+unsigned long node_page_state_pages(struct pglist_data *pgdat,
+				    enum node_stat_item item)
 {
 	long x = atomic_long_read(&pgdat->vm_stat[item]);
 #ifdef CONFIG_SMP
@@ -1009,6 +1012,14 @@
 		x = 0;
 #endif
 	return x;
+}
+
+unsigned long node_page_state(struct pglist_data *pgdat,
+			      enum node_stat_item item)
+{
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	return node_page_state_pages(pgdat, item);
 }
 #endif
 
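
Note: the split gives callers two entry points over the same counter: node_page_state_pages() returns the raw per-CPU-folded value, which is always kept in pages (even for byte-counted items, thanks to the conversion in the __mod helpers above), while node_page_state() keeps the old page-only contract and warns once if handed a byte-counted item. Illustrative use (item names are real node_stat_item values, the surrounding code is invented):

        /* fine for any item; result is always in pages */
        unsigned long mapped = node_page_state_pages(pgdat, NR_FILE_MAPPED);

        /* page-only API; would WARN_ONCE for a byte-counted item */
        unsigned long dirty = node_page_state(pgdat, NR_FILE_DIRTY);
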
@@ -1085,6 +1096,24 @@
 	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 }
 
+/*
+ * Calculates external fragmentation within a zone wrt the given order.
+ * It is defined as the percentage of pages found in blocks of size
+ * less than 1 << order. It returns values in range [0, 100].
+ */
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
+{
+	struct contig_page_info info;
+
+	fill_contig_page_info(zone, order, &info);
+	if (info.free_pages == 0)
+		return 0;
+
+	return div_u64((info.free_pages -
+			(info.free_blocks_suitable << order)) * 100,
+			info.free_pages);
+}
+
 /* Same as __fragmentation index but allocs contig_page_info on stack */
 int fragmentation_index(struct zone *zone, unsigned int order)
 {
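
Note: a worked example of the formula above, with invented numbers: for order = 3, info.free_pages = 1000 and info.free_blocks_suitable = 100, extfrag_for_order() returns (1000 - (100 << 3)) * 100 / 1000 = 20, i.e. 20% of the zone's free pages sit in blocks too small to serve an order-3 allocation. A result of 0 means no fragmentation at this order; 100 means no free block is large enough.
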
@@ -1095,7 +1124,8 @@
 }
 #endif
 
-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
+    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -1118,7 +1148,7 @@
 	TEXT_FOR_HIGHMEM(xx) xx "_movable",
 
 const char * const vmstat_text[] = {
-	/* enum zone_stat_item countes */
+	/* enum zone_stat_item counters */
 	"nr_free_pages",
 	"nr_zone_inactive_anon",
 	"nr_zone_active_anon",
@@ -1128,14 +1158,8 @@
 	"nr_zone_write_pending",
 	"nr_mlock",
 	"nr_page_table_pages",
-	"nr_kernel_stack",
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	"nr_shadow_call_stack_bytes",
-#endif
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",
 
 	/* enum numa_stat_item counters */
@@ -1148,7 +1172,7 @@
 	"numa_other",
 #endif
 
-	/* Node-based counters */
+	/* enum node_stat_item counters */
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -1158,9 +1182,13 @@
 	"nr_slab_unreclaimable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"workingset_refault",
-	"workingset_activate",
-	"workingset_restore",
+	"workingset_nodes",
+	"workingset_refault_anon",
+	"workingset_refault_file",
+	"workingset_activate_anon",
+	"workingset_activate_file",
+	"workingset_restore_anon",
+	"workingset_restore_file",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
@@ -1171,28 +1199,29 @@
 	"nr_shmem",
 	"nr_shmem_hugepages",
 	"nr_shmem_pmdmapped",
+	"nr_file_hugepages",
+	"nr_file_pmdmapped",
 	"nr_anon_transparent_hugepages",
-	"nr_unstable",
 	"nr_vmscan_write",
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
 	"nr_kernel_misc_reclaimable",
-	"nr_unreclaimable_pages",
+	"nr_foll_pin_acquired",
+	"nr_foll_pin_released",
+	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack",
+#endif
 
-
-	"nr_ion_heap",
-	"nr_ion_heap_pool",
-	"nr_gpu_heap",
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
+#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 	/* enum vm_event_item counters */
 	"pgpgin",
 	"pgpgout",
-	"pgpgoutclean",
 	"pswpin",
 	"pswpout",
 
@@ -1210,11 +1239,16 @@
 	"pglazyfreed",
 
 	"pgrefill",
+	"pgreuse",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_direct_throttle",
+	"pgscan_anon",
+	"pgscan_file",
+	"pgsteal_anon",
+	"pgsteal_file",
 
 #ifdef CONFIG_NUMA
 	"zone_reclaim_failed",
@@ -1242,6 +1276,9 @@
 #ifdef CONFIG_MIGRATION
 	"pgmigrate_success",
 	"pgmigrate_fail",
+	"thp_migration_success",
+	"thp_migration_fail",
+	"thp_migration_split",
 #endif
 #ifdef CONFIG_COMPACTION
 	"compact_migrate_scanned",
@@ -1259,6 +1296,10 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_CMA
+	"cma_alloc_success",
+	"cma_alloc_fail",
+#endif
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -1270,9 +1311,12 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	"thp_fault_alloc",
 	"thp_fault_fallback",
+	"thp_fault_fallback_charge",
 	"thp_collapse_alloc",
 	"thp_collapse_alloc_failed",
 	"thp_file_alloc",
+	"thp_file_fallback",
+	"thp_file_fallback_charge",
 	"thp_file_mapped",
 	"thp_split_page",
 	"thp_split_page_failed",
@@ -1308,9 +1352,13 @@
 	"swap_ra",
 	"swap_ra_hit",
 #endif
-#endif /* CONFIG_VM_EVENTS_COUNTERS */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	"speculative_pgfault",
+	"speculative_pgfault_file"
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
 
 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
@@ -1400,12 +1448,26 @@
 		unsigned long freecount = 0;
 		struct free_area *area;
 		struct list_head *curr;
+		bool overflow = false;
 
 		area = &(zone->free_area[order]);
 
-		list_for_each(curr, &area->free_list[mtype])
-			freecount++;
-		seq_printf(m, "%6lu ", freecount);
+		list_for_each(curr, &area->free_list[mtype]) {
+			/*
+			 * Cap the free_list iteration because it might
+			 * be really large and we are under a spinlock
+			 * so a long time spent here could trigger a
+			 * hard lockup detector. Anyway this is a
+			 * debugging tool so knowing there is a handful
+			 * of pages of this order should be more than
+			 * sufficient.
+			 */
+			if (++freecount >= 100000) {
+				overflow = true;
+				break;
+			}
+		}
+		seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
 		spin_unlock_irq(&zone->lock);
 		cond_resched();
 		spin_lock_irq(&zone->lock);
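
Note: the 100000 cap bounds how long this debugging walk can hold zone->lock with IRQs off; once the cap is hit, the count is printed with a '>' prefix (">100000") so readers of /proc/pagetypeinfo can tell an exact count from a truncated one. The same bounded-walk pattern in isolation (sketch, names invented):

        unsigned long count = 0;
        bool overflow = false;

        list_for_each(curr, head) {
                /* stop before the walk becomes unbounded lock-hold time */
                if (++count >= WALK_CAP) {
                        overflow = true;
                        break;
                }
        }
        seq_printf(m, "%s%6lu ", overflow ? ">" : "", count);
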
@@ -1445,10 +1507,6 @@
 
 		page = pfn_to_online_page(pfn);
 		if (!page)
-			continue;
-
-		/* Watch for unexpected holes punched in the memmap */
-		if (!memmap_valid_within(pfn, page, zone))
 			continue;
 
 		if (page_zone(page) != zone)
@@ -1567,14 +1625,8 @@
 	if (is_zone_first_populated(pgdat, zone)) {
 		seq_printf(m, "\n  per-node stats");
 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			/* Skip hidden vmstat items. */
-			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					 NR_VM_NUMA_STAT_ITEMS] == '\0')
-				continue;
-			seq_printf(m, "\n      %-12s %lu",
-				   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					       NR_VM_NUMA_STAT_ITEMS],
-				   node_page_state(pgdat, i));
+			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
+				   node_page_state_pages(pgdat, i));
 		}
 	}
 	seq_printf(m,
@@ -1584,14 +1636,16 @@
 		   "\n        high     %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu"
-		   "\n        managed  %lu",
+		   "\n        managed  %lu"
+		   "\n        cma      %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->spanned_pages,
 		   zone->present_pages,
-		   zone->managed_pages);
+		   zone_managed_pages(zone),
+		   zone_cma_pages(zone));
 
 	seq_printf(m,
 		   "\n        protection: (%ld",
@@ -1607,14 +1661,13 @@
 	}
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
-			   zone_page_state(zone, i));
+		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
+			   zone_page_state(zone, i));
 
 #ifdef CONFIG_NUMA
 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu",
-			   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
-			   zone_numa_state_snapshot(zone, i));
+		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
+			   zone_numa_state_snapshot(zone, i));
 #endif
 
 	seq_printf(m, "\n  pagesets");
@@ -1665,29 +1718,23 @@
 	.show	= zoneinfo_show,
 };
 
-enum writeback_stat_item {
-	NR_DIRTY_THRESHOLD,
-	NR_DIRTY_BG_THRESHOLD,
-	NR_VM_WRITEBACK_STAT_ITEMS,
-};
+#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
+			 NR_VM_NUMA_STAT_ITEMS + \
+			 NR_VM_NODE_STAT_ITEMS + \
+			 NR_VM_WRITEBACK_STAT_ITEMS + \
+			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
+			  NR_VM_EVENT_ITEMS : 0))
 
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *v;
-	int i, stat_items_size;
+	int i;
 
-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
-	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-	stat_items_size += sizeof(struct vm_event_state);
-#endif
-
-	v = kmalloc(stat_items_size, GFP_KERNEL);
+	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
+	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
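
Note: NR_VMSTAT_ITEMS makes the buffer size a compile-time constant: IS_ENABLED() folds to 0 or 1 so the event slots are only reserved when CONFIG_VM_EVENT_COUNTERS is set, and the BUILD_BUG_ON() refuses to build if vmstat_text cannot name every exported value. The allocation change is the standard overflow-safe idiom:

        /* prefer this: returns NULL if n * sizeof(*v) would overflow ... */
        v = kmalloc_array(n, sizeof(*v), GFP_KERNEL);

        /* ... over this, where the multiplication can silently wrap */
        v = kmalloc(n * sizeof(*v), GFP_KERNEL);
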
@@ -1702,7 +1749,7 @@
 #endif
 
 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-		v[i] = global_node_page_state(i);
+		v[i] = global_node_page_state_pages(i);
 	v += NR_VM_NODE_STAT_ITEMS;
 
 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
@@ -1720,10 +1767,7 @@
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
 {
 	(*pos)++;
-	//nr_gpu_heap is out-of-tree now so we don't want to export it.
-	if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
-		(*pos)++;
-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
 	return (unsigned long *)m->private + *pos;
 }
@@ -1736,6 +1780,14 @@
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');
+
+	if (off == NR_VMSTAT_ITEMS - 1) {
+		/*
+		 * We've come to the end - add any deprecated counters to avoid
+		 * breaking userspace which might depend on them being present.
+		 */
+		seq_puts(m, "nr_unstable 0\n");
+	}
 	return 0;
 }
 
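
Note: emitting a literal "nr_unstable 0" after the last live counter keeps the /proc/vmstat ABI stable for userspace that still expects the removed NFS unstable-pages counter; with this change the file's final line always reads:

        nr_unstable 0
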
@@ -1764,7 +1816,7 @@
 }
 
 int vmstat_refresh(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
 	int err;
@@ -1789,7 +1841,7 @@
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i], val);
+				__func__, zone_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -1798,7 +1850,7 @@
 		val = atomic_long_read(&vm_numa_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
+				__func__, numa_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -2068,24 +2120,14 @@
 	return 0;
 }
 
-static const struct seq_operations unusable_op = {
+static const struct seq_operations unusable_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= unusable_show,
 };
 
-static int unusable_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &unusable_op);
-}
-
-static const struct file_operations unusable_file_ops = {
-	.open		= unusable_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(unusable);
 
 static void extfrag_show_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone)
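
Note: DEFINE_SEQ_ATTRIBUTE(name) is the seq_file helper that generates the name_open()/name_fops boilerplate deleted above, keyed off a name_sops seq_operations table (hence the unusable_op -> unusable_sops rename). It roughly expands to the following (paraphrased from include/linux/seq_file.h; the exact body may differ by kernel version):

#define DEFINE_SEQ_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	int ret = seq_open(file, &__name ## _sops);			\
	if (!ret && inode->i_private) {					\
		struct seq_file *seq_file = file->private_data;		\
		seq_file->private = inode->i_private;			\
	}								\
	return ret;							\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= seq_release,					\
}
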
@@ -2120,45 +2162,28 @@
 	return 0;
 }
 
-static const struct seq_operations extfrag_op = {
+static const struct seq_operations extfrag_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= extfrag_show,
 };
 
-static int extfrag_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &extfrag_op);
-}
-
-static const struct file_operations extfrag_file_ops = {
-	.open		= extfrag_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(extfrag);
 
 static int __init extfrag_debug_init(void)
 {
 	struct dentry *extfrag_debug_root;
 
 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
-	if (!extfrag_debug_root)
-		return -ENOMEM;
 
-	if (!debugfs_create_file("unusable_index", 0444,
-			extfrag_debug_root, NULL, &unusable_file_ops))
-		goto fail;
+	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
+			    &unusable_fops);
 
-	if (!debugfs_create_file("extfrag_index", 0444,
-			extfrag_debug_root, NULL, &extfrag_file_ops))
-		goto fail;
+	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
+			    &extfrag_fops);
 
 	return 0;
-fail:
-	debugfs_remove_recursive(extfrag_debug_root);
-	return -ENOMEM;
 }
 
 module_init(extfrag_debug_init);
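
Note: the error unwinding disappears because debugfs creation functions are documented as not needing their return values checked: on failure they return an ERR_PTR, which later debugfs calls accept and ignore, so a debugfs-only init function can simply create its files and return 0. The post-cleanup idiom in isolation (names hypothetical):

static int __init example_debug_init(void)
{
        struct dentry *root = debugfs_create_dir("example", NULL);

        /* no NULL/IS_ERR checks: debugfs tolerates a bad parent dentry */
        debugfs_create_file("stats", 0444, root, NULL, &example_fops);
        return 0;
}
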