2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
--- a/kernel/mm/vmstat.c
+++ b/kernel/mm/vmstat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/vmstat.c
  *
@@ -75,7 +76,7 @@
 static DEFINE_MUTEX(vm_numa_stat_lock);
 
 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret, oldval;
 
@@ -227,7 +228,7 @@
  * 125		1024		10	16-32 GB	9
  */
 
-	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
+	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
 
 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
 
@@ -320,15 +321,17 @@
 	long x;
 	long t;
 
+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
 
-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -340,15 +343,22 @@
 	long x;
 	long t;
 
+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
 
-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		node_page_state_add(x, pgdat, item);
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_node_page_state);
 
@@ -381,6 +391,7 @@
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -389,6 +400,7 @@
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -397,6 +409,9 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -405,6 +420,7 @@
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -425,6 +441,7 @@
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -433,6 +450,7 @@
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -441,6 +459,9 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -449,6 +470,7 @@
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -501,7 +523,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;
 
-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;
 
 			/* Overflow must be added to zone counters */
@@ -540,6 +562,11 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;
 
+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	do {
 		z = 0;	/* overflow to node counters */
 
@@ -558,7 +585,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;
 
-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;
 
 			/* Overflow must be added to node counters */
@@ -988,8 +1015,8 @@
 /*
  * Determine the per node value of a stat item.
  */
-unsigned long node_page_state(struct pglist_data *pgdat,
-				enum node_stat_item item)
+unsigned long node_page_state_pages(struct pglist_data *pgdat,
+				    enum node_stat_item item)
 {
 	long x = atomic_long_read(&pgdat->vm_stat[item]);
 #ifdef CONFIG_SMP
@@ -997,6 +1024,14 @@
 		x = 0;
 #endif
 	return x;
+}
+
+unsigned long node_page_state(struct pglist_data *pgdat,
+			      enum node_stat_item item)
+{
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	return node_page_state_pages(pgdat, item);
 }
 #endif
 
@@ -1073,6 +1108,24 @@
 	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 }
 
+/*
+ * Calculates external fragmentation within a zone wrt the given order.
+ * It is defined as the percentage of pages found in blocks of size
+ * less than 1 << order. It returns values in range [0, 100].
+ */
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
+{
+	struct contig_page_info info;
+
+	fill_contig_page_info(zone, order, &info);
+	if (info.free_pages == 0)
+		return 0;
+
+	return div_u64((info.free_pages -
+			(info.free_blocks_suitable << order)) * 100,
+			info.free_pages);
+}
+
 /* Same as __fragmentation index but allocs contig_page_info on stack */
 int fragmentation_index(struct zone *zone, unsigned int order)
 {
@@ -1083,7 +1136,8 @@
 }
 #endif
 
-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
+    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -1106,7 +1160,7 @@
 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
 
 const char * const vmstat_text[] = {
-	/* enum zone_stat_item countes */
+	/* enum zone_stat_item counters */
 	"nr_free_pages",
 	"nr_zone_inactive_anon",
 	"nr_zone_active_anon",
@@ -1116,14 +1170,8 @@
 	"nr_zone_write_pending",
 	"nr_mlock",
 	"nr_page_table_pages",
-	"nr_kernel_stack",
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	"nr_shadow_call_stack_bytes",
-#endif
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",
 
 	/* enum numa_stat_item counters */
@@ -1136,7 +1184,7 @@
 	"numa_other",
 #endif
 
-	/* Node-based counters */
+	/* enum node_stat_item counters */
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -1146,9 +1194,13 @@
 	"nr_slab_unreclaimable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"workingset_refault",
-	"workingset_activate",
-	"workingset_restore",
+	"workingset_nodes",
+	"workingset_refault_anon",
+	"workingset_refault_file",
+	"workingset_activate_anon",
+	"workingset_activate_file",
+	"workingset_restore_anon",
+	"workingset_restore_file",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
@@ -1159,28 +1211,29 @@
 	"nr_shmem",
 	"nr_shmem_hugepages",
 	"nr_shmem_pmdmapped",
+	"nr_file_hugepages",
+	"nr_file_pmdmapped",
 	"nr_anon_transparent_hugepages",
-	"nr_unstable",
 	"nr_vmscan_write",
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
 	"nr_kernel_misc_reclaimable",
-	"nr_unreclaimable_pages",
+	"nr_foll_pin_acquired",
+	"nr_foll_pin_released",
+	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack",
+#endif
 
-
-	"nr_ion_heap",
-	"nr_ion_heap_pool",
-	"nr_gpu_heap",
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
+#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 	/* enum vm_event_item counters */
 	"pgpgin",
 	"pgpgout",
-	"pgpgoutclean",
 	"pswpin",
 	"pswpout",
 
@@ -1198,11 +1251,16 @@
 	"pglazyfreed",
 
 	"pgrefill",
+	"pgreuse",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_direct_throttle",
+	"pgscan_anon",
+	"pgscan_file",
+	"pgsteal_anon",
+	"pgsteal_file",
 
 #ifdef CONFIG_NUMA
 	"zone_reclaim_failed",
@@ -1230,6 +1288,9 @@
 #ifdef CONFIG_MIGRATION
 	"pgmigrate_success",
 	"pgmigrate_fail",
+	"thp_migration_success",
+	"thp_migration_fail",
+	"thp_migration_split",
 #endif
 #ifdef CONFIG_COMPACTION
 	"compact_migrate_scanned",
@@ -1247,6 +1308,10 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_CMA
+	"cma_alloc_success",
+	"cma_alloc_fail",
+#endif
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -1258,9 +1323,12 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	"thp_fault_alloc",
 	"thp_fault_fallback",
+	"thp_fault_fallback_charge",
 	"thp_collapse_alloc",
 	"thp_collapse_alloc_failed",
 	"thp_file_alloc",
+	"thp_file_fallback",
+	"thp_file_fallback_charge",
 	"thp_file_mapped",
 	"thp_split_page",
 	"thp_split_page_failed",
@@ -1296,9 +1364,13 @@
 	"swap_ra",
 	"swap_ra_hit",
 #endif
-#endif /* CONFIG_VM_EVENTS_COUNTERS */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	"speculative_pgfault",
+	"speculative_pgfault_file"
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
 
 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
 	defined(CONFIG_PROC_FS)
@@ -1388,12 +1460,26 @@
 		unsigned long freecount = 0;
 		struct free_area *area;
 		struct list_head *curr;
+		bool overflow = false;
 
 		area = &(zone->free_area[order]);
 
-		list_for_each(curr, &area->free_list[mtype])
-			freecount++;
-		seq_printf(m, "%6lu ", freecount);
+		list_for_each(curr, &area->free_list[mtype]) {
+			/*
+			 * Cap the free_list iteration because it might
+			 * be really large and we are under a spinlock
+			 * so a long time spent here could trigger a
+			 * hard lockup detector. Anyway this is a
+			 * debugging tool so knowing there is a handful
+			 * of pages of this order should be more than
+			 * sufficient.
+			 */
+			if (++freecount >= 100000) {
+				overflow = true;
+				break;
+			}
+		}
+		seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
 		spin_unlock_irq(&zone->lock);
 		cond_resched();
 		spin_lock_irq(&zone->lock);
@@ -1433,10 +1519,6 @@
 
 		page = pfn_to_online_page(pfn);
 		if (!page)
-			continue;
-
-		/* Watch for unexpected holes punched in the memmap */
-		if (!memmap_valid_within(pfn, page, zone))
 			continue;
 
 		if (page_zone(page) != zone)
@@ -1555,14 +1637,8 @@
 	if (is_zone_first_populated(pgdat, zone)) {
 		seq_printf(m, "\n  per-node stats");
 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			/* Skip hidden vmstat items. */
-			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					 NR_VM_NUMA_STAT_ITEMS] == '\0')
-				continue;
-			seq_printf(m, "\n      %-12s %lu",
-				   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-				   NR_VM_NUMA_STAT_ITEMS],
-				   node_page_state(pgdat, i));
+			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
+				   node_page_state_pages(pgdat, i));
 		}
 	}
 	seq_printf(m,
@@ -1572,14 +1648,16 @@
 		   "\n        high     %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu"
-		   "\n        managed  %lu",
+		   "\n        managed  %lu"
+		   "\n        cma      %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->spanned_pages,
 		   zone->present_pages,
-		   zone->managed_pages);
+		   zone_managed_pages(zone),
+		   zone_cma_pages(zone));
 
 	seq_printf(m,
 		   "\n        protection: (%ld",
@@ -1595,14 +1673,13 @@
 	}
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
-			   zone_page_state(zone, i));
+		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
+			   zone_page_state(zone, i));
 
 #ifdef CONFIG_NUMA
 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu",
-			   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
-			   zone_numa_state_snapshot(zone, i));
+		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
+			   zone_numa_state_snapshot(zone, i));
 #endif
 
 	seq_printf(m, "\n  pagesets");
@@ -1653,29 +1730,23 @@
 	.show	= zoneinfo_show,
 };
 
-enum writeback_stat_item {
-	NR_DIRTY_THRESHOLD,
-	NR_DIRTY_BG_THRESHOLD,
-	NR_VM_WRITEBACK_STAT_ITEMS,
-};
+#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
+			 NR_VM_NUMA_STAT_ITEMS + \
+			 NR_VM_NODE_STAT_ITEMS + \
+			 NR_VM_WRITEBACK_STAT_ITEMS + \
+			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
+			  NR_VM_EVENT_ITEMS : 0))
 
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *v;
-	int i, stat_items_size;
+	int i;
 
-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
-	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-	stat_items_size += sizeof(struct vm_event_state);
-#endif
-
-	v = kmalloc(stat_items_size, GFP_KERNEL);
+	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
+	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
16901761 #endif
16911762
16921763 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1693
- v[i] = global_node_page_state(i);
1764
+ v[i] = global_node_page_state_pages(i);
16941765 v += NR_VM_NODE_STAT_ITEMS;
16951766
16961767 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
....@@ -1708,10 +1779,7 @@
17081779 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
17091780 {
17101781 (*pos)++;
1711
- //nr_gpu_heap is out-of-tree now so we don't want to export it.
1712
- if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
1713
- (*pos)++;
1714
- if (*pos >= ARRAY_SIZE(vmstat_text))
1782
+ if (*pos >= NR_VMSTAT_ITEMS)
17151783 return NULL;
17161784 return (unsigned long *)m->private + *pos;
17171785 }
@@ -1724,6 +1792,14 @@
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');
+
+	if (off == NR_VMSTAT_ITEMS - 1) {
+		/*
+		 * We've come to the end - add any deprecated counters to avoid
+		 * breaking userspace which might depend on them being present.
+		 */
+		seq_puts(m, "nr_unstable 0\n");
+	}
 	return 0;
 }
 
@@ -1752,7 +1828,7 @@
 }
 
 int vmstat_refresh(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
 	int err;
17771853 val = atomic_long_read(&vm_zone_stat[i]);
17781854 if (val < 0) {
17791855 pr_warn("%s: %s %ld\n",
1780
- __func__, vmstat_text[i], val);
1856
+ __func__, zone_stat_name(i), val);
17811857 err = -EINVAL;
17821858 }
17831859 }
@@ -1786,7 +1862,7 @@
 		val = atomic_long_read(&vm_numa_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
+				__func__, numa_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -2056,24 +2132,14 @@
 	return 0;
 }
 
-static const struct seq_operations unusable_op = {
+static const struct seq_operations unusable_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= unusable_show,
 };
 
-static int unusable_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &unusable_op);
-}
-
-static const struct file_operations unusable_file_ops = {
-	.open	= unusable_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(unusable);
 
 static void extfrag_show_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone)
@@ -2108,45 +2174,28 @@
 	return 0;
 }
 
-static const struct seq_operations extfrag_op = {
+static const struct seq_operations extfrag_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= extfrag_show,
 };
 
-static int extfrag_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &extfrag_op);
-}
-
-static const struct file_operations extfrag_file_ops = {
-	.open	= extfrag_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(extfrag);
 
 static int __init extfrag_debug_init(void)
 {
 	struct dentry *extfrag_debug_root;
 
 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
-	if (!extfrag_debug_root)
-		return -ENOMEM;
 
-	if (!debugfs_create_file("unusable_index", 0444,
-			extfrag_debug_root, NULL, &unusable_file_ops))
-		goto fail;
+	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
+			    &unusable_fops);
 
-	if (!debugfs_create_file("extfrag_index", 0444,
-			extfrag_debug_root, NULL, &extfrag_file_ops))
-		goto fail;
+	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
+			    &extfrag_fops);
 
 	return 0;
-fail:
-	debugfs_remove_recursive(extfrag_debug_root);
-	return -ENOMEM;
 }
 
 module_init(extfrag_debug_init);