2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/mm/vmstat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/vmstat.c
  *
@@ -75,7 +76,7 @@
 static DEFINE_MUTEX(vm_numa_stat_lock);

 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret, oldval;

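
Note: dropping __user from the buffer argument follows the sysctl core rework
in which proc_sysctl copies the data to/from userspace itself and hands every
->proc_handler a plain kernel pointer, so individual handlers no longer do
copy_from_user()/copy_to_user() themselves. The handler body is unchanged.
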
@@ -227,7 +228,7 @@
  * 125		1024		10	16-32 GB	9
  */

-	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
+	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

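
Note: zone_managed_pages(zone) >> (27 - PAGE_SHIFT) expresses the zone size in
128 MB (2^27 byte) units. A worked example consistent with the sample row kept
in the context above, assuming 4 KiB pages and the min(125, threshold) clamp
that sits a few lines below this hunk (not shown here):

	mem = (16 GiB / 4 KiB) >> 15 = 128	/* in 128 MB units */
	1 + fls(mem) = 1 + 8 = 9
	fls(1024 cpus) = 11
	threshold = 2 * 11 * 9 = 198, clamped to 125

which reproduces the "125  1024  10  16-32 GB  9" line of the table.
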
@@ -325,7 +326,7 @@

 	t = __this_cpu_read(pcp->stat_threshold);

-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
@@ -342,12 +343,17 @@
 	long x;
 	long t;

+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);

 	t = __this_cpu_read(pcp->stat_threshold);

-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		node_page_state_add(x, pgdat, item);
 		x = 0;
 	}
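
Note: vmstat_item_in_bytes() flags node stat items whose API is byte-based
while the underlying counter still stores pages; the shift converts the byte
delta to pages, and the VM_WARN_ON_ONCE catches callers passing a delta that
is not page-aligned. A hypothetical caller sketch (the item name is
illustrative and not necessarily present in this tree):

	/* byte-based item: delta must be a multiple of PAGE_SIZE */
	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, 2 * PAGE_SIZE);
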
@@ -403,6 +409,8 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;

+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
 	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -450,6 +458,8 @@
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
+
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

 	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
@@ -513,7 +523,7 @@
 	o = this_cpu_read(*p);
 	n = delta + o;

-	if (n > t || n < -t) {
+	if (abs(n) > t) {
 		int os = overstep_mode * (t >> 1) ;

 		/* Overflow must be added to zone counters */
@@ -552,6 +562,11 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;

+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	do {
 		z = 0;  /* overflow to node counters */

@@ -570,7 +585,7 @@
 	o = this_cpu_read(*p);
 	n = delta + o;

-	if (n > t || n < -t) {
+	if (abs(n) > t) {
 		int os = overstep_mode * (t >> 1) ;

 		/* Overflow must be added to node counters */
@@ -1000,8 +1015,8 @@
 /*
  * Determine the per node value of a stat item.
  */
-unsigned long node_page_state(struct pglist_data *pgdat,
-				enum node_stat_item item)
+unsigned long node_page_state_pages(struct pglist_data *pgdat,
+				    enum node_stat_item item)
 {
 	long x = atomic_long_read(&pgdat->vm_stat[item]);
 #ifdef CONFIG_SMP
@@ -1009,6 +1024,14 @@
 		x = 0;
 #endif
 	return x;
+}
+
+unsigned long node_page_state(struct pglist_data *pgdat,
+			      enum node_stat_item item)
+{
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	return node_page_state_pages(pgdat, item);
 }
 #endif

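
Note: the old node_page_state() body is renamed to node_page_state_pages(),
which always returns the raw page count, while the re-added node_page_state()
wrapper preserves the old page-based contract and warns if it is applied to a
byte-based item (such callers are expected to use the _pages variant and
convert themselves).
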
@@ -1085,6 +1108,24 @@
 	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 }

+/*
+ * Calculates external fragmentation within a zone wrt the given order.
+ * It is defined as the percentage of pages found in blocks of size
+ * less than 1 << order. It returns values in range [0, 100].
+ */
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
+{
+	struct contig_page_info info;
+
+	fill_contig_page_info(zone, order, &info);
+	if (info.free_pages == 0)
+		return 0;
+
+	return div_u64((info.free_pages -
+			(info.free_blocks_suitable << order)) * 100,
+			info.free_pages);
+}
+
 /* Same as __fragmentation index but allocs contig_page_info on stack */
 int fragmentation_index(struct zone *zone, unsigned int order)
 {
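
Note: a quick worked example of extfrag_for_order() with made-up numbers: for
order = 3, free_pages = 1000 and free_blocks_suitable = 100, the free pages
already sitting in blocks of at least order 3 amount to 100 << 3 = 800, so

	extfrag = (1000 - 800) * 100 / 1000 = 20

i.e. 20% of the zone's free pages are in blocks too small to satisfy an
order-3 allocation.
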
@@ -1095,7 +1136,8 @@
 }
 #endif

-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
+    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -1118,7 +1160,7 @@
 			TEXT_FOR_HIGHMEM(xx) xx "_movable",

 const char * const vmstat_text[] = {
-	/* enum zone_stat_item countes */
+	/* enum zone_stat_item counters */
 	"nr_free_pages",
 	"nr_zone_inactive_anon",
 	"nr_zone_active_anon",
@@ -1128,14 +1170,8 @@
 	"nr_zone_write_pending",
 	"nr_mlock",
 	"nr_page_table_pages",
-	"nr_kernel_stack",
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	"nr_shadow_call_stack_bytes",
-#endif
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",

 	/* enum numa_stat_item counters */
@@ -1148,7 +1184,7 @@
 	"numa_other",
 #endif

-	/* Node-based counters */
+	/* enum node_stat_item counters */
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -1158,9 +1194,13 @@
 	"nr_slab_unreclaimable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"workingset_refault",
-	"workingset_activate",
-	"workingset_restore",
+	"workingset_nodes",
+	"workingset_refault_anon",
+	"workingset_refault_file",
+	"workingset_activate_anon",
+	"workingset_activate_file",
+	"workingset_restore_anon",
+	"workingset_restore_file",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
@@ -1171,28 +1211,29 @@
 	"nr_shmem",
 	"nr_shmem_hugepages",
 	"nr_shmem_pmdmapped",
+	"nr_file_hugepages",
+	"nr_file_pmdmapped",
 	"nr_anon_transparent_hugepages",
-	"nr_unstable",
 	"nr_vmscan_write",
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
 	"nr_kernel_misc_reclaimable",
-	"nr_unreclaimable_pages",
+	"nr_foll_pin_acquired",
+	"nr_foll_pin_released",
+	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack",
+#endif

-
-	"nr_ion_heap",
-	"nr_ion_heap_pool",
-	"nr_gpu_heap",
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",

-#ifdef CONFIG_VM_EVENT_COUNTERS
+#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 	/* enum vm_event_item counters */
 	"pgpgin",
 	"pgpgout",
-	"pgpgoutclean",
 	"pswpin",
 	"pswpout",

@@ -1210,11 +1251,16 @@
 	"pglazyfreed",

 	"pgrefill",
+	"pgreuse",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_direct_throttle",
+	"pgscan_anon",
+	"pgscan_file",
+	"pgsteal_anon",
+	"pgsteal_file",

 #ifdef CONFIG_NUMA
 	"zone_reclaim_failed",
@@ -1242,6 +1288,9 @@
 #ifdef CONFIG_MIGRATION
 	"pgmigrate_success",
 	"pgmigrate_fail",
+	"thp_migration_success",
+	"thp_migration_fail",
+	"thp_migration_split",
 #endif
 #ifdef CONFIG_COMPACTION
 	"compact_migrate_scanned",
@@ -1259,6 +1308,10 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_CMA
+	"cma_alloc_success",
+	"cma_alloc_fail",
+#endif
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -1270,9 +1323,12 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	"thp_fault_alloc",
 	"thp_fault_fallback",
+	"thp_fault_fallback_charge",
 	"thp_collapse_alloc",
 	"thp_collapse_alloc_failed",
 	"thp_file_alloc",
+	"thp_file_fallback",
+	"thp_file_fallback_charge",
 	"thp_file_mapped",
 	"thp_split_page",
 	"thp_split_page_failed",
@@ -1308,9 +1364,13 @@
 	"swap_ra",
 	"swap_ra_hit",
 #endif
-#endif /* CONFIG_VM_EVENTS_COUNTERS */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	"speculative_pgfault",
+	"speculative_pgfault_file"
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
      defined(CONFIG_PROC_FS)
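
Note: extending the guard to CONFIG_MEMCG lets the memcg statistics code reuse
vmstat_text[] (including the vm_event_item names, hence the matching change to
the CONFIG_VM_EVENT_COUNTERS guard) even on configs that previously compiled
the array out; the #endif comments are updated to match the new conditions.
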
@@ -1400,12 +1460,26 @@
 			unsigned long freecount = 0;
 			struct free_area *area;
 			struct list_head *curr;
+			bool overflow = false;

 			area = &(zone->free_area[order]);

-			list_for_each(curr, &area->free_list[mtype])
-				freecount++;
-			seq_printf(m, "%6lu ", freecount);
+			list_for_each(curr, &area->free_list[mtype]) {
+				/*
+				 * Cap the free_list iteration because it might
+				 * be really large and we are under a spinlock
+				 * so a long time spent here could trigger a
+				 * hard lockup detector. Anyway this is a
+				 * debugging tool so knowing there is a handful
+				 * of pages of this order should be more than
+				 * sufficient.
+				 */
+				if (++freecount >= 100000) {
+					overflow = true;
+					break;
+				}
+			}
+			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
 			spin_unlock_irq(&zone->lock);
 			cond_resched();
 			spin_lock_irq(&zone->lock);
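
Note: with the cap in place a truncated count is printed with a '>' prefix, so
an over-long free list shows up in /proc/pagetypeinfo as ">100000" instead of
an exact value that would be too expensive to compute under the zone lock.
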
@@ -1445,10 +1519,6 @@

 		page = pfn_to_online_page(pfn);
 		if (!page)
-			continue;
-
-		/* Watch for unexpected holes punched in the memmap */
-		if (!memmap_valid_within(pfn, page, zone))
 			continue;

 		if (page_zone(page) != zone)
@@ -1567,14 +1637,8 @@
 	if (is_zone_first_populated(pgdat, zone)) {
 		seq_printf(m, "\n  per-node stats");
 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			/* Skip hidden vmstat items. */
-			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					 NR_VM_NUMA_STAT_ITEMS] == '\0')
-				continue;
-			seq_printf(m, "\n      %-12s %lu",
-				   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-				   NR_VM_NUMA_STAT_ITEMS],
-				   node_page_state(pgdat, i));
+			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
+				   node_page_state_pages(pgdat, i));
 		}
 	}
 	seq_printf(m,
@@ -1584,14 +1648,16 @@
 		   "\n        high     %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu"
-		   "\n        managed  %lu",
+		   "\n        managed  %lu"
+		   "\n        cma      %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->spanned_pages,
 		   zone->present_pages,
-		   zone->managed_pages);
+		   zone_managed_pages(zone),
+		   zone_cma_pages(zone));

 	seq_printf(m,
 		   "\n        protection: (%ld",
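
Note: this adds a per-zone "cma" line to /proc/zoneinfo next to "managed".
zone_cma_pages() is presumably a helper that returns the zone's CMA page count
and 0 when CONFIG_CMA is disabled, so the line is emitted unconditionally.
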
@@ -1607,14 +1673,13 @@
 	}

 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
-			   zone_page_state(zone, i));
+		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
+			   zone_page_state(zone, i));

 #ifdef CONFIG_NUMA
 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
-		seq_printf(m, "\n      %-12s %lu",
-			   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
-			   zone_numa_state_snapshot(zone, i));
+		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
+			   zone_numa_state_snapshot(zone, i));
 #endif

 	seq_printf(m, "\n  pagesets");
@@ -1665,29 +1730,23 @@
 	.show	= zoneinfo_show,
 };

-enum writeback_stat_item {
-	NR_DIRTY_THRESHOLD,
-	NR_DIRTY_BG_THRESHOLD,
-	NR_VM_WRITEBACK_STAT_ITEMS,
-};
+#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
+			 NR_VM_NUMA_STAT_ITEMS + \
+			 NR_VM_NODE_STAT_ITEMS + \
+			 NR_VM_WRITEBACK_STAT_ITEMS + \
+			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
+			  NR_VM_EVENT_ITEMS : 0))

 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *v;
-	int i, stat_items_size;
+	int i;

-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
-	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

-#ifdef CONFIG_VM_EVENT_COUNTERS
-	stat_items_size += sizeof(struct vm_event_state);
-#endif
-
-	v = kmalloc(stat_items_size, GFP_KERNEL);
+	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
+	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
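
Note: NR_VMSTAT_ITEMS derives the buffer size from the item enums themselves,
and kmalloc_array() replaces the hand-summed byte count. The BUILD_BUG_ON
checks at compile time that vmstat_text[] provides a name for every item
vmstat_start() will emit; it uses '<' rather than '!=' because the text array
may legitimately contain extra entries that are never emitted here.
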
@@ -1702,7 +1761,7 @@
 #endif

 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-		v[i] = global_node_page_state(i);
+		v[i] = global_node_page_state_pages(i);
 	v += NR_VM_NODE_STAT_ITEMS;

 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
@@ -1720,10 +1779,7 @@
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
 {
 	(*pos)++;
-	//nr_gpu_heap is out-of-tree now so we don't want to export it.
-	if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
-		(*pos)++;
-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
 	return (unsigned long *)m->private + *pos;
 }
@@ -1736,6 +1792,14 @@
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');
+
+	if (off == NR_VMSTAT_ITEMS - 1) {
+		/*
+		 * We've come to the end - add any deprecated counters to avoid
+		 * breaking userspace which might depend on them being present.
+		 */
+		seq_puts(m, "nr_unstable 0\n");
+	}
 	return 0;
 }

@@ -1764,7 +1828,7 @@
 }

 int vmstat_refresh(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
 	int err;
@@ -1789,7 +1853,7 @@
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i], val);
+				__func__, zone_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -1798,7 +1862,7 @@
 		val = atomic_long_read(&vm_numa_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
+				__func__, numa_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -2068,24 +2132,14 @@
 	return 0;
 }

-static const struct seq_operations unusable_op = {
+static const struct seq_operations unusable_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= unusable_show,
 };

-static int unusable_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &unusable_op);
-}
-
-static const struct file_operations unusable_file_ops = {
-	.open		= unusable_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(unusable);

 static void extfrag_show_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone)
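
Note: DEFINE_SEQ_ATTRIBUTE() from <linux/seq_file.h> generates the open helper
and file_operations that the deleted boilerplate spelled out by hand. It
derives both names from its argument, which is why the seq_operations had to
be renamed to unusable_sops and why the debugfs registration below uses
unusable_fops. A simplified sketch of what the macro expands to:

	static int unusable_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &unusable_sops);
	}

	static const struct file_operations unusable_fops = {
		.owner   = THIS_MODULE,
		.open    = unusable_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = seq_release,
	};

(the real macro additionally propagates inode->i_private into the seq_file's
private pointer on open).
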
@@ -2120,45 +2174,28 @@
 	return 0;
 }

-static const struct seq_operations extfrag_op = {
+static const struct seq_operations extfrag_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= extfrag_show,
 };

-static int extfrag_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &extfrag_op);
-}
-
-static const struct file_operations extfrag_file_ops = {
-	.open		= extfrag_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(extfrag);

 static int __init extfrag_debug_init(void)
 {
 	struct dentry *extfrag_debug_root;

 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
-	if (!extfrag_debug_root)
-		return -ENOMEM;

-	if (!debugfs_create_file("unusable_index", 0444,
-			extfrag_debug_root, NULL, &unusable_file_ops))
-		goto fail;
+	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
+			    &unusable_fops);

-	if (!debugfs_create_file("extfrag_index", 0444,
-			extfrag_debug_root, NULL, &extfrag_file_ops))
-		goto fail;
+	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
+			    &extfrag_fops);

 	return 0;
-fail:
-	debugfs_remove_recursive(extfrag_debug_root);
-	return -ENOMEM;
 }

 module_init(extfrag_debug_init);
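
Note: the error handling disappears because the debugfs creation helpers are
meant to be fire-and-forget: on failure they return an error pointer that
subsequent debugfs calls accept and ignore, and a debugfs-only feature should
not fail initialization merely because debugfs is unavailable.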