2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/mm/vmstat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/vmstat.c
  *
@@ -75,7 +76,7 @@
 static DEFINE_MUTEX(vm_numa_stat_lock);

 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret, oldval;

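The dropped __user qualifier reflects the sysctl rework in which proc handlers receive a kernel-space buffer that the proc layer has already copied in (and copies back out for reads). A minimal sketch of a handler against the new prototype; the handler name is illustrative, not part of this patch:

	static int demo_sysctl_handler(struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
	{
		/* buffer is kernel memory here; proc_dointvec() parses/formats it */
		int ret = proc_dointvec(table, write, buffer, lenp, ppos);

		if (ret || !write)
			return ret;
		/* a write succeeded: react to the new value in table->data here */
		return 0;
	}
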
@@ -227,7 +228,7 @@
  * 125		1024		10	16-32 GB	9
  */

-	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
+	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

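For reference, mem here is the zone's managed memory in 128 MB units (that is what the >> (27 - PAGE_SHIFT) shift does), so the threshold grows with the logarithm of both the CPU count and the zone size. A standalone userspace sketch of the arithmetic, assuming the kernel's fls() semantics (1-based index of the most significant set bit, 0 for an input of 0); the sample values are made up:

	#include <stdio.h>

	static int fls_demo(unsigned long x)
	{
		return x ? 64 - __builtin_clzl(x) : 0;
	}

	int main(void)
	{
		unsigned long cpus = 8;
		unsigned long mem = 32;	/* a 4 GB zone in 128 MB units */
		long threshold = 2 * fls_demo(cpus) * (1 + fls_demo(mem));

		/* 2 * 4 * (1 + 6) = 56 */
		printf("threshold = %ld\n", threshold);
		return 0;
	}
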
@@ -324,7 +325,7 @@

 	t = __this_cpu_read(pcp->stat_threshold);

-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
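The abs(x) > t form is a readability change, not a behavioural one: for the signed per-cpu deltas and non-negative thresholds used here it is equivalent to x > t || x < -t. A quick userspace check of the equivalence over a representative range:

	#include <assert.h>
	#include <stdlib.h>

	int main(void)
	{
		for (long t = 0; t <= 127; t++)
			for (long x = -1024; x <= 1024; x++)
				assert((labs(x) > t) == (x > t || x < -t));
		return 0;
	}
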
@@ -340,11 +341,16 @@
 	long x;
 	long t;

+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	x = delta + __this_cpu_read(*p);

 	t = __this_cpu_read(pcp->stat_threshold);

-	if (unlikely(x > t || x < -t)) {
+	if (unlikely(abs(x) > t)) {
 		node_page_state_add(x, pgdat, item);
 		x = 0;
 	}
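The new vmstat_item_in_bytes() branch lets byte-accounted node items share the page-granular counters: callers pass a byte delta, which must be page-aligned, and it is folded down to pages before the threshold logic runs. A userspace sketch of that conversion, assuming a 4 KiB page; the DEMO_ names are illustrative:

	#include <assert.h>
	#include <stdio.h>

	#define DEMO_PAGE_SHIFT	12
	#define DEMO_PAGE_SIZE	(1L << DEMO_PAGE_SHIFT)

	int main(void)
	{
		long delta = 2 * DEMO_PAGE_SIZE;	/* a page-aligned byte delta */

		/* the kernel uses VM_WARN_ON_ONCE() for this check */
		assert((delta & (DEMO_PAGE_SIZE - 1)) == 0);
		delta >>= DEMO_PAGE_SHIFT;
		printf("delta in pages: %ld\n", delta);	/* 2 */
		return 0;
	}
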
@@ -397,6 +403,8 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;

+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -440,6 +448,8 @@
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
+
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -501,7 +511,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;

-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;

 			/* Overflow must be added to zone counters */
@@ -540,6 +550,11 @@
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;

+	if (vmstat_item_in_bytes(item)) {
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	do {
 		z = 0;	/* overflow to node counters */

@@ -558,7 +573,7 @@
 		o = this_cpu_read(*p);
 		n = delta + o;

-		if (n > t || n < -t) {
+		if (abs(n) > t) {
 			int os = overstep_mode * (t >> 1) ;

 			/* Overflow must be added to node counters */
@@ -988,8 +1003,8 @@
 /*
  * Determine the per node value of a stat item.
  */
-unsigned long node_page_state(struct pglist_data *pgdat,
-				enum node_stat_item item)
+unsigned long node_page_state_pages(struct pglist_data *pgdat,
+				    enum node_stat_item item)
 {
 	long x = atomic_long_read(&pgdat->vm_stat[item]);
 #ifdef CONFIG_SMP
@@ -997,6 +1012,14 @@
 		x = 0;
 #endif
 	return x;
+}
+
+unsigned long node_page_state(struct pglist_data *pgdat,
+			      enum node_stat_item item)
+{
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	return node_page_state_pages(pgdat, item);
 }
 #endif

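The split gives byte-accounted items a warning-free accessor: node_page_state_pages() returns the raw page-granular value for any item, while node_page_state() keeps its old contract and now warns if handed a byte-based item. A hedged sketch of the resulting call pattern; the byte-item name follows mainline's _B suffix convention and is assumed here, not taken from this patch:

	/* page-granular item: the traditional accessor is still correct */
	unsigned long mapped = node_page_state(pgdat, NR_FILE_MAPPED);

	/* byte-accounted item: read the page-granular backing value directly */
	unsigned long slab = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
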
@@ -1073,6 +1096,24 @@
 	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 }

+/*
+ * Calculates external fragmentation within a zone wrt the given order.
+ * It is defined as the percentage of pages found in blocks of size
+ * less than 1 << order. It returns values in range [0, 100].
+ */
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
+{
+	struct contig_page_info info;
+
+	fill_contig_page_info(zone, order, &info);
+	if (info.free_pages == 0)
+		return 0;
+
+	return div_u64((info.free_pages -
+			(info.free_blocks_suitable << order)) * 100,
+			info.free_pages);
+}
+
 /* Same as __fragmentation index but allocs contig_page_info on stack */
 int fragmentation_index(struct zone *zone, unsigned int order)
 {
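Worked through with made-up numbers: at order = 3, with 1000 free pages in the zone and 100 free blocks of at least 2^3 pages, the suitable blocks cover 800 pages, so 20% of the zone's free memory sits in fragments too small to serve the request. A standalone sketch of the same calculation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 3;
		unsigned long free_pages = 1000;
		unsigned long free_blocks_suitable = 100;	/* blocks >= 1 << order */
		unsigned long extfrag;

		if (free_pages == 0)
			return 0;
		extfrag = (free_pages - (free_blocks_suitable << order)) * 100 /
			  free_pages;
		printf("extfrag_for_order(%u) = %lu%%\n", order, extfrag);	/* 20% */
		return 0;
	}
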
@@ -1083,7 +1124,8 @@
 }
 #endif

-#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
+    defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -1106,7 +1148,7 @@
 			TEXT_FOR_HIGHMEM(xx) xx "_movable",

 const char * const vmstat_text[] = {
-	/* enum zone_stat_item countes */
+	/* enum zone_stat_item counters */
 	"nr_free_pages",
 	"nr_zone_inactive_anon",
 	"nr_zone_active_anon",
@@ -1116,14 +1158,8 @@
 	"nr_zone_write_pending",
 	"nr_mlock",
 	"nr_page_table_pages",
-	"nr_kernel_stack",
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	"nr_shadow_call_stack_bytes",
-#endif
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",

 	/* enum numa_stat_item counters */
@@ -1136,7 +1172,7 @@
 	"numa_other",
 #endif

-	/* Node-based counters */
+	/* enum node_stat_item counters */
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -1146,9 +1182,13 @@
 	"nr_slab_unreclaimable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"workingset_refault",
-	"workingset_activate",
-	"workingset_restore",
+	"workingset_nodes",
+	"workingset_refault_anon",
+	"workingset_refault_file",
+	"workingset_activate_anon",
+	"workingset_activate_file",
+	"workingset_restore_anon",
+	"workingset_restore_file",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
@@ -1159,28 +1199,29 @@
 	"nr_shmem",
 	"nr_shmem_hugepages",
 	"nr_shmem_pmdmapped",
+	"nr_file_hugepages",
+	"nr_file_pmdmapped",
 	"nr_anon_transparent_hugepages",
-	"nr_unstable",
 	"nr_vmscan_write",
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
 	"nr_kernel_misc_reclaimable",
-	"nr_unreclaimable_pages",
+	"nr_foll_pin_acquired",
+	"nr_foll_pin_released",
+	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack",
+#endif

-
-	"nr_ion_heap",
-	"nr_ion_heap_pool",
-	"nr_gpu_heap",
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",

-#ifdef CONFIG_VM_EVENT_COUNTERS
+#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 	/* enum vm_event_item counters */
 	"pgpgin",
 	"pgpgout",
-	"pgpgoutclean",
 	"pswpin",
 	"pswpout",

@@ -1198,11 +1239,16 @@
 	"pglazyfreed",

 	"pgrefill",
+	"pgreuse",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_direct_throttle",
+	"pgscan_anon",
+	"pgscan_file",
+	"pgsteal_anon",
+	"pgsteal_file",

 #ifdef CONFIG_NUMA
 	"zone_reclaim_failed",
@@ -1230,6 +1276,9 @@
 #ifdef CONFIG_MIGRATION
 	"pgmigrate_success",
 	"pgmigrate_fail",
+	"thp_migration_success",
+	"thp_migration_fail",
+	"thp_migration_split",
 #endif
 #ifdef CONFIG_COMPACTION
 	"compact_migrate_scanned",
@@ -1247,6 +1296,10 @@
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_CMA
+	"cma_alloc_success",
+	"cma_alloc_fail",
+#endif
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -1258,9 +1311,12 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	"thp_fault_alloc",
 	"thp_fault_fallback",
+	"thp_fault_fallback_charge",
 	"thp_collapse_alloc",
 	"thp_collapse_alloc_failed",
 	"thp_file_alloc",
+	"thp_file_fallback",
+	"thp_file_fallback_charge",
 	"thp_file_mapped",
 	"thp_split_page",
 	"thp_split_page_failed",
@@ -1296,9 +1352,13 @@
 	"swap_ra",
 	"swap_ra_hit",
 #endif
-#endif /* CONFIG_VM_EVENTS_COUNTERS */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	"speculative_pgfault",
+	"speculative_pgfault_file"
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 };
-#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
+#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
 	defined(CONFIG_PROC_FS)
@@ -1388,12 +1448,26 @@
 			unsigned long freecount = 0;
 			struct free_area *area;
 			struct list_head *curr;
+			bool overflow = false;

 			area = &(zone->free_area[order]);

-			list_for_each(curr, &area->free_list[mtype])
-				freecount++;
-			seq_printf(m, "%6lu ", freecount);
+			list_for_each(curr, &area->free_list[mtype]) {
+				/*
+				 * Cap the free_list iteration because it might
+				 * be really large and we are under a spinlock
+				 * so a long time spent here could trigger a
+				 * hard lockup detector. Anyway this is a
+				 * debugging tool so knowing there is a handful
+				 * of pages of this order should be more than
+				 * sufficient.
+				 */
+				if (++freecount >= 100000) {
+					overflow = true;
+					break;
+				}
+			}
+			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
 			spin_unlock_irq(&zone->lock);
 			cond_resched();
 			spin_lock_irq(&zone->lock);
@@ -1433,10 +1507,6 @@

 		page = pfn_to_online_page(pfn);
 		if (!page)
-			continue;
-
-		/* Watch for unexpected holes punched in the memmap */
-		if (!memmap_valid_within(pfn, page, zone))
 			continue;

 		if (page_zone(page) != zone)
@@ -1555,14 +1625,8 @@
 	if (is_zone_first_populated(pgdat, zone)) {
 		seq_printf(m, "\n  per-node stats");
 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			/* Skip hidden vmstat items. */
-			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					 NR_VM_NUMA_STAT_ITEMS] == '\0')
-				continue;
-			seq_printf(m, "\n      %-12s %lu",
-				   vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
-					       NR_VM_NUMA_STAT_ITEMS],
-				   node_page_state(pgdat, i));
+			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
+				   node_page_state_pages(pgdat, i));
 		}
 	}
 	seq_printf(m,
@@ -1572,14 +1636,16 @@
 		   "\n        high     %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu"
-		   "\n        managed  %lu",
+		   "\n        managed  %lu"
+		   "\n        cma      %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->spanned_pages,
 		   zone->present_pages,
-		   zone->managed_pages);
+		   zone_managed_pages(zone),
+		   zone_cma_pages(zone));

 	seq_printf(m,
 		   "\n        protection: (%ld",
15951661 }
15961662
15971663 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1598
- seq_printf(m, "\n %-12s %lu", vmstat_text[i],
1599
- zone_page_state(zone, i));
1664
+ seq_printf(m, "\n %-12s %lu", zone_stat_name(i),
1665
+ zone_page_state(zone, i));
16001666
16011667 #ifdef CONFIG_NUMA
16021668 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1603
- seq_printf(m, "\n %-12s %lu",
1604
- vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
1605
- zone_numa_state_snapshot(zone, i));
1669
+ seq_printf(m, "\n %-12s %lu", numa_stat_name(i),
1670
+ zone_numa_state_snapshot(zone, i));
16061671 #endif
16071672
16081673 seq_printf(m, "\n pagesets");
@@ -1653,29 +1718,23 @@
 	.show	= zoneinfo_show,
 };

-enum writeback_stat_item {
-	NR_DIRTY_THRESHOLD,
-	NR_DIRTY_BG_THRESHOLD,
-	NR_VM_WRITEBACK_STAT_ITEMS,
-};
+#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
+			 NR_VM_NUMA_STAT_ITEMS + \
+			 NR_VM_NODE_STAT_ITEMS + \
+			 NR_VM_WRITEBACK_STAT_ITEMS + \
+			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
+			  NR_VM_EVENT_ITEMS : 0))

 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *v;
-	int i, stat_items_size;
+	int i;

-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
-	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
-			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

-#ifdef CONFIG_VM_EVENT_COUNTERS
-	stat_items_size += sizeof(struct vm_event_state);
-#endif
-
-	v = kmalloc(stat_items_size, GFP_KERNEL);
+	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
+	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
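The point of NR_VMSTAT_ITEMS is that vmstat_text[] may now be longer than what /proc/vmstat exports, apparently so that a CONFIG_MEMCG build can reuse the stat names even when the event counters themselves are compiled out. Both the allocation and the seq cursor bounds therefore use the explicit item count instead of ARRAY_SIZE(vmstat_text), and the BUILD_BUG_ON pins the invariant at compile time. A userspace sketch of the same sizing idea, with made-up names and counts:

	#include <stdio.h>

	static const char *const names[] = {
		"nr_demo_a", "nr_demo_b", "nr_demo_name_only",
	};
	#define NR_EXPORTED_ITEMS 2	/* fewer than the name table's length */

	int main(void)
	{
		/* iterate the exported count, not the (longer) name table */
		for (int i = 0; i < NR_EXPORTED_ITEMS; i++)
			printf("%s %d\n", names[i], 0);
		return 0;
	}
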
@@ -1690,7 +1749,7 @@
 #endif

 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-		v[i] = global_node_page_state(i);
+		v[i] = global_node_page_state_pages(i);
 	v += NR_VM_NODE_STAT_ITEMS;

 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
@@ -1708,10 +1767,7 @@
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
 {
 	(*pos)++;
-	//nr_gpu_heap is out-of-tree now so we don't want to export it.
-	if (*pos == NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS + NR_GPU_HEAP)
-		(*pos)++;
-	if (*pos >= ARRAY_SIZE(vmstat_text))
+	if (*pos >= NR_VMSTAT_ITEMS)
 		return NULL;
 	return (unsigned long *)m->private + *pos;
 }
@@ -1724,6 +1780,14 @@
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');
+
+	if (off == NR_VMSTAT_ITEMS - 1) {
+		/*
+		 * We've come to the end - add any deprecated counters to avoid
+		 * breaking userspace which might depend on them being present.
+		 */
+		seq_puts(m, "nr_unstable 0\n");
+	}
 	return 0;
 }

@@ -1752,7 +1816,7 @@
 }

 int vmstat_refresh(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
 	int err;
@@ -1777,7 +1841,7 @@
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i], val);
+				__func__, zone_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -1786,7 +1850,7 @@
 		val = atomic_long_read(&vm_numa_stat[i]);
 		if (val < 0) {
 			pr_warn("%s: %s %ld\n",
-				__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
+				__func__, numa_stat_name(i), val);
 			err = -EINVAL;
 		}
 	}
@@ -2056,24 +2120,14 @@
 	return 0;
 }

-static const struct seq_operations unusable_op = {
+static const struct seq_operations unusable_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= unusable_show,
 };

-static int unusable_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &unusable_op);
-}
-
-static const struct file_operations unusable_file_ops = {
-	.open		= unusable_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(unusable);

 static void extfrag_show_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone)
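The _op to _sops rename is what makes the macro work: DEFINE_SEQ_ATTRIBUTE(unusable) stamps out the open helper and file_operations from the unusable_sops name, roughly as sketched below (paraphrased from include/linux/seq_file.h, not verbatim), and that generated unusable_fops is what extfrag_debug_init() registers:

	/* roughly what DEFINE_SEQ_ATTRIBUTE(name) expands to */
	static int name_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &name_sops);	/* hence the _sops suffix */
	}

	static const struct file_operations name_fops = {
		.owner		= THIS_MODULE,
		.open		= name_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};
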
@@ -2108,45 +2162,28 @@
 	return 0;
 }

-static const struct seq_operations extfrag_op = {
+static const struct seq_operations extfrag_sops = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
 	.show	= extfrag_show,
 };

-static int extfrag_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &extfrag_op);
-}
-
-static const struct file_operations extfrag_file_ops = {
-	.open		= extfrag_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(extfrag);

 static int __init extfrag_debug_init(void)
 {
 	struct dentry *extfrag_debug_root;

 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
-	if (!extfrag_debug_root)
-		return -ENOMEM;

-	if (!debugfs_create_file("unusable_index", 0444,
-			extfrag_debug_root, NULL, &unusable_file_ops))
-		goto fail;
+	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
+			    &unusable_fops);

-	if (!debugfs_create_file("extfrag_index", 0444,
-			extfrag_debug_root, NULL, &extfrag_file_ops))
-		goto fail;
+	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
+			    &extfrag_fops);

 	return 0;
-fail:
-	debugfs_remove_recursive(extfrag_debug_root);
-	return -ENOMEM;
 }

 module_init(extfrag_debug_init);