2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/tools/testing/radix-tree/regression1.c
@@ -44,7 +44,6 @@
 #include "regression.h"
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct page {
 	pthread_mutex_t lock;
@@ -53,12 +52,12 @@
 	unsigned long index;
 };
 
-static struct page *page_alloc(void)
+static struct page *page_alloc(int index)
 {
 	struct page *p;
 	p = malloc(sizeof(struct page));
 	p->count = 1;
-	p->index = 1;
+	p->index = index;
 	pthread_mutex_init(&p->lock, NULL);
 
 	return p;
@@ -80,53 +79,33 @@
 static unsigned find_get_pages(unsigned long start,
 			    unsigned int nr_pages, struct page **pages)
 {
-	unsigned int i;
-	unsigned int ret;
-	unsigned int nr_found;
+	XA_STATE(xas, &mt_tree, start);
+	struct page *page;
+	unsigned int ret = 0;
 
 	rcu_read_lock();
-restart:
-	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-			(void ***)pages, NULL, start, nr_pages);
-	ret = 0;
-	for (i = 0; i < nr_found; i++) {
-		struct page *page;
-repeat:
-		page = radix_tree_deref_slot((void **)pages[i]);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
 			continue;
 
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				/*
-				 * Transient condition which can only trigger
-				 * when entry at index 0 moves out of or back
-				 * to root: none yet gotten, safe to restart.
-				 */
-				assert((start | i) == 0);
-				goto restart;
-			}
-			/*
-			 * No exceptional entries are inserted in this test.
-			 */
-			assert(0);
-		}
-
 		pthread_mutex_lock(&page->lock);
-		if (!page->count) {
-			pthread_mutex_unlock(&page->lock);
-			goto repeat;
-		}
+		if (!page->count)
+			goto unlock;
+
 		/* don't actually update page refcount */
 		pthread_mutex_unlock(&page->lock);
 
 		/* Has the page moved? */
-		if (unlikely(page != *((void **)pages[i]))) {
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		pages[ret] = page;
 		ret++;
+		continue;
+unlock:
+		pthread_mutex_unlock(&page->lock);
+put_page:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
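
For reference, the rewritten loop above is the standard XArray iteration pattern: XA_STATE() declares an on-stack cursor, xas_for_each() walks the present entries under rcu_read_lock(), xas_retry() skips the transient retry entries a concurrent modification may leave behind, xas_reload() revalidates an entry after a lock has been dropped and retaken, and xas_reset() forces the next iteration to re-walk from the root. A minimal standalone sketch of the same pattern; the function and variable names outside the XArray API are illustrative:

	/* Collect up to @max present entries starting at @start. */
	static unsigned int gang_lookup(struct xarray *xa, unsigned long start,
					void **results, unsigned int max)
	{
		XA_STATE(xas, xa, start);	/* on-stack iteration cursor */
		void *entry;
		unsigned int ret = 0;

		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			/* Skip retry entries left by a concurrent tree change */
			if (xas_retry(&xas, entry))
				continue;
			results[ret++] = entry;
			if (ret == max)
				break;
		}
		rcu_read_unlock();
		return ret;
	}
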
@@ -145,30 +124,30 @@
 	for (j = 0; j < 1000000; j++) {
 		struct page *p;
 
-		p = page_alloc();
-		pthread_mutex_lock(&mt_lock);
+		p = page_alloc(0);
+		xa_lock(&mt_tree);
 		radix_tree_insert(&mt_tree, 0, p);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 
-		p = page_alloc();
-		pthread_mutex_lock(&mt_lock);
+		p = page_alloc(1);
+		xa_lock(&mt_tree);
 		radix_tree_insert(&mt_tree, 1, p);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 
-		pthread_mutex_lock(&mt_lock);
+		xa_lock(&mt_tree);
 		p = radix_tree_delete(&mt_tree, 1);
 		pthread_mutex_lock(&p->lock);
 		p->count--;
 		pthread_mutex_unlock(&p->lock);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 		page_free(p);
 
-		pthread_mutex_lock(&mt_lock);
+		xa_lock(&mt_tree);
 		p = radix_tree_delete(&mt_tree, 0);
 		pthread_mutex_lock(&p->lock);
 		p->count--;
 		pthread_mutex_unlock(&p->lock);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 		page_free(p);
 	}
 } else {
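
The external mt_lock can go away because every XArray embeds its own spinlock: xa_lock()/xa_unlock() take it directly, and the radix-tree test harness implements the kernel locking primitives with pthreads. In pure XArray code, modifications made while the caller already holds the lock use the double-underscore entry points, while the plain wrappers take the lock internally. A hedged sketch, with xa, index and p standing in for the test's names:

	xa_lock(&xa);
	__xa_store(&xa, index, p, GFP_KERNEL);	/* caller holds xa_lock */
	__xa_erase(&xa, index);			/* likewise */
	xa_unlock(&xa);

	/* Equivalent convenience wrappers that lock internally: */
	xa_store(&xa, index, p, GFP_KERNEL);
	xa_erase(&xa, index);
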
@@ -198,7 +177,7 @@
 	nr_threads = 2;
 	pthread_barrier_init(&worker_barrier, NULL, nr_threads);
 
-	threads = malloc(nr_threads * sizeof(pthread_t *));
+	threads = malloc(nr_threads * sizeof(*threads));
 
 	for (i = 0; i < nr_threads; i++) {
 		arg = i;
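
The last hunk also fixes a latent sizing bug. Assuming threads is declared as pthread_t * (its declaration is outside this hunk), the old sizeof(pthread_t *) allocated one pointer per thread and only worked where pthread_t happens to be pointer-sized; sizeof(*threads) sizes the element type itself and stays correct even if the declaration changes:

	pthread_t *threads;	/* assumed declaration */

	threads = malloc(nr_threads * sizeof(*threads));	/* element size, not pointer size */
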