```diff
@@ -44,7 +44,6 @@
 #include "regression.h"
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct page {
         pthread_mutex_t lock;
@@ -53,12 +52,12 @@
         unsigned long index;
 };
 
-static struct page *page_alloc(void)
+static struct page *page_alloc(int index)
 {
         struct page *p;
         p = malloc(sizeof(struct page));
         p->count = 1;
-        p->index = 1;
+        p->index = index;
         pthread_mutex_init(&p->lock, NULL);
 
         return p;
```
```diff
@@ -80,53 +79,33 @@
 static unsigned find_get_pages(unsigned long start,
                             unsigned int nr_pages, struct page **pages)
 {
-        unsigned int i;
-        unsigned int ret;
-        unsigned int nr_found;
+        XA_STATE(xas, &mt_tree, start);
+        struct page *page;
+        unsigned int ret = 0;
 
         rcu_read_lock();
-restart:
-        nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-                                (void ***)pages, NULL, start, nr_pages);
-        ret = 0;
-        for (i = 0; i < nr_found; i++) {
-                struct page *page;
-repeat:
-                page = radix_tree_deref_slot((void **)pages[i]);
-                if (unlikely(!page))
+        xas_for_each(&xas, page, ULONG_MAX) {
+                if (xas_retry(&xas, page))
                         continue;
 
-                if (radix_tree_exception(page)) {
-                        if (radix_tree_deref_retry(page)) {
-                                /*
-                                 * Transient condition which can only trigger
-                                 * when entry at index 0 moves out of or back
-                                 * to root: none yet gotten, safe to restart.
-                                 */
-                                assert((start | i) == 0);
-                                goto restart;
-                        }
-                        /*
-                         * No exceptional entries are inserted in this test.
-                         */
-                        assert(0);
-                }
-
                 pthread_mutex_lock(&page->lock);
-                if (!page->count) {
-                        pthread_mutex_unlock(&page->lock);
-                        goto repeat;
-                }
+                if (!page->count)
+                        goto unlock;
+
                 /* don't actually update page refcount */
                 pthread_mutex_unlock(&page->lock);
 
                 /* Has the page moved? */
-                if (unlikely(page != *((void **)pages[i]))) {
-                        goto repeat;
-                }
+                if (unlikely(page != xas_reload(&xas)))
+                        goto put_page;
 
                 pages[ret] = page;
                 ret++;
+                continue;
+unlock:
+                pthread_mutex_unlock(&page->lock);
+put_page:
+                xas_reset(&xas);
         }
         rcu_read_unlock();
         return ret;
```
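For readability, here is how the converted find_get_pages() reads once the hunk above is applied, reassembled from the new side of the diff; the comments on the xas_retry() and xas_reset() lines are added here for explanation and are not part of the patch:

```c
static unsigned find_get_pages(unsigned long start,
                            unsigned int nr_pages, struct page **pages)
{
        XA_STATE(xas, &mt_tree, start);
        struct page *page;
        unsigned int ret = 0;

        rcu_read_lock();
        xas_for_each(&xas, page, ULONG_MAX) {
                /* Skip retry entries left behind by concurrent tree reshaping. */
                if (xas_retry(&xas, page))
                        continue;

                pthread_mutex_lock(&page->lock);
                if (!page->count)
                        goto unlock;

                /* don't actually update page refcount */
                pthread_mutex_unlock(&page->lock);

                /* Has the page moved? */
                if (unlikely(page != xas_reload(&xas)))
                        goto put_page;

                pages[ret] = page;
                ret++;
                continue;
unlock:
                pthread_mutex_unlock(&page->lock);
put_page:
                /* Forget the cached position; the next iteration re-walks from the root. */
                xas_reset(&xas);
        }
        rcu_read_unlock();
        return ret;
}
```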
```diff
@@ -145,30 +124,30 @@
                 for (j = 0; j < 1000000; j++) {
                         struct page *p;
 
-                        p = page_alloc();
-                        pthread_mutex_lock(&mt_lock);
+                        p = page_alloc(0);
+                        xa_lock(&mt_tree);
                         radix_tree_insert(&mt_tree, 0, p);
-                        pthread_mutex_unlock(&mt_lock);
+                        xa_unlock(&mt_tree);
 
-                        p = page_alloc();
-                        pthread_mutex_lock(&mt_lock);
+                        p = page_alloc(1);
+                        xa_lock(&mt_tree);
                         radix_tree_insert(&mt_tree, 1, p);
-                        pthread_mutex_unlock(&mt_lock);
+                        xa_unlock(&mt_tree);
 
-                        pthread_mutex_lock(&mt_lock);
+                        xa_lock(&mt_tree);
                         p = radix_tree_delete(&mt_tree, 1);
                         pthread_mutex_lock(&p->lock);
                         p->count--;
                         pthread_mutex_unlock(&p->lock);
-                        pthread_mutex_unlock(&mt_lock);
+                        xa_unlock(&mt_tree);
                         page_free(p);
 
-                        pthread_mutex_lock(&mt_lock);
+                        xa_lock(&mt_tree);
                         p = radix_tree_delete(&mt_tree, 0);
                         pthread_mutex_lock(&p->lock);
                         p->count--;
                         pthread_mutex_unlock(&p->lock);
-                        pthread_mutex_unlock(&mt_lock);
+                        xa_unlock(&mt_tree);
                         page_free(p);
                 }
         } else {
```
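As an aside, not part of the patch: the test keeps the radix_tree_insert()/radix_tree_delete() entry points and therefore takes the tree lock explicitly with xa_lock()/xa_unlock(). A minimal sketch of the same store/erase pair through the regular XArray calls, which take that lock internally, assuming mt_tree could also be driven through xa_store()/xa_erase() in this harness:

```c
struct page *p = page_alloc(0);

/* xa_store() and xa_erase() acquire the XArray's internal spinlock
 * themselves, so no explicit xa_lock()/xa_unlock() pair is needed. */
xa_store(&mt_tree, 0, p, GFP_KERNEL);

/* xa_erase() returns the entry that was stored at index 0. */
p = xa_erase(&mt_tree, 0);
```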
```diff
@@ -198,7 +177,7 @@
         nr_threads = 2;
         pthread_barrier_init(&worker_barrier, NULL, nr_threads);
 
-        threads = malloc(nr_threads * sizeof(pthread_t *));
+        threads = malloc(nr_threads * sizeof(*threads));
 
         for (i = 0; i < nr_threads; i++) {
                 arg = i;
```