@@ -40,6 +40,37 @@
 #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
 /* Max number of counters to query in bulk read is 32K */
 #define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FC_POOL_USED_BUFF_RATIO 10
+
+struct mlx5_fc_cache {
+	u64 packets;
+	u64 bytes;
+	u64 lastuse;
+};
+
+struct mlx5_fc {
+	struct list_head list;
+	struct llist_node addlist;
+	struct llist_node dellist;
+
+	/* last{packets,bytes} members are used when calculating the delta since
+	 * last reading
+	 */
+	u64 lastpackets;
+	u64 lastbytes;
+
+	struct mlx5_fc_bulk *bulk;
+	u32 id;
+	bool aging;
+
+	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
 
 /* locking scheme:
  *
@@ -52,11 +83,13 @@
  * access to counter list:
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *     mlx5_fc_stats_work(). addlist is a lockless single linked list
+ *     that doesn't require any additional synchronization when adding a
+ *     single node.
 *   - spawn thread to do the actual destroy
 *
 * - destroy (user context)
-*   - mark a counter as deleted
+*   - add a counter to lockless dellist
 *   - spawn thread to do the actual del
 *
 * - dump (user context)
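
The addlist/dellist described above follow the standard llist producer/consumer pattern: any context may publish a node with llist_add() without taking a lock, and the single worker detaches the whole pending chain at once with llist_del_all(). A minimal sketch of that pattern with a hypothetical demo_item type (not driver code):

```c
#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical item type, used only for this sketch. */
struct demo_item {
	int payload;
	struct llist_node node;
};

static LLIST_HEAD(demo_addlist);

/* Producer (any context): publish one node; no lock needed. */
static void demo_publish(struct demo_item *item)
{
	llist_add(&item->node, &demo_addlist);
}

/* Single consumer (work item): detach the whole chain atomically,
 * then walk the private snapshot without any synchronization.
 */
static void demo_consume(struct work_struct *work)
{
	struct llist_node *list = llist_del_all(&demo_addlist);
	struct demo_item *item, *tmp;

	llist_for_each_entry_safe(item, tmp, list, node)
		kfree(item);
}
```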
@@ -71,89 +104,125 @@
  * elapsed, the thread will actually query the hardware.
  */
 
-static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
+						      u32 id)
 {
-	struct rb_node **new = &root->rb_node;
-	struct rb_node *parent = NULL;
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	unsigned long next_id = (unsigned long)id + 1;
+	struct mlx5_fc *counter;
+	unsigned long tmp;
 
-	while (*new) {
-		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
-		int result = counter->id - this->id;
-
-		parent = *new;
-		if (result < 0)
-			new = &((*new)->rb_left);
-		else
-			new = &((*new)->rb_right);
+	rcu_read_lock();
+	/* skip counters that are in idr, but not yet in counters list */
+	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
+				       counter, tmp, next_id) {
+		if (!list_empty(&counter->list))
+			break;
 	}
+	rcu_read_unlock();
 
-	/* Add new node and rebalance tree. */
-	rb_link_node(&counter->node, parent, new);
-	rb_insert_color(&counter->node, root);
+	return counter ? &counter->list : &fc_stats->counters;
 }
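
This helper is what lets a sorted linked list replace the old rbtree: the IDR iterates ids in ascending order, so the first counter above the new id that is already linked marks the insertion point, and list_add_tail() before it keeps the list sorted. The list_empty() test skips ids that mlx5_fc_create() has already published to the IDR but that the worker has not yet linked into the list. The same idea reduced to a demo type (hypothetical names, sketch only):

```c
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

/* Demo-only type: an id-keyed object that also lives on a sorted list. */
struct demo_obj {
	u32 id;
	struct list_head list;	/* stays empty until the object is inserted */
};

/* Find the node to insert before by walking ids above @id in ascending
 * IDR order; fall back to the list head (i.e. append at the tail).
 */
static struct list_head *demo_lookup_next(struct idr *idr,
					  struct list_head *head, u32 id)
{
	unsigned long next_id = (unsigned long)id + 1;
	struct demo_obj *obj;
	unsigned long tmp;

	rcu_read_lock();
	idr_for_each_entry_continue_ul(idr, obj, tmp, next_id) {
		if (!list_empty(&obj->list))	/* skip not-yet-linked ids */
			break;
	}
	rcu_read_unlock();

	return obj ? &obj->list : head;
}

/* Sorted insert: adding before the next-larger element keeps the list
 * ordered by id.
 */
static void demo_insert(struct idr *idr, struct list_head *head,
			struct demo_obj *obj)
{
	list_add_tail(&obj->list, demo_lookup_next(idr, head, obj->id));
}
```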
 
-/* The function returns the last node that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
-					   struct mlx5_fc *first,
-					   u32 last_id)
+static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
+				 struct mlx5_fc *counter)
 {
-	struct mlx5_cmd_fc_bulk *b;
-	struct rb_node *node = NULL;
-	u32 afirst_id;
-	int num;
+	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
+
+	list_add_tail(&counter->list, next);
+}
+
+static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
+				 struct mlx5_fc *counter)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+	list_del(&counter->list);
+
+	spin_lock(&fc_stats->counters_idr_lock);
+	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
+	spin_unlock(&fc_stats->counters_idr_lock);
+}
+
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
+
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+				 struct mlx5_fc_cache *cache)
+{
+	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+				   flow_statistics[index]);
+	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+	if (cache->packets == packets)
+		return;
+
+	cache->packets = packets;
+	cache->bytes = bytes;
+	cache->lastuse = jiffies;
+}
+
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+					      struct mlx5_fc *first,
+					      u32 last_id)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	bool query_more_counters = (first->id <= last_id);
+	int max_bulk_len = get_max_bulk_query_len(dev);
+	u32 *data = fc_stats->bulk_query_out;
+	struct mlx5_fc *counter = first;
+	u32 bulk_base_id;
+	int bulk_len;
 	int err;
 
-	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
-			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+	while (query_more_counters) {
+		/* first id must be aligned to 4 when using bulk query */
+		bulk_base_id = counter->id & ~0x3;
 
-	/* first id must be aligned to 4 when using bulk query */
-	afirst_id = first->id & ~0x3;
+		/* number of counters to query inc. the last counter */
+		bulk_len = min_t(int, max_bulk_len,
+				 ALIGN(last_id - bulk_base_id + 1, 4));
 
-	/* number of counters to query inc. the last counter */
-	num = ALIGN(last_id - afirst_id + 1, 4);
-	if (num > max_bulk) {
-		num = max_bulk;
-		last_id = afirst_id + num - 1;
+		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+					     data);
+		if (err) {
+			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+			return;
+		}
+		query_more_counters = false;
+
+		list_for_each_entry_from(counter, &fc_stats->counters, list) {
+			int counter_index = counter->id - bulk_base_id;
+			struct mlx5_fc_cache *cache = &counter->cache;
+
+			if (counter->id >= bulk_base_id + bulk_len) {
+				query_more_counters = true;
+				break;
+			}
+
+			update_counter_cache(counter_index, data, cache);
+		}
 	}
+}
 
-	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
-	if (!b) {
-		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
-		return NULL;
-	}
+static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+	mlx5_cmd_fc_free(dev, counter->id);
+	kfree(counter);
+}
 
-	err = mlx5_cmd_fc_bulk_query(dev, b);
-	if (err) {
-		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
-		goto out;
-	}
+static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 
-	for (node = &first->node; node; node = rb_next(node)) {
-		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
-		struct mlx5_fc_cache *c = &counter->cache;
-		u64 packets;
-		u64 bytes;
-
-		if (counter->id > last_id)
-			break;
-
-		mlx5_cmd_fc_bulk_get(dev, b,
-				     counter->id, &packets, &bytes);
-
-		if (c->packets == packets)
-			continue;
-
-		c->packets = packets;
-		c->bytes = bytes;
-		c->lastuse = jiffies;
-	}
-
-out:
-	mlx5_cmd_fc_bulk_free(b);
-
-	return node;
+	if (counter->bulk)
+		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
+	else
+		mlx5_fc_free(dev, counter);
 }
 
 static void mlx5_fc_stats_work(struct work_struct *work)
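
The alignment arithmetic in mlx5_fc_stats_query_counter_range() is easier to see with concrete numbers. Suppose live counter ids span 6..18 and the device caps a bulk at 8 counters: the first pass rounds 6 down to base 4 and queries ids 4..11, since ALIGN(18 - 4 + 1, 4) = 16 is clamped to 8; the list walk then stops at the first counter with id >= 12, and the next pass starts from base 12. A small stand-alone sketch of just that arithmetic (the values are illustrative, not from the driver):

```c
#include <stdio.h>

/* Mirror of the kernel's ALIGN(): round x up to a multiple of a (power of 2). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int first_id = 6, last_id = 18;	/* live counter ids */
	int max_bulk_len = 8;				/* assumed device cap */
	unsigned int id = first_id;

	while (id <= last_id) {
		unsigned int base = id & ~0x3u;		/* round down to 4 */
		int len = ALIGN(last_id - base + 1, 4);

		if (len > max_bulk_len)
			len = max_bulk_len;
		printf("query ids [%u..%u]\n", base, base + len - 1);
		id = base + len;	/* resume after this bulk */
	}
	return 0;	/* prints [4..11], then [12..19] */
}
```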
@@ -161,59 +230,42 @@
 	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
 						 priv.fc_stats.work.work);
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	/* Take dellist first to ensure that counters cannot be deleted before
+	 * they are inserted.
+	 */
+	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
+	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
+	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
 	unsigned long now = jiffies;
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_fc *last = NULL;
-	struct rb_node *node;
-	LIST_HEAD(tmplist);
 
-	spin_lock(&fc_stats->addlist_lock);
-
-	list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+	if (addlist || !list_empty(&fc_stats->counters))
 		queue_delayed_work(fc_stats->wq, &fc_stats->work,
 				   fc_stats->sampling_interval);
 
-	spin_unlock(&fc_stats->addlist_lock);
+	llist_for_each_entry(counter, addlist, addlist)
+		mlx5_fc_stats_insert(dev, counter);
 
-	list_for_each_entry(counter, &tmplist, list)
-		mlx5_fc_stats_insert(&fc_stats->counters, counter);
+	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
+		mlx5_fc_stats_remove(dev, counter);
 
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = rb_next(node);
-
-		if (counter->deleted) {
-			rb_erase(&counter->node, &fc_stats->counters);
-
-			mlx5_cmd_fc_free(dev, counter->id);
-
-			kfree(counter);
-			continue;
-		}
-
-		last = counter;
+		mlx5_fc_release(dev, counter);
 	}
 
-	if (time_before(now, fc_stats->next_query) || !last)
+	if (time_before(now, fc_stats->next_query) ||
+	    list_empty(&fc_stats->counters))
 		return;
+	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
 
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = mlx5_fc_stats_query(dev, counter, last->id);
-	}
+	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
+	if (counter)
+		mlx5_fc_stats_query_counter_range(dev, counter, last->id);
 
 	fc_stats->next_query = now + fc_stats->sampling_interval;
 }
 
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
 {
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 	struct mlx5_fc *counter;
 	int err;
 
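
The comment at the top of mlx5_fc_stats_work() is doing real work: the order of the two llist_del_all() calls is what makes removal safe. A comment-form illustration of the ordering argument (sketch, not driver code):

```c
/* Why the worker snapshots dellist before addlist (illustration only):
 *
 * A counter reaches dellist only after it was put on addlist, because
 * create happens-before destroy. With dellist taken first:
 *
 *	dellist_snap = llist_del_all(&dellist);	// t0
 *	addlist_snap = llist_del_all(&addlist);	// t1, after t0
 *
 * Any counter in dellist_snap was added to addlist before t0, so by t1
 * it is either in addlist_snap or was already inserted by a previous
 * run. Processing insertions before removals then guarantees list_del()
 * never runs on a counter that was never list_add()ed. With the opposite
 * order, a counter created after the addlist snapshot but destroyed
 * before the dellist snapshot would be removed without ever having been
 * inserted.
 */
```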
@@ -222,28 +274,76 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = mlx5_cmd_fc_alloc(dev, &counter->id);
-	if (err)
-		goto err_out;
+	if (err) {
+		kfree(counter);
+		return ERR_PTR(err);
+	}
+
+	return counter;
+}
+
+static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter;
+
+	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
+		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
+		if (!IS_ERR(counter))
+			return counter;
+	}
+
+	return mlx5_fc_single_alloc(dev);
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	int err;
+
+	if (IS_ERR(counter))
+		return counter;
+
+	INIT_LIST_HEAD(&counter->list);
+	counter->aging = aging;
 
 	if (aging) {
-		counter->cache.lastuse = jiffies;
-		counter->aging = true;
+		u32 id = counter->id;
 
-		spin_lock(&fc_stats->addlist_lock);
-		list_add(&counter->list, &fc_stats->addlist);
-		spin_unlock(&fc_stats->addlist_lock);
+		counter->cache.lastuse = jiffies;
+		counter->lastbytes = counter->cache.bytes;
+		counter->lastpackets = counter->cache.packets;
+
+		idr_preload(GFP_KERNEL);
+		spin_lock(&fc_stats->counters_idr_lock);
+
+		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
+				    GFP_NOWAIT);
+
+		spin_unlock(&fc_stats->counters_idr_lock);
+		idr_preload_end();
+		if (err)
+			goto err_out_alloc;
+
+		llist_add(&counter->addlist, &fc_stats->addlist);
 
 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
 	}
 
 	return counter;
 
-err_out:
-	kfree(counter);
-
+err_out_alloc:
+	mlx5_fc_release(dev, counter);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(mlx5_fc_create);
+
+u32 mlx5_fc_id(struct mlx5_fc *counter)
+{
+	return counter->id;
+}
+EXPORT_SYMBOL(mlx5_fc_id);
 
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
 {
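
mlx5_fc_create() pins the IDR entry to the counter's hardware id by passing the same value as both the requested id and the max, and wraps the GFP_NOWAIT allocation in idr_preload()/idr_preload_end() so radix-tree nodes are preallocated before the spinlock is taken. The pattern in isolation (demo names, sketch only):

```c
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_idr_lock);

/* Insert @ptr at exactly @hw_id: requesting min == max pins the id. */
static int demo_idr_insert_at(void *ptr, u32 hw_id)
{
	u32 id = hw_id;
	int err;

	idr_preload(GFP_KERNEL);	/* may sleep; preallocates nodes */
	spin_lock(&demo_idr_lock);
	/* GFP_NOWAIT: no sleeping under the spinlock; preload backs it */
	err = idr_alloc_u32(&demo_idr, ptr, &id, hw_id, GFP_NOWAIT);
	spin_unlock(&demo_idr_lock);
	idr_preload_end();

	return err;	/* -ENOSPC if hw_id is already taken */
}
```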
@@ -253,65 +353,69 @@
 		return;
 
 	if (counter->aging) {
-		counter->deleted = true;
+		llist_add(&counter->dellist, &fc_stats->dellist);
 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
 		return;
 	}
 
-	mlx5_cmd_fc_free(dev, counter->id);
-	kfree(counter);
+	mlx5_fc_release(dev, counter);
 }
 EXPORT_SYMBOL(mlx5_fc_destroy);
 
 int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	int max_bulk_len;
+	int max_out_len;
 
-	fc_stats->counters = RB_ROOT;
-	INIT_LIST_HEAD(&fc_stats->addlist);
-	spin_lock_init(&fc_stats->addlist_lock);
+	spin_lock_init(&fc_stats->counters_idr_lock);
+	idr_init(&fc_stats->counters_idr);
+	INIT_LIST_HEAD(&fc_stats->counters);
+	init_llist_head(&fc_stats->addlist);
+	init_llist_head(&fc_stats->dellist);
+
+	max_bulk_len = get_max_bulk_query_len(dev);
+	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+	fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+	if (!fc_stats->bulk_query_out)
+		return -ENOMEM;
 
 	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
 	if (!fc_stats->wq)
-		return -ENOMEM;
+		goto err_wq_create;
 
 	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
 	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
 
+	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
 	return 0;
+
+err_wq_create:
+	kfree(fc_stats->bulk_query_out);
+	return -ENOMEM;
 }
 
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct llist_node *tmplist;
 	struct mlx5_fc *counter;
 	struct mlx5_fc *tmp;
-	struct rb_node *node;
 
 	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
 	destroy_workqueue(dev->priv.fc_stats.wq);
 	dev->priv.fc_stats.wq = NULL;
 
-	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-		list_del(&counter->list);
+	tmplist = llist_del_all(&fc_stats->addlist);
+	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+		mlx5_fc_release(dev, counter);
 
-		mlx5_cmd_fc_free(dev, counter->id);
+	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
+		mlx5_fc_release(dev, counter);
 
-		kfree(counter);
-	}
-
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = rb_next(node);
-
-		rb_erase(&counter->node, &fc_stats->counters);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-
-		kfree(counter);
-	}
+	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
+	idr_destroy(&fc_stats->counters_idr);
+	kfree(fc_stats->bulk_query_out);
 }
 
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -358,3 +462,243 @@
 	fc_stats->sampling_interval = min_t(unsigned long, interval,
 					    fc_stats->sampling_interval);
 }
+
+/* Flow counter bulks */
+
+struct mlx5_fc_bulk {
+	struct list_head pool_list;
+	u32 base_id;
+	int bulk_len;
+	unsigned long *bitmask;
+	struct mlx5_fc fcs[];
+};
+
+static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
+			 u32 id)
+{
+	counter->bulk = bulk;
+	counter->id = id;
+}
+
+static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+{
+	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+{
+	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
+	struct mlx5_fc_bulk *bulk;
+	int err = -ENOMEM;
+	int bulk_len;
+	u32 base_id;
+	int i;
+
+	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
+	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
+
+	bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+		       GFP_KERNEL);
+	if (!bulk)
+		goto err_alloc_bulk;
+
+	bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+				GFP_KERNEL);
+	if (!bulk->bitmask)
+		goto err_alloc_bitmask;
+
+	err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
+	if (err)
+		goto err_mlx5_cmd_bulk_alloc;
+
+	bulk->base_id = base_id;
+	bulk->bulk_len = bulk_len;
+	for (i = 0; i < bulk_len; i++) {
+		mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
+		set_bit(i, bulk->bitmask);
+	}
+
+	return bulk;
+
+err_mlx5_cmd_bulk_alloc:
+	kfree(bulk->bitmask);
+err_alloc_bitmask:
+	kfree(bulk);
+err_alloc_bulk:
+	return ERR_PTR(err);
+}
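
mlx5_fc_bulk_create() sizes the bulk from the flow_counter_bulk_alloc capability. Assuming MLX5_FC_BULK_NUM_FCS(), which is defined outside this hunk, multiplies the capability value by 128 (an assumption here, so treat the numbers as illustrative), a capability of 4 yields one firmware allocation backing 512 counters plus a 512-bit ownership bitmap. A sketch of the sizing arithmetic under that assumption:

```c
#include <stdio.h>

/* Assumed definitions mirroring macros that live outside this hunk:
 * each unit of the capability value is worth 128 counters.
 */
#define FC_BULK_SIZE_FACTOR 128
#define FC_BULK_NUM_FCS(cap) (FC_BULK_SIZE_FACTOR * (cap))

int main(void)
{
	int caps[] = { 0, 1, 2, 4 };	/* sample capability values */

	for (int i = 0; i < 4; i++) {
		int cap = caps[i];
		/* cap == 0: no bulk allocation support, fall back to 1 */
		int bulk_len = cap > 0 ? FC_BULK_NUM_FCS(cap) : 1;

		printf("cap=%d -> bulk_len=%d\n", cap, bulk_len);
	}
	return 0;	/* 1, 128, 256, 512 */
}
```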
+
+static int
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+{
+	if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
+		return -EBUSY;
+	}
+
+	mlx5_cmd_fc_free(dev, bulk->base_id);
+	kfree(bulk->bitmask);
+	kfree(bulk);
+
+	return 0;
+}
+
+static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+{
+	int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
+
+	if (free_fc_index >= bulk->bulk_len)
+		return ERR_PTR(-ENOSPC);
+
+	clear_bit(free_fc_index, bulk->bitmask);
+	return &bulk->fcs[free_fc_index];
+}
+
+static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
+{
+	int fc_index = fc->id - bulk->base_id;
+
+	if (test_bit(fc_index, bulk->bitmask))
+		return -EINVAL;
+
+	set_bit(fc_index, bulk->bitmask);
+	return 0;
+}
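
The per-bulk bitmap is a plain ownership ledger: a set bit means the slot is free, find_first_bit() picks the lowest free slot, clear_bit() claims it, and set_bit() returns it, with test_bit() catching a double release. The same idiom in compact user-space form, with GCC builtins standing in for the kernel bitmap helpers (demo names):

```c
#include <stdint.h>
#include <stdio.h>

/* Demo: 64-slot pool; bit set = slot free (same convention as the bulk). */
static uint64_t free_mask = ~0ULL;

static int acquire_slot(void)
{
	if (!free_mask)
		return -1;			/* -ENOSPC equivalent */
	int idx = __builtin_ctzll(free_mask);	/* lowest set bit, like find_first_bit */
	free_mask &= ~(1ULL << idx);		/* clear_bit: claim it */
	return idx;
}

static int release_slot(int idx)
{
	if (free_mask & (1ULL << idx))
		return -1;			/* already free: double release */
	free_mask |= 1ULL << idx;		/* set_bit: return it */
	return 0;
}

int main(void)
{
	int a = acquire_slot(), b = acquire_slot();

	printf("got slots %d and %d\n", a, b);	/* 0 and 1 */
	release_slot(a);
	printf("next: %d\n", acquire_slot());	/* 0 again: lowest free wins */
	return 0;
}
```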
+
+/* Flow counters pool API */
+
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
+{
+	fc_pool->dev = dev;
+	mutex_init(&fc_pool->pool_lock);
+	INIT_LIST_HEAD(&fc_pool->fully_used);
+	INIT_LIST_HEAD(&fc_pool->partially_used);
+	INIT_LIST_HEAD(&fc_pool->unused);
+	fc_pool->available_fcs = 0;
+	fc_pool->used_fcs = 0;
+	fc_pool->threshold = 0;
+}
+
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+{
+	struct mlx5_core_dev *dev = fc_pool->dev;
+	struct mlx5_fc_bulk *bulk;
+	struct mlx5_fc_bulk *tmp;
+
+	list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
+		mlx5_fc_bulk_destroy(dev, bulk);
+	list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
+		mlx5_fc_bulk_destroy(dev, bulk);
+	list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
+		mlx5_fc_bulk_destroy(dev, bulk);
+}
+
+static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+{
+	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+				   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+}
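
The threshold bounds how many idle counters the pool may hoard before fully-free bulks are returned to firmware: a tenth of the counters currently in use (MLX5_FC_POOL_USED_BUFF_RATIO), capped at BIT(18) = 262144 (MLX5_FC_POOL_MAX_THRESHOLD). With 5000 counters in use, up to 500 idle ones are tolerated. The arithmetic as a tiny check:

```c
#include <stdio.h>

#define FC_POOL_MAX_THRESHOLD (1 << 18)	/* BIT(18) = 262144 */
#define FC_POOL_USED_BUFF_RATIO 10

static int pool_threshold(int used_fcs)
{
	int t = used_fcs / FC_POOL_USED_BUFF_RATIO;

	return t < FC_POOL_MAX_THRESHOLD ? t : FC_POOL_MAX_THRESHOLD;
}

int main(void)
{
	/* 10% of in-use counters, capped at 262144 */
	printf("%d\n", pool_threshold(5000));	  /* 500 */
	printf("%d\n", pool_threshold(90));	  /* 9 */
	printf("%d\n", pool_threshold(5000000));  /* 262144 (capped) */
	return 0;
}
```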
+
+static struct mlx5_fc_bulk *
+mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+{
+	struct mlx5_core_dev *dev = fc_pool->dev;
+	struct mlx5_fc_bulk *new_bulk;
+
+	new_bulk = mlx5_fc_bulk_create(dev);
+	if (!IS_ERR(new_bulk))
+		fc_pool->available_fcs += new_bulk->bulk_len;
+	mlx5_fc_pool_update_threshold(fc_pool);
+	return new_bulk;
+}
+
+static void
+mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+{
+	struct mlx5_core_dev *dev = fc_pool->dev;
+
+	fc_pool->available_fcs -= bulk->bulk_len;
+	mlx5_fc_bulk_destroy(dev, bulk);
+	mlx5_fc_pool_update_threshold(fc_pool);
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
+			       struct list_head *next_list,
+			       bool move_non_full_bulk)
+{
+	struct mlx5_fc_bulk *bulk;
+	struct mlx5_fc *fc;
+
+	if (list_empty(src_list))
+		return ERR_PTR(-ENODATA);
+
+	bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
+	fc = mlx5_fc_bulk_acquire_fc(bulk);
+	if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
+		list_move(&bulk->pool_list, next_list);
+	return fc;
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
+{
+	struct mlx5_fc_bulk *new_bulk;
+	struct mlx5_fc *fc;
+
+	mutex_lock(&fc_pool->pool_lock);
+
+	fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
+					    &fc_pool->fully_used, false);
+	if (IS_ERR(fc))
+		fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
+						    &fc_pool->partially_used,
+						    true);
+	if (IS_ERR(fc)) {
+		new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
+		if (IS_ERR(new_bulk)) {
+			fc = ERR_CAST(new_bulk);
+			goto out;
+		}
+		fc = mlx5_fc_bulk_acquire_fc(new_bulk);
+		list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+	}
+	fc_pool->available_fcs--;
+	fc_pool->used_fcs++;
+
+out:
+	mutex_unlock(&fc_pool->pool_lock);
+	return fc;
+}
+
+static void
+mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+{
+	struct mlx5_core_dev *dev = fc_pool->dev;
+	struct mlx5_fc_bulk *bulk = fc->bulk;
+	int bulk_free_fcs_amount;
+
+	mutex_lock(&fc_pool->pool_lock);
+
+	if (mlx5_fc_bulk_release_fc(bulk, fc)) {
+		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
+		goto unlock;
+	}
+
+	fc_pool->available_fcs++;
+	fc_pool->used_fcs--;
+
+	bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
+	if (bulk_free_fcs_amount == 1)
+		list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
+	if (bulk_free_fcs_amount == bulk->bulk_len) {
+		list_del(&bulk->pool_list);
+		if (fc_pool->available_fcs > fc_pool->threshold)
+			mlx5_fc_pool_free_bulk(fc_pool, bulk);
+		else
+			list_add(&bulk->pool_list, &fc_pool->unused);
+	}
+
+unlock:
+	mutex_unlock(&fc_pool->pool_lock);
+}
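
Taken together, acquire and release move each bulk between the pool's three lists as its free count changes, and a bulk is handed back to firmware only once it is entirely free and the pool already holds more spare counters than the threshold allows. A comment-form summary of the transitions (sketch, not driver code):

```c
/* Bulk list transitions in the pool (sketch, not driver code):
 *
 * acquire:
 *	- prefer a bulk from partially_used; if its last free counter is
 *	  taken, move the bulk to fully_used
 *	- else take a bulk from unused and move it to partially_used
 *	- else allocate a fresh bulk and add it to partially_used
 *
 * release:
 *	- free count 0 -> 1: move the bulk from fully_used back to
 *	  partially_used
 *	- free count == bulk_len (all free): unlink the bulk, then either
 *	  destroy it (available_fcs > threshold) or park it on unused
 */
```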
---|