forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,8 +35,9 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
 	MLX5_PAGES_CANT_GIVE	= 0,
@@ -47,15 +48,17 @@
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u16	func_id;
+	u8	ec_function;
 	s32	npages;
 	struct work_struct work;
+	u8	release_all;
 };
 
 struct fw_page {
 	struct rb_node		rb_node;
 	u64			addr;
 	struct page	       *page;
-	u16			func_id;
+	u32			function;
 	unsigned long		bitmask;
 	struct list_head	list;
 	unsigned		free_count;
@@ -71,14 +74,49 @@
 	MLX5_NUM_4K_IN_PAGE	= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };
 
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node **new = &root->rb_node;
+	return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+{
+	struct rb_root *root;
+	int err;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (root)
+		return root;
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+
+	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
+	if (err) {
+		kfree(root);
+		return ERR_PTR(err);
+	}
+
+	*root = RB_ROOT;
+
+	return root;
+}
+
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
+{
 	struct rb_node *parent = NULL;
+	struct rb_root *root;
+	struct rb_node **new;
 	struct fw_page *nfp;
 	struct fw_page *tfp;
 	int i;
+
+	root = page_root_per_function(dev, function);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+
+	new = &root->rb_node;
 
 	while (*new) {
 		parent = *new;
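
Annotation: the hunk above is the heart of this change. The single dev->priv.page_root rb-tree becomes one tree per function, created lazily in the dev->priv.page_root_xa xarray by page_root_per_function(). The lookup key packs the 16-bit function id and the embedded-CPU flag into one u32. A minimal userspace reduction of get_function() (hypothetical test harness, not driver code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of get_function(): func_id in bits 0-15, ec flag in bit 16. */
    static uint32_t get_function(uint16_t func_id, bool ec_function)
    {
    	return (uint32_t)func_id | ((uint32_t)ec_function << 16);
    }

    int main(void)
    {
    	/* The same func_id on the host and on the embedded CPU must map
    	 * to two distinct page-tree keys. */
    	assert(get_function(5, false) == 0x00005);
    	assert(get_function(5, true)  == 0x10005);
    	assert(get_function(5, true) != get_function(5, false));
    	return 0;
    }
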
@@ -97,7 +135,7 @@
 
 	nfp->addr = addr;
 	nfp->page = page;
-	nfp->func_id = func_id;
+	nfp->function = function;
 	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
 	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
 		set_bit(i, &nfp->bitmask);
@@ -109,12 +147,19 @@
 	return 0;
 }
 
-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+				    u32 function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node *tmp = root->rb_node;
 	struct fw_page *result = NULL;
+	struct rb_root *root;
+	struct rb_node *tmp;
 	struct fw_page *tfp;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return NULL;
+
+	tmp = root->rb_node;
 
 	while (tmp) {
 		tfp = rb_entry(tmp, struct fw_page, rb_node);
@@ -134,16 +179,17 @@
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 				s32 *npages, int boot)
 {
-	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
 	int err;
 
 	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
 	MLX5_SET(query_pages_in, in, op_mod, boot ?
 		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
 		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
 	if (err)
 		return err;
 
@@ -153,18 +199,25 @@
 	return err;
 }
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
-	struct fw_page *fp;
+	struct fw_page *fp = NULL;
+	struct fw_page *iter;
 	unsigned n;
 
-	if (list_empty(&dev->priv.free_list))
+	list_for_each_entry(iter, &dev->priv.free_list, list) {
+		if (iter->function != function)
+			continue;
+		fp = iter;
+	}
+
+	if (list_empty(&dev->priv.free_list) || !fp)
 		return -ENOMEM;
 
-	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
 	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
 	if (n >= MLX5_NUM_4K_IN_PAGE) {
-		mlx5_core_warn(dev, "alloc 4k bug\n");
+		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+			       fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
 		return -ENOENT;
 	}
 	clear_bit(n, &fp->bitmask);
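
Annotation: alloc_4k() now scans the shared free list for a fw_page belonging to the same function key, since a page donated on behalf of one function must not be handed to another. The underlying chunk bookkeeping is unchanged: each tracked host page carries a bitmask in which a set bit marks a free 4K chunk. A rough userspace sketch of that scheme (illustrative names; NUM_4K_IN_PAGE of 4 assumes a hypothetical 16K host page):

    #include <stdio.h>

    #define NUM_4K_IN_PAGE 4	/* stand-in for MLX5_NUM_4K_IN_PAGE */

    struct fw_page_sketch {
    	unsigned long bitmask;	/* bit n set => 4K chunk n is free */
    	unsigned int free_count;
    };

    /* Rough equivalent of find_first_bit() over the chunk mask. */
    static int first_free_chunk(unsigned long mask)
    {
    	for (int i = 0; i < NUM_4K_IN_PAGE; i++)
    		if (mask & (1UL << i))
    			return i;
    	return -1;
    }

    static int alloc_chunk(struct fw_page_sketch *fp)	/* cf. alloc_4k() */
    {
    	int n = first_free_chunk(fp->bitmask);

    	if (n < 0)
    		return -1;
    	fp->bitmask &= ~(1UL << n);	/* cf. clear_bit() */
    	fp->free_count--;
    	return n;
    }

    static void free_chunk(struct fw_page_sketch *fp, int n)	/* cf. free_4k() */
    {
    	fp->bitmask |= 1UL << n;	/* cf. set_bit() */
    	fp->free_count++;
    }

    int main(void)
    {
    	struct fw_page_sketch fp = {
    		.bitmask = (1UL << NUM_4K_IN_PAGE) - 1,
    		.free_count = NUM_4K_IN_PAGE,
    	};
    	int a = alloc_chunk(&fp);
    	int b = alloc_chunk(&fp);

    	printf("chunks %d and %d allocated, %u still free\n", a, b, fp.free_count);
    	free_chunk(&fp, a);
    	free_chunk(&fp, b);
    	return 0;
    }
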
@@ -179,40 +232,51 @@
 
 #define MLX5_U64_4K_PAGE_MASK		((~(u64)0U) << PAGE_SHIFT)
 
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+		     bool in_free_list)
+{
+	struct rb_root *root;
+
+	root = xa_load(&dev->priv.page_root_xa, fwp->function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	rb_erase(&fwp->rb_node, root);
+	if (in_free_list)
+		list_del(&fwp->list);
+	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
+		       PAGE_SIZE, DMA_BIDIRECTIONAL);
+	__free_page(fwp->page);
+	kfree(fwp);
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
 	struct fw_page *fwp;
 	int n;
 
-	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
 	if (!fwp) {
-		mlx5_core_warn(dev, "page not found\n");
+		mlx5_core_warn_rl(dev, "page not found\n");
 		return;
 	}
-
 	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
 	fwp->free_count++;
 	set_bit(n, &fwp->bitmask);
-	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-		rb_erase(&fwp->rb_node, &dev->priv.page_root);
-		if (fwp->free_count != 1)
-			list_del(&fwp->list);
-		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(fwp->page);
-		kfree(fwp);
-	} else if (fwp->free_count == 1) {
+	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+		free_fwp(dev, fwp, fwp->free_count != 1);
+	else if (fwp->free_count == 1)
 		list_add(&fwp->list, &dev->priv.free_list);
-	}
 }
 
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
+	struct device *device = mlx5_core_dma_dev(dev);
+	int nid = dev_to_node(device);
 	struct page *page;
 	u64 zero_addr = 1;
 	u64 addr;
 	int err;
-	int nid = dev_to_node(&dev->pdev->dev);
 
 	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
 	if (!page) {
@@ -220,9 +284,8 @@
 		return -ENOMEM;
 	}
 map:
-	addr = dma_map_page(&dev->pdev->dev, page, 0,
-			    PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(device, addr)) {
 		mlx5_core_warn(dev, "failed dma mapping page\n");
 		err = -ENOMEM;
 		goto err_mapping;
@@ -234,11 +297,10 @@
 		goto map;
 	}
 
-	err = insert_page(dev, addr, page, func_id);
+	err = insert_page(dev, addr, page, function);
 	if (err) {
 		mlx5_core_err(dev, "failed to track allocated page\n");
-		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
+		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
 err_mapping:
@@ -246,31 +308,33 @@
 		__free_page(page);
 
 	if (zero_addr == 0)
-		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+		dma_unmap_page(device, zero_addr, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 
 	return err;
 }
 
-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+			     bool ec_function)
 {
-	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int err;
 
 	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev, manage_pages, in);
 	if (err)
 		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
 			       func_id, err);
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-		      int notify_fail)
+		      int notify_fail, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	u64 addr;
@@ -288,10 +352,10 @@
 
 	for (i = 0; i < npages; i++) {
 retry:
-		err = alloc_4k(dev, &addr);
+		err = alloc_4k(dev, &addr, function);
 		if (err) {
 			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
+				err = alloc_system_page(dev, function);
 			if (err)
 				goto out_4k;
 
@@ -304,6 +368,7 @@
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err) {
@@ -315,20 +380,54 @@
 	dev->priv.fw_pages += npages;
 	if (func_id)
 		dev->priv.vfs_pages += npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages += npages;
 
-	mlx5_core_dbg(dev, "err %d\n", err);
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+		      npages, ec_function, func_id, err);
 
 	kvfree(in);
 	return 0;
 
out_4k:
 	for (i--; i >= 0; i--)
-		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
 	kvfree(in);
 	if (notify_fail)
-		page_notify_fail(dev, func_id);
+		page_notify_fail(dev, func_id, ec_function);
 	return err;
+}
+
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+			      bool ec_function)
+{
+	u32 function = get_function(func_id, ec_function);
+	struct rb_root *root;
+	struct rb_node *p;
+	int npages = 0;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	p = rb_first(root);
+	while (p) {
+		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+		p = rb_next(p);
+		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+		free_fwp(dev, fwp, fwp->free_count);
+	}
+
+	dev->priv.fw_pages -= npages;
+	if (func_id)
+		dev->priv.vfs_pages -= npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= npages;
+
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+		      npages, ec_function, func_id);
 }
 
 static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
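
Annotation: release_all_pages() computes the number of pages firmware still holds per fw_page as the total chunk count minus free_count, summed over the per-function tree, and only then adjusts the fw_pages/vfs_pages/peer_pf_pages counters. A tiny self-contained check of that arithmetic (made-up values, not taken from the driver):

    #include <stdio.h>

    #define NUM_4K_IN_PAGE 4	/* stand-in for MLX5_NUM_4K_IN_PAGE */

    int main(void)
    {
    	/* free_count of three tracked fw_pages: fully used, half used, unused */
    	unsigned int free_count[] = { 0, 2, 4 };
    	int npages = 0;

    	for (unsigned int i = 0; i < 3; i++)
    		npages += NUM_4K_IN_PAGE - free_count[i];	/* chunks firmware still holds */

    	printf("npages = %d\n", npages);	/* 4 + 2 + 0 = 6 */
    	return 0;
    }
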
@@ -352,25 +451,30 @@
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 			     u32 *in, int in_size, u32 *out, int out_size)
 {
+	struct rb_root *root;
 	struct fw_page *fwp;
 	struct rb_node *p;
+	bool ec_function;
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
 
-	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+	if (!mlx5_cmd_is_down(dev))
 		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
 
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
 	func_id = MLX5_GET(manage_pages_in, in, function_id);
+	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
 
-	p = rb_first(&dev->priv.page_root);
+	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
+	if (WARN_ON_ONCE(!root))
+		return -EEXIST;
+
+	p = rb_first(root);
 	while (p && i < npages) {
 		fwp = rb_entry(p, struct fw_page, rb_node);
 		p = rb_next(p);
-		if (fwp->func_id != func_id)
-			continue;
 
 		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
 	}
@@ -379,11 +483,12 @@
 	return 0;
 }
 
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-			 int *nclaimed)
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+			 int *nclaimed, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int num_claimed;
 	u32 *out;
 	int err;
@@ -401,8 +506,10 @@
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+		      func_id, npages, outlen);
 	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
 	if (err) {
 		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
@@ -418,7 +525,7 @@
 	}
 
 	for (i = 0; i < num_claimed; i++)
-		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
 
 	if (nclaimed)
 		*nclaimed = num_claimed;
@@ -426,6 +533,8 @@
 	dev->priv.fw_pages -= num_claimed;
 	if (func_id)
 		dev->priv.vfs_pages -= num_claimed;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= num_claimed;
 
out_free:
 	kvfree(out);
@@ -438,10 +547,13 @@
 	struct mlx5_core_dev *dev = req->dev;
 	int err = 0;
 
-	if (req->npages < 0)
-		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+	if (req->release_all)
+		release_all_pages(dev, req->func_id, req->ec_function);
+	else if (req->npages < 0)
+		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+				    req->ec_function);
 	else if (req->npages > 0)
-		err = give_pages(dev, req->func_id, req->npages, 1);
+		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
 
 	if (err)
 		mlx5_core_warn(dev, "%s fail %d\n",
@@ -450,28 +562,54 @@
 	kfree(req);
 }
 
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages)
+enum {
+	EC_FUNCTION_MASK = 0x8000,
+	RELEASE_ALL_PAGES_MASK = 0x4000,
+};
+
+static int req_pages_handler(struct notifier_block *nb,
+			     unsigned long type, void *data)
 {
 	struct mlx5_pages_req *req;
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	struct mlx5_eqe *eqe;
+	bool ec_function;
+	bool release_all;
+	u16 func_id;
+	s32 npages;
 
+	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	eqe = data;
+
+	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
+	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+		      RELEASE_ALL_PAGES_MASK;
+	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req) {
 		mlx5_core_warn(dev, "failed to allocate pages request\n");
-		return;
+		return NOTIFY_DONE;
 	}
 
 	req->dev = dev;
 	req->func_id = func_id;
 	req->npages = npages;
+	req->ec_function = ec_function;
+	req->release_all = release_all;
 	INIT_WORK(&req->work, pages_work_handler);
 	queue_work(dev->priv.pg_wq, &req->work);
+	return NOTIFY_OK;
 }
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	u16 uninitialized_var(func_id);
-	s32 uninitialized_var(npages);
+	u16 func_id;
+	s32 npages;
 	int err;
 
 	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
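
Annotation: the handler is now a notifier callback registered on the page-request event queue. It recovers mlx5_priv from the notifier_block via mlx5_nb_cof(), then decodes the EQE, whose 16-bit ec_function word carries two flag bits next to the function id. A hedged userspace sketch of just that flag decoding (the byte-swap stub assumes a little-endian host):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EC_FUNCTION_MASK	0x8000	/* same values as the enum above */
    #define RELEASE_ALL_PAGES_MASK	0x4000

    static uint16_t be16_to_cpu_sketch(uint16_t be)	/* stand-in for be16_to_cpu() */
    {
    	return (uint16_t)((be >> 8) | (be << 8));
    }

    int main(void)
    {
    	uint16_t raw = 0x00c0;			/* 0xc000 big-endian on the wire */
    	uint16_t word = be16_to_cpu_sketch(raw);
    	bool ec_function = word & EC_FUNCTION_MASK;
    	bool release_all = word & RELEASE_ALL_PAGES_MASK;

    	printf("ec_function=%d release_all=%d\n", ec_function, release_all);
    	return 0;
    }
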
@@ -481,7 +619,7 @@
 	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
 		      npages, boot ? "boot" : "init", func_id);
 
-	return give_pages(dev, func_id, npages, 0);
+	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
 }
 
 enum {
@@ -501,35 +639,49 @@
 	return ret;
 }
 
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+				   struct rb_root *root, u16 func_id)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-	struct fw_page *fwp;
-	struct rb_node *p;
-	int nclaimed = 0;
-	int err = 0;
 
-	do {
-		p = rb_first(&dev->priv.page_root);
-		if (p) {
-			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id,
-					    optimal_reclaimed_pages(),
-					    &nclaimed);
+	while (!RB_EMPTY_ROOT(root)) {
+		int nclaimed;
+		int err;
 
-			if (err) {
-				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-					       err);
-				return err;
-			}
-			if (nclaimed)
-				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+				    &nclaimed, mlx5_core_is_ecpf(dev));
+		if (err) {
+			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+				       err, func_id);
+			return err;
 		}
+
+		if (nclaimed)
+			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
 		if (time_after(jiffies, end)) {
 			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
 			break;
 		}
-	} while (p);
+	}
+
+	return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+	struct rb_root *root;
+	unsigned long id;
+	void *entry;
+
+	xa_for_each(&dev->priv.page_root_xa, id, entry) {
+		root = entry;
+		mlx5_reclaim_root_pages(dev, root, id);
+		xa_erase(&dev->priv.page_root_xa, id);
+		kfree(root);
+	}
+
+	WARN_ON(!xa_empty(&dev->priv.page_root_xa));
 
 	WARN(dev->priv.fw_pages,
 	     "FW pages counter is %d after reclaiming all pages\n",
@@ -537,39 +689,47 @@
 	WARN(dev->priv.vfs_pages,
 	     "VFs FW pages counter is %d after reclaiming all pages\n",
 	     dev->priv.vfs_pages);
+	WARN(dev->priv.peer_pf_pages,
+	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.peer_pf_pages);
 
 	return 0;
 }
 
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-	dev->priv.page_root = RB_ROOT;
 	INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-	/* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
 	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
 	if (!dev->priv.pg_wq)
 		return -ENOMEM;
 
+	xa_init(&dev->priv.page_root_xa);
+
 	return 0;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+	xa_destroy(&dev->priv.page_root_xa);
+	destroy_workqueue(dev->priv.pg_wq);
+}
+
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
 }
 
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
 {
-	destroy_workqueue(dev->priv.pg_wq);
+	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+	flush_workqueue(dev->priv.pg_wq);
 }
 
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-	int prev_vfs_pages = dev->priv.vfs_pages;
+	int prev_pages = *pages;
 
 	/* In case of internal error we will free the pages manually later */
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -577,20 +737,19 @@
 		return 0;
 	}
 
-	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-		      dev->priv.name);
-	while (dev->priv.vfs_pages) {
+	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
+	while (*pages) {
 		if (time_after(jiffies, end)) {
-			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
 			return -ETIMEDOUT;
 		}
-		if (dev->priv.vfs_pages < prev_vfs_pages) {
+		if (*pages < prev_pages) {
 			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-			prev_vfs_pages = dev->priv.vfs_pages;
+			prev_pages = *pages;
 		}
 		msleep(50);
 	}
 
-	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+	mlx5_core_dbg(dev, "All pages received\n");
 	return 0;
 }
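
Annotation: mlx5_wait_for_pages() generalizes the old VF-only wait to whatever page counter the caller passes, and its timeout is a sliding window — each observed drop in the counter pushes the deadline out, so the wait only aborts after a full window without progress. A hedged userspace reduction of that loop shape (one-second ticks instead of jiffies, and the reclaim is simulated inline):

    #include <stdio.h>
    #include <time.h>

    #define PROGRESS_WINDOW_SEC 2	/* stand-in for MAX_RECLAIM_VFS_PAGES_TIME_MSECS */

    static int wait_for_zero(int *pages)
    {
    	time_t end = time(NULL) + PROGRESS_WINDOW_SEC;
    	int prev = *pages;

    	while (*pages) {
    		if (time(NULL) > end)
    			return -1;	/* a full window passed with no progress */
    		if (*pages < prev) {	/* progress: restart the window */
    			end = time(NULL) + PROGRESS_WINDOW_SEC;
    			prev = *pages;
    		}
    		(*pages)--;	/* simulated reclaim; the driver sleeps 50ms instead */
    	}
    	return 0;
    }

    int main(void)
    {
    	int pages = 5;

    	printf("wait_for_zero -> %d (pages left: %d)\n", wait_for_zero(&pages), pages);
    	return 0;
    }
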