
hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
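
This change moves firmware page tracking from one global rb-tree to a separate rb-tree per function, keyed by func_id plus an embedded-CPU (ECPF) flag and stored in an xarray. The QUERY_PAGES and MANAGE_PAGES commands now carry the embedded_cpu_function bit, a peer_pf_pages counter is added alongside fw_pages and vfs_pages, the page-request event is handled through an EQ notifier that also understands the firmware's "release all pages" request, and mlx5_wait_for_vf_pages() is generalized into mlx5_wait_for_pages(), which takes a pointer to the counter to wait on.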
@@ -35,8 +35,9 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"

 enum {
         MLX5_PAGES_CANT_GIVE    = 0,
@@ -47,15 +48,17 @@
 struct mlx5_pages_req {
         struct mlx5_core_dev *dev;
         u16     func_id;
+        u8      ec_function;
         s32     npages;
         struct work_struct work;
+        u8      release_all;
 };

 struct fw_page {
         struct rb_node          rb_node;
         u64                     addr;
         struct page            *page;
-        u16                     func_id;
+        u32                     function;
         unsigned long           bitmask;
         struct list_head        list;
         unsigned                free_count;
@@ -71,14 +74,49 @@
         MLX5_NUM_4K_IN_PAGE     = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };

-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
 {
-        struct rb_root *root = &dev->priv.page_root;
-        struct rb_node **new = &root->rb_node;
+        return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+{
+        struct rb_root *root;
+        int err;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (root)
+                return root;
+
+        root = kzalloc(sizeof(*root), GFP_KERNEL);
+        if (!root)
+                return ERR_PTR(-ENOMEM);
+
+        err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
+        if (err) {
+                kfree(root);
+                return ERR_PTR(err);
+        }
+
+        *root = RB_ROOT;
+
+        return root;
+}
+
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
+{
         struct rb_node *parent = NULL;
+        struct rb_root *root;
+        struct rb_node **new;
         struct fw_page *nfp;
         struct fw_page *tfp;
         int i;
+
+        root = page_root_per_function(dev, function);
+        if (IS_ERR(root))
+                return PTR_ERR(root);
+
+        new = &root->rb_node;

         while (*new) {
                 parent = *new;
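
The composite key introduced above packs the 16-bit function ID and the embedded-CPU flag into a single u32, so the same func_id on the host and on the embedded CPU selects two different xarray entries and therefore two independent page trees. A standalone userspace sketch of the encoding (illustrative only; get_function mirrors the patch, the rest is scaffolding):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* func_id occupies bits 0-15, the ec_function flag bit 16. */
    static uint32_t get_function(uint16_t func_id, bool ec_function)
    {
            return (uint32_t)func_id | ((uint32_t)ec_function << 16);
    }

    int main(void)
    {
            printf("%#x\n", get_function(0, false)); /* host PF 0     -> key 0x0     */
            printf("%#x\n", get_function(0, true));  /* ECPF's own PF -> key 0x10000 */
            printf("%#x\n", get_function(5, false)); /* host func 5   -> key 0x5     */
            return 0;
    }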
@@ -97,7 +135,7 @@

         nfp->addr = addr;
         nfp->page = page;
-        nfp->func_id = func_id;
+        nfp->function = function;
         nfp->free_count = MLX5_NUM_4K_IN_PAGE;
         for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                 set_bit(i, &nfp->bitmask);
@@ -109,12 +147,19 @@
         return 0;
 }

-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+                                    u32 function)
 {
-        struct rb_root *root = &dev->priv.page_root;
-        struct rb_node *tmp = root->rb_node;
         struct fw_page *result = NULL;
+        struct rb_root *root;
+        struct rb_node *tmp;
         struct fw_page *tfp;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (WARN_ON_ONCE(!root))
+                return NULL;
+
+        tmp = root->rb_node;

         while (tmp) {
                 tfp = rb_entry(tmp, struct fw_page, rb_node);
@@ -134,16 +179,17 @@
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                 s32 *npages, int boot)
 {
-        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
         int err;

         MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
         MLX5_SET(query_pages_in, in, op_mod, boot ?
                  MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                  MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

-        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
         if (err)
                 return err;

@@ -153,15 +199,21 @@
         return err;
 }

-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
-        struct fw_page *fp;
+        struct fw_page *fp = NULL;
+        struct fw_page *iter;
         unsigned n;

-        if (list_empty(&dev->priv.free_list))
+        list_for_each_entry(iter, &dev->priv.free_list, list) {
+                if (iter->function != function)
+                        continue;
+                fp = iter;
+        }
+
+        if (list_empty(&dev->priv.free_list) || !fp)
                 return -ENOMEM;

-        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
         n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
         if (n >= MLX5_NUM_4K_IN_PAGE) {
                 mlx5_core_warn(dev, "alloc 4k bug\n");
@@ -179,40 +231,51 @@

 #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+                     bool in_free_list)
+{
+        struct rb_root *root;
+
+        root = xa_load(&dev->priv.page_root_xa, fwp->function);
+        if (WARN_ON_ONCE(!root))
+                return;
+
+        rb_erase(&fwp->rb_node, root);
+        if (in_free_list)
+                list_del(&fwp->list);
+        dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
+                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+        __free_page(fwp->page);
+        kfree(fwp);
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
         struct fw_page *fwp;
         int n;

-        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
         if (!fwp) {
-                mlx5_core_warn(dev, "page not found\n");
+                mlx5_core_warn_rl(dev, "page not found\n");
                 return;
         }
-
         n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
         fwp->free_count++;
         set_bit(n, &fwp->bitmask);
-        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-                rb_erase(&fwp->rb_node, &dev->priv.page_root);
-                if (fwp->free_count != 1)
-                        list_del(&fwp->list);
-                dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-                               PAGE_SIZE, DMA_BIDIRECTIONAL);
-                __free_page(fwp->page);
-                kfree(fwp);
-        } else if (fwp->free_count == 1) {
+        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+                free_fwp(dev, fwp, fwp->free_count != 1);
+        else if (fwp->free_count == 1)
                 list_add(&fwp->list, &dev->priv.free_list);
-        }
 }

-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
+        struct device *device = mlx5_core_dma_dev(dev);
+        int nid = dev_to_node(device);
         struct page *page;
         u64 zero_addr = 1;
         u64 addr;
         int err;
-        int nid = dev_to_node(&dev->pdev->dev);

         page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
         if (!page) {
@@ -220,9 +283,8 @@
                 return -ENOMEM;
         }
 map:
-        addr = dma_map_page(&dev->pdev->dev, page, 0,
-                            PAGE_SIZE, DMA_BIDIRECTIONAL);
-        if (dma_mapping_error(&dev->pdev->dev, addr)) {
+        addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(device, addr)) {
                 mlx5_core_warn(dev, "failed dma mapping page\n");
                 err = -ENOMEM;
                 goto err_mapping;
@@ -234,11 +296,10 @@
                 goto map;
         }

-        err = insert_page(dev, addr, page, func_id);
+        err = insert_page(dev, addr, page, function);
         if (err) {
                 mlx5_core_err(dev, "failed to track allocated page\n");
-                dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-                               DMA_BIDIRECTIONAL);
+                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
         }

 err_mapping:
@@ -246,31 +307,33 @@
                 __free_page(page);

         if (zero_addr == 0)
-                dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+                dma_unmap_page(device, zero_addr, PAGE_SIZE,
                                DMA_BIDIRECTIONAL);

         return err;
 }

-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+                             bool ec_function)
 {
-        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
         int err;

         MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

-        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = mlx5_cmd_exec_in(dev, manage_pages, in);
         if (err)
                 mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                                func_id, err);
 }

 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-                      int notify_fail)
+                      int notify_fail, bool ec_function)
 {
+        u32 function = get_function(func_id, ec_function);
         u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
         int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
         u64 addr;
@@ -288,10 +351,10 @@

         for (i = 0; i < npages; i++) {
 retry:
-                err = alloc_4k(dev, &addr);
+                err = alloc_4k(dev, &addr, function);
                 if (err) {
                         if (err == -ENOMEM)
-                                err = alloc_system_page(dev, func_id);
+                                err = alloc_system_page(dev, function);
                         if (err)
                                 goto out_4k;

@@ -304,6 +367,7 @@
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
         MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

         err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
         if (err) {
@@ -315,20 +379,54 @@
         dev->priv.fw_pages += npages;
         if (func_id)
                 dev->priv.vfs_pages += npages;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages += npages;

-        mlx5_core_dbg(dev, "err %d\n", err);
+        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+                      npages, ec_function, func_id, err);

         kvfree(in);
         return 0;

 out_4k:
         for (i--; i >= 0; i--)
-                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
         kvfree(in);
         if (notify_fail)
-                page_notify_fail(dev, func_id);
+                page_notify_fail(dev, func_id, ec_function);
         return err;
+}
+
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+                              bool ec_function)
+{
+        u32 function = get_function(func_id, ec_function);
+        struct rb_root *root;
+        struct rb_node *p;
+        int npages = 0;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (WARN_ON_ONCE(!root))
+                return;
+
+        p = rb_first(root);
+        while (p) {
+                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+                p = rb_next(p);
+                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+                free_fwp(dev, fwp, fwp->free_count);
+        }
+
+        dev->priv.fw_pages -= npages;
+        if (func_id)
+                dev->priv.vfs_pages -= npages;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages -= npages;
+
+        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+                      npages, ec_function, func_id);
 }

 static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
@@ -352,25 +450,30 @@
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                              u32 *in, int in_size, u32 *out, int out_size)
 {
+        struct rb_root *root;
         struct fw_page *fwp;
         struct rb_node *p;
+        bool ec_function;
         u32 func_id;
         u32 npages;
         u32 i = 0;

-        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+        if (!mlx5_cmd_is_down(dev))
                 return mlx5_cmd_exec(dev, in, in_size, out, out_size);

         /* No hard feelings, we want our pages back! */
         npages = MLX5_GET(manage_pages_in, in, input_num_entries);
         func_id = MLX5_GET(manage_pages_in, in, function_id);
+        ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

-        p = rb_first(&dev->priv.page_root);
+        root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
+        if (WARN_ON_ONCE(!root))
+                return -EEXIST;
+
+        p = rb_first(root);
         while (p && i < npages) {
                 fwp = rb_entry(p, struct fw_page, rb_node);
                 p = rb_next(p);
-                if (fwp->func_id != func_id)
-                        continue;

                 i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
         }
@@ -379,11 +482,12 @@
         return 0;
 }

-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-                         int *nclaimed)
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+                         int *nclaimed, bool ec_function)
 {
+        u32 function = get_function(func_id, ec_function);
         int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
         int num_claimed;
         u32 *out;
         int err;
@@ -401,8 +505,10 @@
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
         MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

-        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+        mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+                      func_id, npages, outlen);
         err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
         if (err) {
                 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
@@ -418,7 +524,7 @@
         }

         for (i = 0; i < num_claimed; i++)
-                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

         if (nclaimed)
                 *nclaimed = num_claimed;
@@ -426,6 +532,8 @@
         dev->priv.fw_pages -= num_claimed;
         if (func_id)
                 dev->priv.vfs_pages -= num_claimed;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages -= num_claimed;

 out_free:
         kvfree(out);
@@ -438,10 +546,13 @@
         struct mlx5_core_dev *dev = req->dev;
         int err = 0;

-        if (req->npages < 0)
-                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+        if (req->release_all)
+                release_all_pages(dev, req->func_id, req->ec_function);
+        else if (req->npages < 0)
+                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+                                    req->ec_function);
         else if (req->npages > 0)
-                err = give_pages(dev, req->func_id, req->npages, 1);
+                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

         if (err)
                 mlx5_core_warn(dev, "%s fail %d\n",
@@ -450,28 +561,54 @@
         kfree(req);
 }

-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                 s32 npages)
+enum {
+        EC_FUNCTION_MASK = 0x8000,
+        RELEASE_ALL_PAGES_MASK = 0x4000,
+};
+
+static int req_pages_handler(struct notifier_block *nb,
+                             unsigned long type, void *data)
 {
         struct mlx5_pages_req *req;
+        struct mlx5_core_dev *dev;
+        struct mlx5_priv *priv;
+        struct mlx5_eqe *eqe;
+        bool ec_function;
+        bool release_all;
+        u16 func_id;
+        s32 npages;

+        priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+        dev = container_of(priv, struct mlx5_core_dev, priv);
+        eqe = data;
+
+        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+        npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
+        release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+                      RELEASE_ALL_PAGES_MASK;
+        mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+                      func_id, npages, release_all);
         req = kzalloc(sizeof(*req), GFP_ATOMIC);
         if (!req) {
                 mlx5_core_warn(dev, "failed to allocate pages request\n");
-                return;
+                return NOTIFY_DONE;
         }

         req->dev = dev;
         req->func_id = func_id;
         req->npages = npages;
+        req->ec_function = ec_function;
+        req->release_all = release_all;
         INIT_WORK(&req->work, pages_work_handler);
         queue_work(dev->priv.pg_wq, &req->work);
+        return NOTIFY_OK;
 }

 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-        u16 uninitialized_var(func_id);
-        s32 uninitialized_var(npages);
+        u16 func_id;
+        s32 npages;
         int err;

         err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
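
In the page-request event, the 16-bit ec_function word is overloaded: bit 15 (EC_FUNCTION_MASK, 0x8000) marks a request from an embedded CPU function, and bit 14 (RELEASE_ALL_PAGES_MASK, 0x4000) asks the driver to drop every page of that function instead of doing a counted reclaim. A minimal sketch of the decoding, assuming the word was already byte-swapped as in req_pages_handler():

    #include <stdbool.h>
    #include <stdint.h>

    #define EC_FUNCTION_MASK       0x8000
    #define RELEASE_ALL_PAGES_MASK 0x4000

    /* Split the overloaded ec_function word into its two flag bits. */
    static void decode_page_req_flags(uint16_t ec_word, bool *ec_function,
                                      bool *release_all)
    {
            *ec_function = ec_word & EC_FUNCTION_MASK;
            *release_all = ec_word & RELEASE_ALL_PAGES_MASK;
    }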
@@ -481,7 +618,7 @@
         mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                       npages, boot ? "boot" : "init", func_id);

-        return give_pages(dev, func_id, npages, 0);
+        return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
 }

 enum {
@@ -501,35 +638,49 @@
         return ret;
 }

-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+                                   struct rb_root *root, u16 func_id)
 {
         unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-        struct fw_page *fwp;
-        struct rb_node *p;
-        int nclaimed = 0;
-        int err = 0;

-        do {
-                p = rb_first(&dev->priv.page_root);
-                if (p) {
-                        fwp = rb_entry(p, struct fw_page, rb_node);
-                        err = reclaim_pages(dev, fwp->func_id,
-                                            optimal_reclaimed_pages(),
-                                            &nclaimed);
+        while (!RB_EMPTY_ROOT(root)) {
+                int nclaimed;
+                int err;

-                        if (err) {
-                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-                                               err);
-                                return err;
-                        }
-                        if (nclaimed)
-                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+                err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+                                    &nclaimed, mlx5_core_is_ecpf(dev));
+                if (err) {
+                        mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+                                       err, func_id);
+                        return err;
                 }
+
+                if (nclaimed)
+                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
                 if (time_after(jiffies, end)) {
                         mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                         break;
                 }
-        } while (p);
+        }
+
+        return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+        struct rb_root *root;
+        unsigned long id;
+        void *entry;
+
+        xa_for_each(&dev->priv.page_root_xa, id, entry) {
+                root = entry;
+                mlx5_reclaim_root_pages(dev, root, id);
+                xa_erase(&dev->priv.page_root_xa, id);
+                kfree(root);
+        }
+
+        WARN_ON(!xa_empty(&dev->priv.page_root_xa));

         WARN(dev->priv.fw_pages,
              "FW pages counter is %d after reclaiming all pages\n",
@@ -537,39 +688,47 @@
         WARN(dev->priv.vfs_pages,
              "VFs FW pages counter is %d after reclaiming all pages\n",
              dev->priv.vfs_pages);
+        WARN(dev->priv.peer_pf_pages,
+             "Peer PF FW pages counter is %d after reclaiming all pages\n",
+             dev->priv.peer_pf_pages);

         return 0;
 }

-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-        dev->priv.page_root = RB_ROOT;
         INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-        /* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
         dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
         if (!dev->priv.pg_wq)
                 return -ENOMEM;

+        xa_init(&dev->priv.page_root_xa);
+
         return 0;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+        xa_destroy(&dev->priv.page_root_xa);
+        destroy_workqueue(dev->priv.pg_wq);
+}
+
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+        MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+        mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
 }

 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
 {
-        destroy_workqueue(dev->priv.pg_wq);
+        mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+        flush_workqueue(dev->priv.pg_wq);
 }

-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 {
         unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-        int prev_vfs_pages = dev->priv.vfs_pages;
+        int prev_pages = *pages;

         /* In case of internal error we will free the pages manually later */
         if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -577,20 +736,19 @@
                 return 0;
         }

-        mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-                      dev->priv.name);
-        while (dev->priv.vfs_pages) {
+        mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
+        while (*pages) {
                 if (time_after(jiffies, end)) {
-                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
                         return -ETIMEDOUT;
                 }
-                if (dev->priv.vfs_pages < prev_vfs_pages) {
+                if (*pages < prev_pages) {
                         end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-                        prev_vfs_pages = dev->priv.vfs_pages;
+                        prev_pages = *pages;
                 }
                 msleep(50);
         }

-        mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+        mlx5_core_dbg(dev, "All pages received\n");
         return 0;
 }
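
The reworked helper takes the counter by pointer, so the same loop can wait on dev->priv.vfs_pages or dev->priv.peer_pf_pages (the call sites live outside this file's diff). The key pattern is that the deadline is pushed out whenever progress is observed, so only a full quiet window with no page returned triggers the timeout. A userspace sketch of that pattern (assumptions: seconds instead of jiffies, usleep instead of msleep):

    #include <time.h>
    #include <unistd.h>

    /* Wait for *pages to drop to zero; every observed decrease restarts
     * the timeout window, mirroring mlx5_wait_for_pages(). */
    static int wait_for_zero(volatile int *pages, int window_secs)
    {
            time_t end = time(NULL) + window_secs;
            int prev = *pages;

            while (*pages) {
                    if (time(NULL) > end)
                            return -1;       /* -ETIMEDOUT in the kernel */
                    if (*pages < prev) {     /* progress: extend the window */
                            end = time(NULL) + window_secs;
                            prev = *pages;
                    }
                    usleep(50 * 1000);       /* the kernel uses msleep(50) */
            }
            return 0;
    }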