From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] net/mlx5: track firmware pages per function and add ECPF page handling

Track firmware pages in a per-function rb-tree stored in an xarray
(priv.page_root_xa) instead of a single global tree, pass the embedded
CPU (ECPF) function bit in the QUERY_PAGES/MANAGE_PAGES commands,
account peer PF pages separately, and handle page-request events,
including the "release all pages" request, through an EQ notifier
(req_pages_handler) instead of mlx5_core_req_pages_handler().

---
 kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 377 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 268 insertions(+), 109 deletions(-)

diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9c3653e..1ea71f0 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,8 +35,9 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
 	MLX5_PAGES_CANT_GIVE	= 0,
@@ -47,15 +48,17 @@ struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u16	func_id;
+	u8	ec_function;
 	s32	npages;
 	struct	work_struct work;
+	u8	release_all;
 };
 
 struct fw_page {
 	struct rb_node		rb_node;
 	u64			addr;
 	struct page	       *page;
-	u16			func_id;
+	u32			function;
 	unsigned long		bitmask;
 	struct list_head	list;
 	unsigned		free_count;
 };
@@ -71,14 +74,49 @@
 	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };
 
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node **new = &root->rb_node;
+	return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+{
+	struct rb_root *root;
+	int err;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (root)
+		return root;
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+
+	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
+	if (err) {
+		kfree(root);
+		return ERR_PTR(err);
+	}
+
+	*root = RB_ROOT;
+
+	return root;
+}
+
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
+{
 	struct rb_node *parent = NULL;
+	struct rb_root *root;
+	struct rb_node **new;
 	struct fw_page *nfp;
 	struct fw_page *tfp;
 	int i;
+
+	root = page_root_per_function(dev, function);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+
+	new = &root->rb_node;
 
 	while (*new) {
 		parent = *new;
@@ -97,7 +135,7 @@
 
 	nfp->addr = addr;
 	nfp->page = page;
-	nfp->func_id = func_id;
+	nfp->function = function;
 	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
 	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
 		set_bit(i, &nfp->bitmask);
@@ -109,12 +147,19 @@
 	return 0;
 }
 
-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+				    u32 function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node *tmp = root->rb_node;
 	struct fw_page *result = NULL;
+	struct rb_root *root;
+	struct rb_node *tmp;
 	struct fw_page *tfp;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return NULL;
+
+	tmp = root->rb_node;
 
 	while (tmp) {
 		tfp = rb_entry(tmp, struct fw_page, rb_node);
@@ -134,16 +179,17 @@
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 				s32 *npages, int boot)
 {
-	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
 	int err;
 
 	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
 	MLX5_SET(query_pages_in, in, op_mod, boot ?
 		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
 		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
 	if (err)
 		return err;
 
@@ -153,18 +199,25 @@
 	return err;
 }
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
-	struct fw_page *fp;
+	struct fw_page *fp = NULL;
+	struct fw_page *iter;
 	unsigned n;
 
-	if (list_empty(&dev->priv.free_list))
+	list_for_each_entry(iter, &dev->priv.free_list, list) {
+		if (iter->function != function)
+			continue;
+		fp = iter;
+	}
+
+	if (list_empty(&dev->priv.free_list) || !fp)
 		return -ENOMEM;
 
-	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
 	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
 	if (n >= MLX5_NUM_4K_IN_PAGE) {
-		mlx5_core_warn(dev, "alloc 4k bug\n");
+		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+			       fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
 		return -ENOENT;
 	}
 	clear_bit(n, &fp->bitmask);
@@ -179,40 +232,51 @@
 
 #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
 
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+		     bool in_free_list)
+{
+	struct rb_root *root;
+
+	root = xa_load(&dev->priv.page_root_xa, fwp->function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	rb_erase(&fwp->rb_node, root);
+	if (in_free_list)
+		list_del(&fwp->list);
+	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
+		       PAGE_SIZE, DMA_BIDIRECTIONAL);
+	__free_page(fwp->page);
+	kfree(fwp);
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
 	struct fw_page *fwp;
 	int n;
 
-	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
 	if (!fwp) {
-		mlx5_core_warn(dev, "page not found\n");
+		mlx5_core_warn_rl(dev, "page not found\n");
 		return;
 	}
-
 	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
 	fwp->free_count++;
 	set_bit(n, &fwp->bitmask);
-	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-		rb_erase(&fwp->rb_node, &dev->priv.page_root);
-		if (fwp->free_count != 1)
-			list_del(&fwp->list);
-		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(fwp->page);
-		kfree(fwp);
-	} else if (fwp->free_count == 1) {
+	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+		free_fwp(dev, fwp, fwp->free_count != 1);
+	else if (fwp->free_count == 1)
 		list_add(&fwp->list, &dev->priv.free_list);
-	}
 }
 
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
+	struct device *device = mlx5_core_dma_dev(dev);
+	int nid = dev_to_node(device);
 	struct page *page;
 	u64 zero_addr = 1;
 	u64 addr;
 	int err;
-	int nid = dev_to_node(&dev->pdev->dev);
 
 	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
 	if (!page) {
@@ -220,9 +284,8 @@
 		return -ENOMEM;
 	}
 map:
-	addr = dma_map_page(&dev->pdev->dev, page, 0,
-			    PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(device, addr)) {
 		mlx5_core_warn(dev, "failed dma mapping page\n");
 		err = -ENOMEM;
 		goto err_mapping;
 	}
@@ -234,11 +297,10 @@
 		goto map;
 	}
 
-	err = insert_page(dev, addr, page, func_id);
+	err = insert_page(dev, addr, page, function);
 	if (err) {
 		mlx5_core_err(dev, "failed to track allocated page\n");
-		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
+		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
 err_mapping:
@@ -246,31 +308,33 @@
 		__free_page(page);
 
 	if (zero_addr == 0)
-		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+		dma_unmap_page(device, zero_addr, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 
 	return err;
 }
 
-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+			     bool ec_function)
 {
-	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int err;
 
 	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev, manage_pages, in);
 	if (err)
 		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
 			       func_id, err);
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-		      int notify_fail)
+		      int notify_fail, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	u64 addr;
@@ -288,10 +352,10 @@
 
 	for (i = 0; i < npages; i++) {
retry:
-		err = alloc_4k(dev, &addr);
+		err = alloc_4k(dev, &addr, function);
 		if (err) {
 			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
+				err = alloc_system_page(dev, function);
 			if (err)
 				goto out_4k;
 
@@ -304,6 +368,7 @@
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err) {
@@ -315,20 +380,54 @@
 	dev->priv.fw_pages += npages;
 	if (func_id)
 		dev->priv.vfs_pages += npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages += npages;
 
-	mlx5_core_dbg(dev, "err %d\n", err);
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+		      npages, ec_function, func_id, err);
 
 	kvfree(in);
 	return 0;
 
 out_4k:
 	for (i--; i >= 0; i--)
-		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
 	kvfree(in);
 	if (notify_fail)
-		page_notify_fail(dev, func_id);
+		page_notify_fail(dev, func_id, ec_function);
 	return err;
+}
+
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+			      bool ec_function)
+{
+	u32 function = get_function(func_id, ec_function);
+	struct rb_root *root;
+	struct rb_node *p;
+	int npages = 0;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	p = rb_first(root);
+	while (p) {
+		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+		p = rb_next(p);
+		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+		free_fwp(dev, fwp, fwp->free_count);
+	}
+
+	dev->priv.fw_pages -= npages;
+	if (func_id)
+		dev->priv.vfs_pages -= npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= npages;
+
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+		      npages, ec_function, func_id);
 }
 
 static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
@@ -352,25 +451,30 @@
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 			     u32 *in, int in_size, u32 *out, int out_size)
 {
+	struct rb_root *root;
 	struct fw_page *fwp;
 	struct rb_node *p;
+	bool ec_function;
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
 
-	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+	if (!mlx5_cmd_is_down(dev))
 		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
 
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
 	func_id = MLX5_GET(manage_pages_in, in, function_id);
+	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
 
-	p = rb_first(&dev->priv.page_root);
+	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
+	if (WARN_ON_ONCE(!root))
+		return -EEXIST;
+
+	p = rb_first(root);
 	while (p && i < npages) {
 		fwp = rb_entry(p, struct fw_page, rb_node);
 		p = rb_next(p);
-		if (fwp->func_id != func_id)
-			continue;
 
 		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
 	}
@@ -379,11 +483,12 @@
 	return 0;
 }
 
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-			 int *nclaimed)
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+			 int *nclaimed, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int num_claimed;
 	u32 *out;
 	int err;
@@ -401,8 +506,10 @@
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+		      func_id, npages, outlen);
 	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
 	if (err) {
 		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
@@ -418,7 +525,7 @@
 	}
 
 	for (i = 0; i < num_claimed; i++)
-		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
 
 	if (nclaimed)
 		*nclaimed = num_claimed;
@@ -426,6 +533,8 @@
 	dev->priv.fw_pages -= num_claimed;
 	if (func_id)
 		dev->priv.vfs_pages -= num_claimed;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= num_claimed;
 
 out_free:
 	kvfree(out);
@@ -438,10 +547,13 @@
 	struct mlx5_core_dev *dev = req->dev;
 	int err = 0;
 
-	if (req->npages < 0)
-		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+	if (req->release_all)
+		release_all_pages(dev, req->func_id, req->ec_function);
+	else if (req->npages < 0)
+		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+				    req->ec_function);
 	else if (req->npages > 0)
-		err = give_pages(dev, req->func_id, req->npages, 1);
+		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
 
 	if (err)
 		mlx5_core_warn(dev, "%s fail %d\n",
@@ -450,28 +562,54 @@
 	kfree(req);
 }
 
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages)
+enum {
+	EC_FUNCTION_MASK = 0x8000,
+	RELEASE_ALL_PAGES_MASK = 0x4000,
+};
+
+static int req_pages_handler(struct notifier_block *nb,
+			     unsigned long type, void *data)
 {
 	struct mlx5_pages_req *req;
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	struct mlx5_eqe *eqe;
+	bool ec_function;
+	bool release_all;
+	u16 func_id;
+	s32 npages;
+
+	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	eqe = data;
+
+	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
+	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+		      RELEASE_ALL_PAGES_MASK;
+	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+		      func_id, npages, release_all);
 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req) {
 		mlx5_core_warn(dev, "failed to allocate pages request\n");
-		return;
+		return NOTIFY_DONE;
 	}
 
 	req->dev = dev;
 	req->func_id = func_id;
 	req->npages = npages;
+	req->ec_function = ec_function;
+	req->release_all = release_all;
 	INIT_WORK(&req->work, pages_work_handler);
 	queue_work(dev->priv.pg_wq, &req->work);
+
+	return NOTIFY_OK;
 }
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	u16 uninitialized_var(func_id);
-	s32 uninitialized_var(npages);
+	u16 func_id;
+	s32 npages;
 	int err;
 
 	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
@@ -481,7 +619,7 @@
 	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
 		      npages, boot ? "boot" : "init", func_id);
 
-	return give_pages(dev, func_id, npages, 0);
+	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
 }
 
 enum {
@@ -501,35 +639,49 @@
 	return ret;
 }
 
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+				   struct rb_root *root, u16 func_id)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-	struct fw_page *fwp;
-	struct rb_node *p;
-	int nclaimed = 0;
-	int err = 0;
 
-	do {
-		p = rb_first(&dev->priv.page_root);
-		if (p) {
-			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id,
-					    optimal_reclaimed_pages(),
-					    &nclaimed);
+	while (!RB_EMPTY_ROOT(root)) {
+		int nclaimed;
+		int err;
 
-			if (err) {
-				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-					       err);
-				return err;
-			}
-			if (nclaimed)
-				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+				    &nclaimed, mlx5_core_is_ecpf(dev));
+		if (err) {
+			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+				       err, func_id);
+			return err;
 		}
+
+		if (nclaimed)
+			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
 		if (time_after(jiffies, end)) {
 			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
 			break;
 		}
-	} while (p);
+	}
+
+	return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+	struct rb_root *root;
+	unsigned long id;
+	void *entry;
+
+	xa_for_each(&dev->priv.page_root_xa, id, entry) {
+		root = entry;
+		mlx5_reclaim_root_pages(dev, root, id);
+		xa_erase(&dev->priv.page_root_xa, id);
+		kfree(root);
+	}
+
+	WARN_ON(!xa_empty(&dev->priv.page_root_xa));
 
 	WARN(dev->priv.fw_pages,
 	     "FW pages counter is %d after reclaiming all pages\n",
@@ -537,39 +689,47 @@
 	WARN(dev->priv.vfs_pages,
 	     "VFs FW pages counter is %d after reclaiming all pages\n",
 	     dev->priv.vfs_pages);
+	WARN(dev->priv.peer_pf_pages,
+	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.peer_pf_pages);
 
 	return 0;
 }
 
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-	dev->priv.page_root = RB_ROOT;
 	INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-	/* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
 	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
 	if (!dev->priv.pg_wq)
 		return -ENOMEM;
 
+	xa_init(&dev->priv.page_root_xa);
+
 	return 0;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+	xa_destroy(&dev->priv.page_root_xa);
+	destroy_workqueue(dev->priv.pg_wq);
+}
+
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
 }
 
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
 {
-	destroy_workqueue(dev->priv.pg_wq);
+	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+	flush_workqueue(dev->priv.pg_wq);
 }
 
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-	int prev_vfs_pages = dev->priv.vfs_pages;
+	int prev_pages = *pages;
 
 	/* In case of internal error we will free the pages manually later */
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -577,20 +737,19 @@
 		return 0;
 	}
 
-	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-		      dev->priv.name);
-	while (dev->priv.vfs_pages) {
+	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
+	while (*pages) {
 		if (time_after(jiffies, end)) {
-			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
 			return -ETIMEDOUT;
 		}
-		if (dev->priv.vfs_pages < prev_vfs_pages) {
+		if (*pages < prev_pages) {
 			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-			prev_vfs_pages = dev->priv.vfs_pages;
+			prev_pages = *pages;
 		}
 		msleep(50);
 	}
 
-	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+	mlx5_core_dbg(dev, "All pages received\n");
 	return 0;
 }
--
Gitblit v1.6.2