```diff
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
 	MLX5_PAGES_CANT_GIVE	= 0,
```

```diff
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u16	func_id;
+	u8	ec_function;
 	s32	npages;
 	struct work_struct work;
+	u8	release_all;
 };
 
 struct fw_page {
 	struct rb_node		rb_node;
 	u64			addr;
 	struct page	       *page;
-	u16			func_id;
+	u32			function;
 	unsigned long		bitmask;
 	struct list_head	list;
 	unsigned		free_count;
```

```diff
 	MLX5_NUM_4K_IN_PAGE	= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };
 
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node **new = &root->rb_node;
+	return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+{
+	struct rb_root *root;
+	int err;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (root)
+		return root;
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+
+	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
+	if (err) {
+		kfree(root);
+		return ERR_PTR(err);
+	}
+
+	*root = RB_ROOT;
+
+	return root;
+}
+
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
+{
 	struct rb_node *parent = NULL;
+	struct rb_root *root;
+	struct rb_node **new;
 	struct fw_page *nfp;
 	struct fw_page *tfp;
 	int i;
+
+	root = page_root_per_function(dev, function);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+
+	new = &root->rb_node;
 
 	while (*new) {
 		parent = *new;
```
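The xarray key built by `get_function()` packs the 16-bit `function_id` into the low bits and the embedded-CPU flag one bit above them, so the host PF, each VF, and the embedded-CPU PF all get dedicated page-root trees. A standalone userspace sketch of the encoding (illustrative only, mirrors the helper above):

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors get_function() from the patch: func_id occupies bits 0-15,
 * the embedded-CPU flag lands in bit 16. */
static uint32_t get_function(uint16_t func_id, int ec_function)
{
	return (uint32_t)func_id | ((uint32_t)ec_function << 16);
}

int main(void)
{
	printf("host PF (func 0):  0x%05x\n", get_function(0, 0)); /* 0x00000 */
	printf("embedded-CPU PF:   0x%05x\n", get_function(0, 1)); /* 0x10000 */
	printf("VF with func_id 1: 0x%05x\n", get_function(1, 0)); /* 0x00001 */
	return 0;
}
```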
```diff
 
 	nfp->addr = addr;
 	nfp->page = page;
-	nfp->func_id = func_id;
+	nfp->function = function;
 	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
 	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
 		set_bit(i, &nfp->bitmask);
```

```diff
 	return 0;
 }
 
-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+				    u32 function)
 {
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node *tmp = root->rb_node;
 	struct fw_page *result = NULL;
+	struct rb_root *root;
+	struct rb_node *tmp;
 	struct fw_page *tfp;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return NULL;
+
+	tmp = root->rb_node;
 
 	while (tmp) {
 		tfp = rb_entry(tmp, struct fw_page, rb_node);
```

```diff
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 				s32 *npages, int boot)
 {
-	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
 	int err;
 
 	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
 	MLX5_SET(query_pages_in, in, op_mod, boot ?
 		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
 		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
 	if (err)
 		return err;
 
```
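`mlx5_cmd_exec_inout()` here, and `mlx5_cmd_exec_in()` further down, are convenience wrappers from `include/linux/mlx5/driver.h` that derive buffer sizes from the command name, which is why the explicit `sizeof()` arguments disappear. Roughly paraphrased from the header (verify against the tree you are on):

```c
/* Paraphrased, approximately as defined in linux/mlx5/driver.h: sizes come
 * from the mlx5_ifc command layouts, and the _in variant supplies a
 * throwaway output buffer of the right size. */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                     \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
		      MLX5_ST_SZ_BYTES(ifc_cmd##_out))

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                             \
	({                                                             \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};           \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);           \
	})
```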
```diff
 	return err;
 }
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
-	struct fw_page *fp;
+	struct fw_page *fp = NULL;
+	struct fw_page *iter;
 	unsigned n;
 
-	if (list_empty(&dev->priv.free_list))
+	list_for_each_entry(iter, &dev->priv.free_list, list) {
+		if (iter->function != function)
+			continue;
+		fp = iter;
+	}
+
+	if (list_empty(&dev->priv.free_list) || !fp)
 		return -ENOMEM;
 
-	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
 	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
 	if (n >= MLX5_NUM_4K_IN_PAGE) {
 		mlx5_core_warn(dev, "alloc 4k bug\n");
```
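The free list stays global, so `alloc_4k()` now scans it for a page owned by the requesting function. As written, the loop walks the whole list and keeps the last match, though any match would do, since every page on the list has at least one free 4K chunk. An equivalent early-exit form of the same scan, as a sketch rather than the patch's code:

```c
/* Illustrative variant only: break on the first page owned by `function`.
 * Any hit is usable, so this trades the last-match behavior above for a
 * shorter walk on long free lists. Uses the same iter/fp/function locals
 * as the patch's alloc_4k(). */
list_for_each_entry(iter, &dev->priv.free_list, list) {
	if (iter->function == function) {
		fp = iter;
		break;
	}
}
if (!fp)
	return -ENOMEM;
```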
```diff
 
 #define MLX5_U64_4K_PAGE_MASK		((~(u64)0U) << PAGE_SHIFT)
 
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+		     bool in_free_list)
+{
+	struct rb_root *root;
+
+	root = xa_load(&dev->priv.page_root_xa, fwp->function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	rb_erase(&fwp->rb_node, root);
+	if (in_free_list)
+		list_del(&fwp->list);
+	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
+		       PAGE_SIZE, DMA_BIDIRECTIONAL);
+	__free_page(fwp->page);
+	kfree(fwp);
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
 	struct fw_page *fwp;
 	int n;
 
-	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
 	if (!fwp) {
-		mlx5_core_warn(dev, "page not found\n");
+		mlx5_core_warn_rl(dev, "page not found\n");
 		return;
 	}
-
 	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
 	fwp->free_count++;
 	set_bit(n, &fwp->bitmask);
-	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-		rb_erase(&fwp->rb_node, &dev->priv.page_root);
-		if (fwp->free_count != 1)
-			list_del(&fwp->list);
-		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(fwp->page);
-		kfree(fwp);
-	} else if (fwp->free_count == 1) {
+	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+		free_fwp(dev, fwp, fwp->free_count != 1);
+	else if (fwp->free_count == 1)
 		list_add(&fwp->list, &dev->priv.free_list);
-	}
 }
 
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
+	struct device *device = mlx5_core_dma_dev(dev);
+	int nid = dev_to_node(device);
 	struct page *page;
 	u64 zero_addr = 1;
 	u64 addr;
 	int err;
-	int nid = dev_to_node(&dev->pdev->dev);
 
 	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
 	if (!page) {
```
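The repeated `&dev->pdev->dev` references give way to `mlx5_core_dma_dev()`. In this era of the driver it is a thin accessor in `mlx5_core.h`, roughly as follows (paraphrased, verify against your tree):

```c
/* Paraphrased from mlx5_core.h: the DMA device behind an mlx5_core_dev.
 * Hiding the PCI detail behind a helper keeps the call sites uniform. */
static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
	return &dev->pdev->dev;
}
```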
```diff
 		return -ENOMEM;
 	}
 map:
-	addr = dma_map_page(&dev->pdev->dev, page, 0,
-			    PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(device, addr)) {
 		mlx5_core_warn(dev, "failed dma mapping page\n");
 		err = -ENOMEM;
 		goto err_mapping;
```

```diff
 		goto map;
 	}
 
-	err = insert_page(dev, addr, page, func_id);
+	err = insert_page(dev, addr, page, function);
 	if (err) {
 		mlx5_core_err(dev, "failed to track allocated page\n");
-		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
+		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
 err_mapping:
```

```diff
 		__free_page(page);
 
 	if (zero_addr == 0)
-		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+		dma_unmap_page(device, zero_addr, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 
 	return err;
 }
 
-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+			     bool ec_function)
 {
-	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int err;
 
 	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev, manage_pages, in);
 	if (err)
 		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
 			       func_id, err);
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-		      int notify_fail)
+		      int notify_fail, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	u64 addr;
```

```diff
 
 	for (i = 0; i < npages; i++) {
 retry:
-		err = alloc_4k(dev, &addr);
+		err = alloc_4k(dev, &addr, function);
 		if (err) {
 			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
+				err = alloc_system_page(dev, function);
 			if (err)
 				goto out_4k;
 
```

```diff
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 	if (err) {
```

```diff
 	dev->priv.fw_pages += npages;
 	if (func_id)
 		dev->priv.vfs_pages += npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages += npages;
 
-	mlx5_core_dbg(dev, "err %d\n", err);
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+		      npages, ec_function, func_id, err);
 
 	kvfree(in);
 	return 0;
 
 out_4k:
 	for (i--; i >= 0; i--)
-		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
 	kvfree(in);
 	if (notify_fail)
-		page_notify_fail(dev, func_id);
+		page_notify_fail(dev, func_id, ec_function);
 	return err;
+}
+
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+			      bool ec_function)
+{
+	u32 function = get_function(func_id, ec_function);
+	struct rb_root *root;
+	struct rb_node *p;
+	int npages = 0;
+
+	root = xa_load(&dev->priv.page_root_xa, function);
+	if (WARN_ON_ONCE(!root))
+		return;
+
+	p = rb_first(root);
+	while (p) {
+		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+		p = rb_next(p);
+		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+		free_fwp(dev, fwp, fwp->free_count);
+	}
+
+	dev->priv.fw_pages -= npages;
+	if (func_id)
+		dev->priv.vfs_pages -= npages;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= npages;
+
+	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+		      npages, ec_function, func_id);
 }
 
 static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
```
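Note that `release_all_pages()` only frees host memory and fixes up the counters; no MANAGE_PAGES reclaim command is issued, presumably because the firmware has already dropped these pages by the time it raises the release-all event. The `npages` adjustment counts just the 4K chunks the firmware still held. A toy userspace check of that arithmetic, with made-up `free_count` values:

```c
#include <stdio.h>

/* Toy check of the release_all_pages() accounting. MLX5_NUM_4K_IN_PAGE is 4
 * here only for illustration (e.g. a 16K kernel page); free_count is how
 * many 4K chunks the host had already gotten back per fw_page. */
#define MLX5_NUM_4K_IN_PAGE 4

int main(void)
{
	int free_count[] = { 0, 1, 4 }; /* hypothetical per-page states */
	int npages = 0;
	int i;

	for (i = 0; i < 3; i++)
		npages += MLX5_NUM_4K_IN_PAGE - free_count[i];

	printf("4K pages still held by FW: %d\n", npages); /* 4 + 3 + 0 = 7 */
	return 0;
}
```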
```diff
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 			     u32 *in, int in_size, u32 *out, int out_size)
 {
+	struct rb_root *root;
 	struct fw_page *fwp;
 	struct rb_node *p;
+	bool ec_function;
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
 
-	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+	if (!mlx5_cmd_is_down(dev))
 		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
 
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
 	func_id = MLX5_GET(manage_pages_in, in, function_id);
+	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
 
-	p = rb_first(&dev->priv.page_root);
+	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
+	if (WARN_ON_ONCE(!root))
+		return -EEXIST;
+
+	p = rb_first(root);
 	while (p && i < npages) {
 		fwp = rb_entry(p, struct fw_page, rb_node);
 		p = rb_next(p);
-		if (fwp->func_id != func_id)
-			continue;
 
 		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
 	}
```

```diff
 	return 0;
 }
 
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-			 int *nclaimed)
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+			 int *nclaimed, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int num_claimed;
 	u32 *out;
 	int err;
```

```diff
 	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
 	MLX5_SET(manage_pages_in, in, function_id, func_id);
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
 
-	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+		      func_id, npages, outlen);
 	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
 	if (err) {
 		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
```

```diff
 	}
 
 	for (i = 0; i < num_claimed; i++)
-		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
 
 	if (nclaimed)
 		*nclaimed = num_claimed;
```

```diff
 	dev->priv.fw_pages -= num_claimed;
 	if (func_id)
 		dev->priv.vfs_pages -= num_claimed;
+	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+		dev->priv.peer_pf_pages -= num_claimed;
 
 out_free:
 	kvfree(out);
```

```diff
 	struct mlx5_core_dev *dev = req->dev;
 	int err = 0;
 
-	if (req->npages < 0)
-		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+	if (req->release_all)
+		release_all_pages(dev, req->func_id, req->ec_function);
+	else if (req->npages < 0)
+		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+				    req->ec_function);
 	else if (req->npages > 0)
-		err = give_pages(dev, req->func_id, req->npages, 1);
+		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
 
 	if (err)
 		mlx5_core_warn(dev, "%s fail %d\n",
```

```diff
 	kfree(req);
 }
 
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages)
+enum {
+	EC_FUNCTION_MASK = 0x8000,
+	RELEASE_ALL_PAGES_MASK = 0x4000,
+};
+
+static int req_pages_handler(struct notifier_block *nb,
+			     unsigned long type, void *data)
 {
 	struct mlx5_pages_req *req;
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	struct mlx5_eqe *eqe;
+	bool ec_function;
+	bool release_all;
+	u16 func_id;
+	s32 npages;
 
+	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	eqe = data;
+
+	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
+	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+		      RELEASE_ALL_PAGES_MASK;
+	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+		      func_id, npages, release_all);
 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req) {
 		mlx5_core_warn(dev, "failed to allocate pages request\n");
-		return;
+		return NOTIFY_DONE;
 	}
 
 	req->dev = dev;
 	req->func_id = func_id;
 	req->npages = npages;
+	req->ec_function = ec_function;
+	req->release_all = release_all;
 	INIT_WORK(&req->work, pages_work_handler);
 	queue_work(dev->priv.pg_wq, &req->work);
+	return NOTIFY_OK;
 }
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	u16 uninitialized_var(func_id);
-	s32 uninitialized_var(npages);
+	u16 func_id;
+	s32 npages;
 	int err;
 
 	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
```
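The page-request EQE reuses its 16-bit `ec_function` field for two flags, which `req_pages_handler()` splits apart with the masks defined above. A standalone decode of that field, using only the constants from this patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode of the 16-bit ec_function EQE field: bit 15 marks an embedded-CPU
 * function, bit 14 asks the driver to release all of that function's pages. */
#define EC_FUNCTION_MASK       0x8000
#define RELEASE_ALL_PAGES_MASK 0x4000

int main(void)
{
	uint16_t field = 0xc000; /* example: ECPF requesting release-all */

	printf("ec_function = %d\n", !!(field & EC_FUNCTION_MASK));
	printf("release_all = %d\n", !!(field & RELEASE_ALL_PAGES_MASK));
	return 0;
}
```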
```diff
 	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
 		      npages, boot ? "boot" : "init", func_id);
 
-	return give_pages(dev, func_id, npages, 0);
+	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
 }
 
 enum {
```

```diff
 	return ret;
 }
 
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+				   struct rb_root *root, u16 func_id)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-	struct fw_page *fwp;
-	struct rb_node *p;
-	int nclaimed = 0;
-	int err = 0;
 
-	do {
-		p = rb_first(&dev->priv.page_root);
-		if (p) {
-			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id,
-					    optimal_reclaimed_pages(),
-					    &nclaimed);
+	while (!RB_EMPTY_ROOT(root)) {
+		int nclaimed;
+		int err;
 
-			if (err) {
-				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-					       err);
-				return err;
-			}
-			if (nclaimed)
-				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+				    &nclaimed, mlx5_core_is_ecpf(dev));
+		if (err) {
+			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+				       err, func_id);
+			return err;
 		}
+
+		if (nclaimed)
+			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
 		if (time_after(jiffies, end)) {
 			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
 			break;
 		}
-	} while (p);
+	}
+
+	return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+	struct rb_root *root;
+	unsigned long id;
+	void *entry;
+
+	xa_for_each(&dev->priv.page_root_xa, id, entry) {
+		root = entry;
+		mlx5_reclaim_root_pages(dev, root, id);
+		xa_erase(&dev->priv.page_root_xa, id);
+		kfree(root);
+	}
+
+	WARN_ON(!xa_empty(&dev->priv.page_root_xa));
 
 	WARN(dev->priv.fw_pages,
 	     "FW pages counter is %d after reclaiming all pages\n",
```

```diff
 	WARN(dev->priv.vfs_pages,
 	     "VFs FW pages counter is %d after reclaiming all pages\n",
 	     dev->priv.vfs_pages);
+	WARN(dev->priv.peer_pf_pages,
+	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.peer_pf_pages);
 
 	return 0;
 }
 
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-	dev->priv.page_root = RB_ROOT;
 	INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-	/* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
 	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
 	if (!dev->priv.pg_wq)
 		return -ENOMEM;
 
+	xa_init(&dev->priv.page_root_xa);
+
 	return 0;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+	xa_destroy(&dev->priv.page_root_xa);
+	destroy_workqueue(dev->priv.pg_wq);
+}
+
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
 }
 
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
 {
-	destroy_workqueue(dev->priv.pg_wq);
+	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+	flush_workqueue(dev->priv.pg_wq);
 }
 
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 {
 	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-	int prev_vfs_pages = dev->priv.vfs_pages;
+	int prev_pages = *pages;
 
 	/* In case of internal error we will free the pages manually later */
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
```

```diff
 		return 0;
 	}
 
-	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-		      dev->priv.name);
-	while (dev->priv.vfs_pages) {
+	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
+	while (*pages) {
 		if (time_after(jiffies, end)) {
-			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
 			return -ETIMEDOUT;
 		}
-		if (dev->priv.vfs_pages < prev_vfs_pages) {
+		if (*pages < prev_pages) {
 			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-			prev_vfs_pages = dev->priv.vfs_pages;
+			prev_pages = *pages;
 		}
 		msleep(50);
 	}
 
-	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+	mlx5_core_dbg(dev, "All pages received\n");
 	return 0;
 }
```
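With the counter passed by pointer, the old VF-specific waiter generalizes to any of the bookkeeping counters updated above. Hypothetical call sites (the real callers live outside this file and are not part of this diff), sketched under the assumption that the SR-IOV and ECPF teardown paths each drain their own counter:

```c
/* Sketch only: plausible call sites for the generalized waiter. */
int err;

err = mlx5_wait_for_pages(dev, &dev->priv.vfs_pages);     /* SR-IOV disable */
if (err)
	mlx5_core_warn(dev, "VF pages not fully reclaimed\n");

err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages); /* ECPF teardown */
if (err)
	mlx5_core_warn(dev, "peer PF pages not fully reclaimed\n");
```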