...
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
+#include <linux/xarray.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"

 enum {
         MLX5_PAGES_CANT_GIVE = 0,
...
 struct mlx5_pages_req {
         struct mlx5_core_dev *dev;
         u16 func_id;
+        u8 ec_function;
         s32 npages;
         struct work_struct work;
+        u8 release_all;
 };

 struct fw_page {
         struct rb_node rb_node;
         u64 addr;
         struct page *page;
-        u16 func_id;
+        u32 function;
         unsigned long bitmask;
         struct list_head list;
         unsigned free_count;
...
         MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };

-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
 {
-        struct rb_root *root = &dev->priv.page_root;
-        struct rb_node **new = &root->rb_node;
+        return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+{
+        struct rb_root *root;
+        int err;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (root)
+                return root;
+
+        root = kzalloc(sizeof(*root), GFP_KERNEL);
+        if (!root)
+                return ERR_PTR(-ENOMEM);
+
+        err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
+        if (err) {
+                kfree(root);
+                return ERR_PTR(err);
+        }
+
+        *root = RB_ROOT;
+
+        return root;
+}
+
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
+{
         struct rb_node *parent = NULL;
+        struct rb_root *root;
+        struct rb_node **new;
         struct fw_page *nfp;
         struct fw_page *tfp;
         int i;
+
+        root = page_root_per_function(dev, function);
+        if (IS_ERR(root))
+                return PTR_ERR(root);
+
+        new = &root->rb_node;

         while (*new) {
                 parent = *new;
...

         nfp->addr = addr;
         nfp->page = page;
-        nfp->func_id = func_id;
+        nfp->function = function;
         nfp->free_count = MLX5_NUM_4K_IN_PAGE;
         for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                 set_bit(i, &nfp->bitmask);
...
         return 0;
 }

-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
+                                    u32 function)
 {
-        struct rb_root *root = &dev->priv.page_root;
-        struct rb_node *tmp = root->rb_node;
         struct fw_page *result = NULL;
+        struct rb_root *root;
+        struct rb_node *tmp;
         struct fw_page *tfp;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (WARN_ON_ONCE(!root))
+                return NULL;
+
+        tmp = root->rb_node;

         while (tmp) {
                 tfp = rb_entry(tmp, struct fw_page, rb_node);
...
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                 s32 *npages, int boot)
 {
-        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
         int err;

         MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
         MLX5_SET(query_pages_in, in, op_mod, boot ?
                  MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                  MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+        MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

-        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
         if (err)
                 return err;

...
         return err;
 }

-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
-        struct fw_page *fp;
+        struct fw_page *fp = NULL;
+        struct fw_page *iter;
         unsigned n;

-        if (list_empty(&dev->priv.free_list))
+        list_for_each_entry(iter, &dev->priv.free_list, list) {
+                if (iter->function != function)
+                        continue;
+                fp = iter;
+        }
+
+        if (list_empty(&dev->priv.free_list) || !fp)
                 return -ENOMEM;

-        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
         n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
         if (n >= MLX5_NUM_4K_IN_PAGE) {
-                mlx5_core_warn(dev, "alloc 4k bug\n");
+                mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+                               fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
                 return -ENOENT;
         }
         clear_bit(n, &fp->bitmask);
...

 #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+                     bool in_free_list)
+{
+        struct rb_root *root;
+
+        root = xa_load(&dev->priv.page_root_xa, fwp->function);
+        if (WARN_ON_ONCE(!root))
+                return;
+
+        rb_erase(&fwp->rb_node, root);
+        if (in_free_list)
+                list_del(&fwp->list);
+        dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
+                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+        __free_page(fwp->page);
+        kfree(fwp);
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
         struct fw_page *fwp;
         int n;

-        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
         if (!fwp) {
-                mlx5_core_warn(dev, "page not found\n");
+                mlx5_core_warn_rl(dev, "page not found\n");
                 return;
         }
-
         n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
         fwp->free_count++;
         set_bit(n, &fwp->bitmask);
-        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-                rb_erase(&fwp->rb_node, &dev->priv.page_root);
-                if (fwp->free_count != 1)
-                        list_del(&fwp->list);
-                dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-                               PAGE_SIZE, DMA_BIDIRECTIONAL);
-                __free_page(fwp->page);
-                kfree(fwp);
-        } else if (fwp->free_count == 1) {
+        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+                free_fwp(dev, fwp, fwp->free_count != 1);
+        else if (fwp->free_count == 1)
                 list_add(&fwp->list, &dev->priv.free_list);
-        }
 }

-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
+        struct device *device = mlx5_core_dma_dev(dev);
+        int nid = dev_to_node(device);
         struct page *page;
         u64 zero_addr = 1;
         u64 addr;
         int err;
-        int nid = dev_to_node(&dev->pdev->dev);

         page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
         if (!page) {
...
                 return -ENOMEM;
         }
 map:
-        addr = dma_map_page(&dev->pdev->dev, page, 0,
-                            PAGE_SIZE, DMA_BIDIRECTIONAL);
-        if (dma_mapping_error(&dev->pdev->dev, addr)) {
+        addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(device, addr)) {
                 mlx5_core_warn(dev, "failed dma mapping page\n");
                 err = -ENOMEM;
                 goto err_mapping;
...
                 goto map;
         }

-        err = insert_page(dev, addr, page, func_id);
+        err = insert_page(dev, addr, page, function);
         if (err) {
                 mlx5_core_err(dev, "failed to track allocated page\n");
-                dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-                               DMA_BIDIRECTIONAL);
+                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
         }

 err_mapping:
...
                 __free_page(page);

         if (zero_addr == 0)
-                dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+                dma_unmap_page(device, zero_addr, PAGE_SIZE,
                                DMA_BIDIRECTIONAL);

         return err;
 }

-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+                             bool ec_function)
 {
-        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
         int err;

         MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

-        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = mlx5_cmd_exec_in(dev, manage_pages, in);
         if (err)
                 mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
                                func_id, err);
 }

 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-                      int notify_fail)
+                      int notify_fail, bool ec_function)
 {
+        u32 function = get_function(func_id, ec_function);
         u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
         int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
         u64 addr;
...

         for (i = 0; i < npages; i++) {
 retry:
-                err = alloc_4k(dev, &addr);
+                err = alloc_4k(dev, &addr, function);
                 if (err) {
                         if (err == -ENOMEM)
-                                err = alloc_system_page(dev, func_id);
+                                err = alloc_system_page(dev, function);
                         if (err)
                                 goto out_4k;

...
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
         MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

         err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
         if (err) {
...
         dev->priv.fw_pages += npages;
         if (func_id)
                 dev->priv.vfs_pages += npages;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages += npages;

-        mlx5_core_dbg(dev, "err %d\n", err);
+        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+                      npages, ec_function, func_id, err);

         kvfree(in);
         return 0;

 out_4k:
         for (i--; i >= 0; i--)
-                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
         kvfree(in);
         if (notify_fail)
-                page_notify_fail(dev, func_id);
+                page_notify_fail(dev, func_id, ec_function);
         return err;
+}
+
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+                              bool ec_function)
+{
+        u32 function = get_function(func_id, ec_function);
+        struct rb_root *root;
+        struct rb_node *p;
+        int npages = 0;
+
+        root = xa_load(&dev->priv.page_root_xa, function);
+        if (WARN_ON_ONCE(!root))
+                return;
+
+        p = rb_first(root);
+        while (p) {
+                struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+                p = rb_next(p);
+                npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+                free_fwp(dev, fwp, fwp->free_count);
+        }
+
+        dev->priv.fw_pages -= npages;
+        if (func_id)
+                dev->priv.vfs_pages -= npages;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages -= npages;
+
+        mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+                      npages, ec_function, func_id);
 }

 static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
...
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                              u32 *in, int in_size, u32 *out, int out_size)
 {
+        struct rb_root *root;
         struct fw_page *fwp;
         struct rb_node *p;
+        bool ec_function;
         u32 func_id;
         u32 npages;
         u32 i = 0;

-        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+        if (!mlx5_cmd_is_down(dev))
                 return mlx5_cmd_exec(dev, in, in_size, out, out_size);

         /* No hard feelings, we want our pages back! */
         npages = MLX5_GET(manage_pages_in, in, input_num_entries);
         func_id = MLX5_GET(manage_pages_in, in, function_id);
+        ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

-        p = rb_first(&dev->priv.page_root);
+        root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
+        if (WARN_ON_ONCE(!root))
+                return -EEXIST;
+
+        p = rb_first(root);
         while (p && i < npages) {
                 fwp = rb_entry(p, struct fw_page, rb_node);
                 p = rb_next(p);
-                if (fwp->func_id != func_id)
-                        continue;

                 i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
         }
...
         return 0;
 }

-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-                         int *nclaimed)
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+                         int *nclaimed, bool ec_function)
 {
+        u32 function = get_function(func_id, ec_function);
         int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
         int num_claimed;
         u32 *out;
         int err;
...
         MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
         MLX5_SET(manage_pages_in, in, function_id, func_id);
         MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+        MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

-        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+        mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
+                      func_id, npages, outlen);
         err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
         if (err) {
                 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
...
         }

         for (i = 0; i < num_claimed; i++)
-                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

         if (nclaimed)
                 *nclaimed = num_claimed;
...
         dev->priv.fw_pages -= num_claimed;
         if (func_id)
                 dev->priv.vfs_pages -= num_claimed;
+        else if (mlx5_core_is_ecpf(dev) && !ec_function)
+                dev->priv.peer_pf_pages -= num_claimed;

 out_free:
         kvfree(out);
...
         struct mlx5_core_dev *dev = req->dev;
         int err = 0;

-        if (req->npages < 0)
-                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+        if (req->release_all)
+                release_all_pages(dev, req->func_id, req->ec_function);
+        else if (req->npages < 0)
+                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+                                    req->ec_function);
         else if (req->npages > 0)
-                err = give_pages(dev, req->func_id, req->npages, 1);
+                err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

         if (err)
                 mlx5_core_warn(dev, "%s fail %d\n",
...
         kfree(req);
 }

-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                 s32 npages)
+enum {
+        EC_FUNCTION_MASK = 0x8000,
+        RELEASE_ALL_PAGES_MASK = 0x4000,
+};
+
+static int req_pages_handler(struct notifier_block *nb,
+                             unsigned long type, void *data)
 {
         struct mlx5_pages_req *req;
+        struct mlx5_core_dev *dev;
+        struct mlx5_priv *priv;
+        struct mlx5_eqe *eqe;
+        bool ec_function;
+        bool release_all;
+        u16 func_id;
+        s32 npages;

+        priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+        dev = container_of(priv, struct mlx5_core_dev, priv);
+        eqe = data;
+
+        func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+        npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+        ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
+        release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+                      RELEASE_ALL_PAGES_MASK;
+        mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+                      func_id, npages, release_all);
         req = kzalloc(sizeof(*req), GFP_ATOMIC);
         if (!req) {
                 mlx5_core_warn(dev, "failed to allocate pages request\n");
-                return;
+                return NOTIFY_DONE;
         }

         req->dev = dev;
         req->func_id = func_id;
         req->npages = npages;
+        req->ec_function = ec_function;
+        req->release_all = release_all;
         INIT_WORK(&req->work, pages_work_handler);
         queue_work(dev->priv.pg_wq, &req->work);
+        return NOTIFY_OK;
 }

 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-        u16 uninitialized_var(func_id);
-        s32 uninitialized_var(npages);
+        u16 func_id;
+        s32 npages;
         int err;

         err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
...
         mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                       npages, boot ? "boot" : "init", func_id);

-        return give_pages(dev, func_id, npages, 0);
+        return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
 }

 enum {
...
         return ret;
 }

-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+                                   struct rb_root *root, u16 func_id)
 {
         unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-        struct fw_page *fwp;
-        struct rb_node *p;
-        int nclaimed = 0;
-        int err = 0;

-        do {
-                p = rb_first(&dev->priv.page_root);
-                if (p) {
-                        fwp = rb_entry(p, struct fw_page, rb_node);
-                        err = reclaim_pages(dev, fwp->func_id,
-                                            optimal_reclaimed_pages(),
-                                            &nclaimed);
+        while (!RB_EMPTY_ROOT(root)) {
+                int nclaimed;
+                int err;

-                        if (err) {
-                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-                                               err);
-                                return err;
-                        }
-                        if (nclaimed)
-                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+                err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+                                    &nclaimed, mlx5_core_is_ecpf(dev));
+                if (err) {
+                        mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+                                       err, func_id);
+                        return err;
                 }
+
+                if (nclaimed)
+                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+
                 if (time_after(jiffies, end)) {
                         mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                         break;
                 }
-        } while (p);
+        }
+
+        return 0;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+        struct rb_root *root;
+        unsigned long id;
+        void *entry;
+
+        xa_for_each(&dev->priv.page_root_xa, id, entry) {
+                root = entry;
+                mlx5_reclaim_root_pages(dev, root, id);
+                xa_erase(&dev->priv.page_root_xa, id);
+                kfree(root);
+        }
+
+        WARN_ON(!xa_empty(&dev->priv.page_root_xa));

         WARN(dev->priv.fw_pages,
              "FW pages counter is %d after reclaiming all pages\n",
...
         WARN(dev->priv.vfs_pages,
              "VFs FW pages counter is %d after reclaiming all pages\n",
              dev->priv.vfs_pages);
+        WARN(dev->priv.peer_pf_pages,
+             "Peer PF FW pages counter is %d after reclaiming all pages\n",
+             dev->priv.peer_pf_pages);

         return 0;
 }

-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
-        dev->priv.page_root = RB_ROOT;
         INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-        /* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
         dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
         if (!dev->priv.pg_wq)
                 return -ENOMEM;

+        xa_init(&dev->priv.page_root_xa);
+
         return 0;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+        xa_destroy(&dev->priv.page_root_xa);
+        destroy_workqueue(dev->priv.pg_wq);
+}
+
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+        MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+        mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
 }

 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
 {
-        destroy_workqueue(dev->priv.pg_wq);
+        mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+        flush_workqueue(dev->priv.pg_wq);
 }

-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 {
         unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-        int prev_vfs_pages = dev->priv.vfs_pages;
+        int prev_pages = *pages;

         /* In case of internal error we will free the pages manually later */
         if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
...
                 return 0;
         }

-        mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-                      dev->priv.name);
-        while (dev->priv.vfs_pages) {
+        mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
+        while (*pages) {
                 if (time_after(jiffies, end)) {
-                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
                         return -ETIMEDOUT;
                 }
-                if (dev->priv.vfs_pages < prev_vfs_pages) {
+                if (*pages < prev_pages) {
                         end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-                        prev_vfs_pages = dev->priv.vfs_pages;
+                        prev_pages = *pages;
                 }
                 msleep(50);
         }

-        mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+        mlx5_core_dbg(dev, "All pages received\n");
         return 0;
 }
---|