@@ -1097,7 +1097,7 @@
 static void handle_temp_err(struct hfi1_devdata *dd);
 static void dc_shutdown(struct hfi1_devdata *dd);
 static void dc_start(struct hfi1_devdata *dd);
-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
                            unsigned int *np);
 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
@@ -12348,6 +12348,7 @@
 
         if (dd->synth_stats_timer.function)
                 del_timer_sync(&dd->synth_stats_timer);
+        cancel_work_sync(&dd->update_cntr_work);
         ppd = (struct hfi1_pportdata *)(dd + 1);
         for (i = 0; i < dd->num_pports; i++, ppd++) {
                 kfree(ppd->cntrs);
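Note on the cancel_work_sync() addition above: the deferred counter-update work can still be queued (or already running) when the counters are torn down, so it has to be flushed before the buffers it touches are freed a few lines later. A minimal sketch of that teardown ordering, using hypothetical names (my_dev, stats_timer, update_work) rather than the driver's real structures:

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {
        struct timer_list stats_timer;   /* periodically queues update_work */
        struct work_struct update_work;  /* reads/writes cntrs when it runs */
        u64 *cntrs;
};

static void my_dev_free_cntrs(struct my_dev *md)
{
        /* stop the source of new work first */
        del_timer_sync(&md->stats_timer);
        /* then wait for any queued or running work item to finish */
        cancel_work_sync(&md->update_work);
        /* only now is it safe to free the memory the work item uses */
        kfree(md->cntrs);
        md->cntrs = NULL;
}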
@@ -13403,7 +13404,6 @@
         int ret;
         unsigned ngroups;
         int rmt_count;
-        int user_rmt_reduced;
         u32 n_usr_ctxts;
         u32 send_contexts = chip_send_contexts(dd);
         u32 rcv_contexts = chip_rcv_contexts(dd);
@@ -13462,28 +13462,34 @@
                                  (num_kernel_contexts + n_usr_ctxts),
                                  &node_affinity.real_cpu_mask);
         /*
-         * The RMT entries are currently allocated as shown below:
-         * 1. QOS (0 to 128 entries);
-         * 2. FECN (num_kernel_context - 1 + num_user_contexts +
-         *    num_netdev_contexts);
-         * 3. netdev (num_netdev_contexts).
-         * It should be noted that FECN oversubscribe num_netdev_contexts
-         * entries of RMT because both netdev and PSM could allocate any receive
-         * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
-         * and PSM FECN must reserve an RMT entry for each possible PSM receive
-         * context.
+         * RMT entries are allocated as follows:
+         * 1. QOS (0 to 128 entries)
+         * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
+         *    num_netdev_contexts [b])
+         * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
+         *
+         * Notes:
+         * [a] Kernel contexts (except control) are included in FECN if kernel
+         *     TID_RDMA is active.
+         * [b] Netdev and user contexts are randomly allocated from the same
+         *     context pool, so FECN must cover all contexts in the pool.
          */
-        rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
-        if (HFI1_CAP_IS_KSET(TID_RDMA))
-                rmt_count += num_kernel_contexts - 1;
-        if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-                user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
-                dd_dev_err(dd,
-                           "RMT size is reducing the number of user receive contexts from %u to %d\n",
-                           n_usr_ctxts,
-                           user_rmt_reduced);
-                /* recalculate */
-                n_usr_ctxts = user_rmt_reduced;
+        rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
+                    + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
+                                                  : 0)
+                    + n_usr_ctxts
+                    + num_netdev_contexts
+                    + NUM_NETDEV_MAP_ENTRIES;
+        if (rmt_count > NUM_MAP_ENTRIES) {
+                int over = rmt_count - NUM_MAP_ENTRIES;
+                /* try to squish user contexts, minimum of 1 */
+                if (over >= n_usr_ctxts) {
+                        dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
+                        return -EINVAL;
+                }
+                dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
+                           n_usr_ctxts, n_usr_ctxts - over);
+                n_usr_ctxts -= over;
         }
 
         /* the first N are kernel contexts, the rest are user/netdev contexts */
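To make the new sizing logic above concrete: rmt_count is now computed up front as the sum of every prospective RMT consumer (QOS, the FECN entries for kernel, user, and netdev contexts, and the dedicated netdev map entries), and any overflow is absorbed by shrinking the user-context request instead of overrunning the table. Below is a standalone worked example of that arithmetic; the table size, the QOS entry count, and all context counts are assumed values for illustration only, not numbers taken from the patch or from any particular chip configuration.

/* Worked example of the new rmt_count arithmetic (userspace, standalone). */
#include <stdio.h>

#define NUM_MAP_ENTRIES         256     /* assumed RSM map table size */
#define NUM_NETDEV_MAP_ENTRIES  8       /* assumed netdev reservation */

int main(void)
{
        unsigned int qos_entries = 32;          /* assumed qos_rmt_entries() result */
        unsigned int num_kernel_contexts = 17;  /* includes the control context */
        unsigned int n_usr_ctxts = 200;
        unsigned int num_netdev_contexts = 8;
        int tid_rdma_enabled = 1;
        int rmt_count;

        rmt_count = qos_entries
                    + (tid_rdma_enabled ? num_kernel_contexts - 1 : 0)
                    + n_usr_ctxts
                    + num_netdev_contexts
                    + NUM_NETDEV_MAP_ENTRIES;
        printf("rmt_count = %d, table holds %d\n", rmt_count, NUM_MAP_ENTRIES);

        if (rmt_count > NUM_MAP_ENTRIES) {
                int over = rmt_count - NUM_MAP_ENTRIES;

                if (over >= (int)n_usr_ctxts) {
                        printf("cannot fit even with no user contexts\n");
                        return 1;
                }
                printf("squeezing user contexts: %u -> %u\n",
                       n_usr_ctxts, n_usr_ctxts - over);
                n_usr_ctxts -= over;
        }
        return 0;
}

With these assumed inputs the sum is 32 + 16 + 200 + 8 + 8 = 264, eight entries over the assumed 256-entry table, so the user-context request is squeezed from 200 down to 192; had the overflow been at least as large as the user-context request, the example (like the new code) would give up and report an error instead.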
@@ -14340,15 +14346,15 @@
 }
 
 /* return the number of RSM map table entries that will be used for QOS */
-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
                            unsigned int *np)
 {
         int i;
         unsigned int m, n;
-        u8 max_by_vl = 0;
+        uint max_by_vl = 0;
 
         /* is QOS active at all? */
-        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
+        if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
             num_vls == 1 ||
             krcvqsset <= 1)
                 goto no_qos;
@@ -14406,7 +14412,7 @@
 
         if (!rmt)
                 goto bail;
-        rmt_entries = qos_rmt_entries(dd, &m, &n);
+        rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
         if (rmt_entries == 0)
                 goto bail;
         qpns_per_vl = 1 << m;